from transformers import CLIPModel, CLIPProcessor

# pip install torch torchvision torchaudio
# pip install transformers

# Local folder where you want to save the model
local_model_path = "models/clip-vit-base-patch32"
# local_model_path = "models/clip-vit-large-patch14"  # switch to this (and the model IDs below) for the large variant

# Load model & processor
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

# Save them locally
model.save_pretrained(local_model_path)
processor.save_pretrained(local_model_path)

print(f"Model saved to {local_model_path}")
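# Optional sanity check (a minimal sketch, not part of the original snippet): reload the
# saved copy from the local folder only and run a quick zero-shot comparison on a dummy
# image, to confirm the downloaded folder is self-contained before packaging it.
# The variable names and the two text labels below are arbitrary illustrations.
from PIL import Image  # Pillow ships as a torchvision dependency
import torch

local_model = CLIPModel.from_pretrained(local_model_path, local_files_only=True)
local_processor = CLIPProcessor.from_pretrained(local_model_path, local_files_only=True)

dummy_image = Image.new("RGB", (224, 224), color="red")
inputs = local_processor(
    text=["a red square", "a photo of a cat"],
    images=dummy_image,
    return_tensors="pt",
    padding=True,
)
with torch.no_grad():
    logits_per_image = local_model(**inputs).logits_per_image
print("Zero-shot probabilities:", logits_per_image.softmax(dim=-1).tolist())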
# Then, package the model from the command line:
# tar -czvf clip_model.tar.gz mode
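# Alternatively, a sketch of the same packaging step done from Python with the standard
# tarfile module instead of the shell; the archive name clip_model.tar.gz is only an
# assumption taken from the tar comment above.
import os
import tarfile

with tarfile.open("clip_model.tar.gz", "w:gz") as tar:
    tar.add(local_model_path, arcname=os.path.basename(local_model_path))
print("Packaged archive: clip_model.tar.gz")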