koustuvs committed
Commit 0120a6a · 0 Parent(s):

Initial commit

.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,79 @@
+ ---
+ license: apache-2.0
+ pipeline_tag: video-classification
+ tags:
+ - video
+ ---
+
+ # V-JEPA 2
+
+ A frontier video understanding model developed by FAIR at Meta. V-JEPA 2 extends the pretraining objectives of [VJEPA](https://ai.meta.com/blog/v-jepa-yann-lecun-ai-model-video-joint-embedding-predictive-architecture/) to data and models at scale, resulting in state-of-the-art video understanding capabilities.
+ The code is released [in this repository](https://github.com/facebookresearch/vjepa2).
+
+ <img src="https://dl.fbaipublicfiles.com/vjepa2/vjepa2-pretrain.gif">&nbsp;
+
+ ## Intended Uses
+
+ V-JEPA 2 is intended to represent any video (or image) for video classification, retrieval, or for use as a video encoder in VLMs.
+
+ ```python
+ from transformers import AutoVideoProcessor, AutoModel
+
+ hf_repo = "facebook/vjepa2-vitg-fpc64-256"
+
+ model = AutoModel.from_pretrained(hf_repo)
+ processor = AutoVideoProcessor.from_pretrained(hf_repo)
+ ```
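+
+ The fp32 weights are ~4.1 GB (see `model.safetensors` below), i.e. roughly 1B parameters, so on a GPU you will likely want reduced precision. A minimal sketch, assuming a CUDA device is available; `torch_dtype` is the standard `from_pretrained` kwarg, nothing model-specific:
+
+ ```python
+ import torch
+ from transformers import AutoModel
+
+ # Load the encoder weights directly in fp16 and move them to the GPU.
+ model = AutoModel.from_pretrained(hf_repo, torch_dtype=torch.float16).to("cuda")
+ ```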
+
+ To load a video, sample the number of frames the model expects. For this model, that is 64 frames per clip.
+
+ ```python
+ import torch
+ from torchcodec.decoders import VideoDecoder
+ import numpy as np
+
+ video_url = "https://huggingface.co/datasets/nateraw/kinetics-mini/resolve/main/val/archery/-Qz25rXdMjE_000014_000024.mp4"
+ vr = VideoDecoder(video_url)
+ frame_idx = np.arange(0, 64)  # sample the first 64 frames; you can define a more complex sampling strategy here
+ video = vr.get_frames_at(indices=frame_idx).data  # T x C x H x W
+ video = processor(video, return_tensors="pt").to(model.device)
+ with torch.no_grad():
+     video_embeddings = model.get_vision_features(**video)
+
+ print(video_embeddings.shape)
+ ```
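+
+ Note that `get_vision_features` returns one feature vector per spatio-temporal patch rather than a single clip embedding. For retrieval-style use you typically pool these tokens into one vector per clip; a minimal sketch, assuming mean pooling (a common choice, not one prescribed by this model card):
+
+ ```python
+ import torch.nn.functional as F
+
+ # video_embeddings: [batch, num_tokens, hidden_size]
+ clip_vector = video_embeddings.mean(dim=1)      # pool patch tokens -> [batch, hidden_size]
+ clip_vector = F.normalize(clip_vector, dim=-1)  # unit norm, so dot products are cosine similarities
+ ```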
+
+ To encode an image, simply repeat it across the desired number of frames.
+
+ ```python
+ from transformers.image_utils import load_image
+
+ image = load_image("https://huggingface.co/datasets/merve/coco/resolve/main/val2017/000000000285.jpg")
+ pixel_values = processor(image, return_tensors="pt").to(model.device)["pixel_values_videos"]
+ pixel_values = pixel_values.repeat(1, 16, 1, 1, 1)  # repeat the image to form a 16-frame clip
+
+ with torch.no_grad():
+     image_embeddings = model.get_vision_features(pixel_values)
+
+ print(image_embeddings.shape)
+ ```
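+
+ This base checkpoint ships without a classification head, so one way to use it for the video classification mentioned above is a linear probe on pooled encoder features. A hypothetical sketch; `probe` and `num_classes` are placeholders, not part of this repository:
+
+ ```python
+ import torch.nn as nn
+
+ num_classes = 400  # placeholder size, e.g. a Kinetics-400-style label space
+ probe = nn.Linear(model.config.hidden_size, num_classes)  # hidden_size is 1408 for this checkpoint
+
+ with torch.no_grad():
+     feats = model.get_vision_features(**video)  # [batch, num_tokens, 1408]
+ logits = probe(feats.mean(dim=1))               # pool tokens, then classify
+ ```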
+
+ For more code examples, please refer to the V-JEPA 2 documentation.
+
+
+ ### Citation
+
+ ```
+ @techreport{assran2025vjepa2,
+   title={V-JEPA~2: Self-Supervised Video Models Enable Understanding, Prediction and Planning},
+   author={Assran, Mahmoud and Bardes, Adrien and Fan, David and Garrido, Quentin and Howes, Russell and
+           Komeili, Mojtaba and Muckley, Matthew and Rizvi, Ammar and Roberts, Claire and Sinha, Koustuv and Zholus, Artem and
+           Arnaud, Sergio and Gejji, Abha and Martin, Ada and Robert Hogan, Francois and Dugas, Daniel and
+           Bojanowski, Piotr and Khalidov, Vasil and Labatut, Patrick and Massa, Francisco and Szafraniec, Marc and
+           Krishnakumar, Kapil and Li, Yong and Ma, Xiaodong and Chandar, Sarath and Meier, Franziska and LeCun, Yann and
+           Rabbat, Michael and Ballas, Nicolas},
+   institution={FAIR at Meta},
+   year={2025}
+ }
+ ```
config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "architectures": [
+     "VJEPA2Model"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "crop_size": 256,
+   "drop_path_rate": 0.0,
+   "frames_per_clip": 64,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 1408,
+   "image_size": 256,
+   "in_chans": 3,
+   "initializer_range": 0.02,
+   "layer_norm_eps": 1e-06,
+   "mlp_ratio": 4.363636363636363,
+   "model_type": "vjepa2",
+   "num_attention_heads": 22,
+   "num_hidden_layers": 40,
+   "patch_size": 16,
+   "pred_hidden_size": 384,
+   "pred_mlp_ratio": 4.0,
+   "pred_num_attention_heads": 12,
+   "pred_num_hidden_layers": 12,
+   "pred_num_mask_tokens": 10,
+   "pred_zero_init_mask_tokens": true,
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.53.0.dev0",
+   "tubelet_size": 2,
+   "use_SiLU": false,
+   "wide_SiLU": true
+ }
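
The config pins down the encoder's token geometry: 64 frames per clip, tubelets of 2 frames, 256×256 crops, 16×16 patches. Under standard ViT-style tubelet patchification this implies the following token count (a back-of-envelope check on the config values, not an official formula):

```python
# Values read straight from config.json above.
frames_per_clip, tubelet_size = 64, 2
image_size, patch_size = 256, 16

temporal_tokens = frames_per_clip // tubelet_size   # 32
spatial_tokens = (image_size // patch_size) ** 2    # 256
print(temporal_tokens * spatial_tokens)             # 8192 tokens per clip, each of hidden_size 1408
```

So `video_embeddings.shape` in the README example should come out to `[1, 8192, 1408]` (an inference from the config, not a quoted output).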
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f205e77aa2ade168db6b09d4bc420d156141f64ab964278a9c181a2bdf2a232b
+ size 4138311608
original/model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67129f011434e605d894e69f2c8e13d9db118deabe59d54bf6e0fa62c2c5cb8e
+ size 16464208210
video_preprocessor_config.json ADDED
@@ -0,0 +1,71 @@
+ {
+   "_valid_kwargs_names": [
+     "do_convert_rgb",
+     "do_resize",
+     "size",
+     "size_divisor",
+     "default_to_square",
+     "resample",
+     "do_rescale",
+     "rescale_factor",
+     "do_normalize",
+     "image_mean",
+     "image_std",
+     "do_pad",
+     "do_center_crop",
+     "crop_size",
+     "data_format",
+     "input_data_format",
+     "device"
+   ],
+   "crop_size": {
+     "height": 256,
+     "width": 256
+   },
+   "data_format": "channels_first",
+   "default_to_square": true,
+   "device": null,
+   "do_center_crop": true,
+   "do_convert_rgb": null,
+   "do_normalize": true,
+   "do_pad": null,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.485,
+     0.456,
+     0.406
+   ],
+   "image_std": [
+     0.229,
+     0.224,
+     0.225
+   ],
+   "input_data_format": null,
+   "model_valid_processing_keys": [
+     "do_convert_rgb",
+     "do_resize",
+     "size",
+     "size_divisor",
+     "default_to_square",
+     "resample",
+     "do_rescale",
+     "rescale_factor",
+     "do_normalize",
+     "image_mean",
+     "image_std",
+     "do_pad",
+     "do_center_crop",
+     "crop_size",
+     "data_format",
+     "input_data_format",
+     "device"
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "shortest_edge": 292
+   },
+   "size_divisor": null,
+   "video_processor_type": "VJEPA2VideoProcessor"
+ }
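
In plain terms, the processor resizes each frame so its shortest edge is 292 px (bilinear, since `resample: 2` is PIL's BILINEAR code), center-crops to 256×256, rescales by 1/255, and normalizes with ImageNet statistics. A rough torchvision equivalent, as a sketch only (the authoritative implementation is `VJEPA2VideoProcessor` in transformers):

```python
import torch
from torchvision.transforms import v2

# Values mirror video_preprocessor_config.json above.
transform = v2.Compose([
    v2.Resize(292, interpolation=v2.InterpolationMode.BILINEAR),  # size.shortest_edge
    v2.CenterCrop(256),                                           # crop_size
    v2.ToDtype(torch.float32, scale=True),                        # rescale_factor = 1/255
    v2.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
# Apply per clip: transform(frames) where frames is a uint8 tensor of shape [T, C, H, W].
```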