Update modeling file
Browse files — processing_llava.py (+1 −2)
processing_llava.py
CHANGED
|
@@ -2,7 +2,6 @@ import math
|
|
| 2 |
from typing import List, Optional, Union
|
| 3 |
|
| 4 |
import torch
|
| 5 | - from modeling_llava import LlavaForCausalLM
|
| 6 |
from PIL import Image
|
| 7 |
from transformers import ImageProcessingMixin, ProcessorMixin, SiglipImageProcessor, AutoTokenizer, AutoImageProcessor
|
| 8 |
from transformers.feature_extraction_utils import BatchFeature
|
|
@@ -139,7 +138,7 @@ class LlavaProcessor(ProcessorMixin):
|
|
| 139 |
TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]
|
| 140 |
] = None,
|
| 141 |
images: ImageInput = None,
|
| 142 | - model
|
| 143 |
max_crops: int = 0,
|
| 144 |
num_tokens = None,
|
| 145 |
padding: Union[bool, str, PaddingStrategy] = False,
|
|
|
|
| 2 |
from typing import List, Optional, Union
|
| 3 |
|
| 4 |
import torch
|
|
|
|
| 5 |
from PIL import Image
|
| 6 |
from transformers import ImageProcessingMixin, ProcessorMixin, SiglipImageProcessor, AutoTokenizer, AutoImageProcessor
|
| 7 |
from transformers.feature_extraction_utils import BatchFeature
|
|
|
|
| 138 |
TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]
|
| 139 |
] = None,
|
| 140 |
images: ImageInput = None,
|
| 141 | + model = None,
|
| 142 |
max_crops: int = 0,
|
| 143 |
num_tokens = None,
|
| 144 |
padding: Union[bool, str, PaddingStrategy] = False,
|