# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for Llava.
"""

from typing import List, Optional, Union

from ...feature_extraction_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LlavaProcessor(ProcessorMixin):
    r"""
    Constructs a Llava processor which wraps a Llava image processor and a Llava tokenizer into a single processor.

    [`LlavaProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`LlamaTokenizerFast`]. See the
    [`~LlavaProcessor.__call__`] and [`~LlavaProcessor.decode`] for more information.

    Args:
        image_processor ([`CLIPImageProcessor`], *optional*):
            The image processor is a required input.
        tokenizer ([`LlamaTokenizerFast`], *optional*):
            The tokenizer is a required input.
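
    Example (a minimal usage sketch; the checkpoint name and prompt format are illustrative, not prescribed by
    this class):

    ```python
    >>> from PIL import Image
    >>> from transformers import LlavaProcessor

    >>> processor = LlavaProcessor.from_pretrained("llava-hf/llava-1.5-7b-hf")  # illustrative checkpoint
    >>> image = Image.new("RGB", (336, 336))  # stand-in for a real image
    >>> inputs = processor(text="USER: <image> What is shown? ASSISTANT:", images=image)
    >>> list(inputs.keys())  # expected: input_ids, attention_mask, pixel_values
    ```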
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("LlamaTokenizer", "LlamaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        images: ImageInput = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length=None,
        return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
    ) -> BatchFeature:
        """
        Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
        and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to
        encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
        CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
        of the above two methods for more information.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
                Select a strategy to pad the returned sequences (according to the model's padding side and padding
                index) among:

                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence is provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
                  lengths).
            max_length (`int`, *optional*):
                Maximum length of the returned list and optionally padding length (see above).
            truncation (`bool`, *optional*):
                Activates truncation to cut input sequences longer than `max_length` to `max_length`.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.

        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
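
        Example (a hedged sketch; assumes `processor` is an instantiated [`LlavaProcessor`] and relies on the
        default PyTorch `return_tensors`):

        ```python
        >>> import numpy as np

        >>> dummy = np.zeros((224, 224, 3), dtype=np.uint8)  # channels-last dummy image
        >>> batch = processor(text=["a photo of a cat", "a photo of a dog"], images=[dummy, dummy], padding=True)
        >>> batch["input_ids"].shape  # (batch_size, sequence_length)
        >>> batch["pixel_values"].shape  # (batch_size, num_channels, height, width)
        ```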
        """
        if images is not None:
            pixel_values = self.image_processor(images, return_tensors=return_tensors)["pixel_values"]
        else:
            pixel_values = None
        text_inputs = self.tokenizer(
            text, return_tensors=return_tensors, padding=padding, truncation=truncation, max_length=max_length
        )

        return BatchFeature(data={**text_inputs, "pixel_values": pixel_values})

    # Copied from transformers.models.clip.processing_clip.CLIPProcessor.batch_decode with CLIP->Llama
    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    # Copied from transformers.models.clip.processing_clip.CLIPProcessor.decode with CLIP->Llama
    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
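        # Merge the two name lists, dropping duplicates while preserving order (dict.fromkeys keeps insertion order).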
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))