# coding=utf-8
# Copyright 2023 HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Fuyu model."""

from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...modeling_outputs import CausalLMOutputWithPast
from ...modeling_utils import PreTrainedModel
from ...models.auto.modeling_auto import AutoModelForCausalLM
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_fuyu import FuyuConfig


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "FuyuConfig"


FUYU_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`FuyuConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


@add_start_docstrings(
    "The bare Fuyu Model outputting raw hidden-states without any specific head on top.",
    FUYU_START_DOCSTRING,
)
class FuyuPreTrainedModel(PreTrainedModel):
    config_class = FuyuConfig
    base_model_prefix = "fuyu"
    supports_gradient_checkpointing = True
    _no_split_modules = []
    _skip_keys_device_placement = "past_key_values"

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()


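# A minimal sketch (illustrative std value, not read from a real config) of what
# `_init_weights` does when `post_init()` applies it to every submodule: linear and
# embedding weights are redrawn from N(0, initializer_range**2), and biases / padding
# rows are zeroed.
#
#     layer = nn.Linear(8, 8)
#     layer.weight.data.normal_(mean=0.0, std=0.02)  # assuming initializer_range == 0.02
#     layer.bias.data.zero_()

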
FUYU_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        image_patches (`torch.FloatTensor` of shape `(batch_size, num_total_patches, patch_size x patch_size x num_channels)`, *optional*):
            Image patches to be used as continuous embeddings. The patches are flattened and then projected to the
            hidden size of the model.
        image_patches_indices (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices indicating at which positions in `inputs_embeds` the `image_patches` have to be inserted.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "Fuyu Model with a language modeling head on top for causal language modeling conditioned on image patches and text.",
    FUYU_START_DOCSTRING,
)
class FuyuForCausalLM(FuyuPreTrainedModel):
    def __init__(self, config: FuyuConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size
        self.language_model = AutoModelForCausalLM.from_config(config.text_config)

        self.vision_embed_tokens = nn.Linear(
            config.patch_size * config.patch_size * config.num_channels, config.hidden_size
        )

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.language_model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.language_model.set_input_embeddings(value)
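
    # A minimal sketch (toy sizes, not read from the config) of what `vision_embed_tokens`
    # computes: each image patch is flattened to patch_size * patch_size * num_channels
    # values and linearly projected into the hidden size of the text model.
    #
    #     proj = nn.Linear(30 * 30 * 3, 4096)       # fuyu-8b-like patch and hidden sizes
    #     patches = torch.randn(1, 7, 30 * 30 * 3)  # a batch with 7 flattened patches
    #     embeddings = proj(patches)                # -> (1, 7, 4096)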

    def gather_continuous_embeddings(
        self,
        word_embeddings: torch.Tensor,
        continuous_embeddings: List[torch.Tensor],
        image_patch_input_indices: torch.Tensor,
    ) -> torch.Tensor:
        """This function places the continuous_embeddings into the word_embeddings at the locations
        indicated by image_patch_input_indices. Different batch elements can have different numbers of continuous
        embeddings.

        Args:
            word_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Tensor of word embeddings.
            continuous_embeddings (`List[torch.FloatTensor]`):
                List of continuous embeddings. The length of the list is the batch size. Each entry is of shape
                `(num_image_embeddings, hidden_size)`, and `num_image_embeddings` needs to match the number of
                non-negative indices in `image_patch_input_indices` for that batch element.
            image_patch_input_indices (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Tensor of indices of the image patches in the `input_ids` tensor.
        """
        if not (word_embeddings.shape[0] == len(continuous_embeddings)):
            raise ValueError(
                f"Batch sizes must match! Got {len(continuous_embeddings)=} and {word_embeddings.shape[0]=}"
            )

        output_embeddings = word_embeddings.clone()
        for batch_idx in range(word_embeddings.shape[0]):
            # First, find the positions of all the non-negative values in image_patch_input_indices; those are the
            # positions in word_embeddings that we want to replace with content from continuous_embeddings.
            dst_indices = torch.nonzero(image_patch_input_indices[batch_idx] >= 0, as_tuple=True)[0]
            # Next, look up those positions in image_patch_input_indices to find the indices in
            # continuous_embeddings that we want to use to replace the values in word_embeddings.
            src_indices = image_patch_input_indices[batch_idx][dst_indices]
            # Check if we have more indices than embeddings. Note that we could have fewer indices if images got truncated.
            if src_indices.shape[0] > continuous_embeddings[batch_idx].shape[0]:
                raise ValueError(
                    f"Number of continuous embeddings {continuous_embeddings[batch_idx].shape=} does not match "
                    f"number of continuous token ids {src_indices.shape=} in batch element {batch_idx}."
                )
            output_embeddings[batch_idx, dst_indices] = continuous_embeddings[batch_idx][src_indices]
        return output_embeddings
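
    # A minimal sketch (toy tensors, illustration only) of the scatter performed above:
    #
    #     word_embeddings = torch.zeros(1, 5, 4)        # (batch, seq, hidden)
    #     continuous_embeddings = [torch.ones(2, 4)]    # two patch embeddings
    #     indices = torch.tensor([[0, 1, -1, -1, -1]])  # patches sit at positions 0 and 1
    #     out = model.gather_continuous_embeddings(word_embeddings, continuous_embeddings, indices)
    #     # out[0, :2] are the patch embeddings; out[0, 2:] keep the original word embeddings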

    @add_start_docstrings_to_model_forward(FUYU_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        image_patches: torch.Tensor = None,  # [batch_size, num_total_patches, patch_size x patch_size x num_channels]
        image_patches_indices: torch.Tensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Examples:

        ```python
        >>> from transformers import FuyuProcessor, FuyuForCausalLM
        >>> from PIL import Image
        >>> import requests

        >>> processor = FuyuProcessor.from_pretrained("adept/fuyu-8b")
        >>> model = FuyuForCausalLM.from_pretrained("adept/fuyu-8b")

        >>> url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bus.png"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> prompt = "Generate a coco-style caption.\n"

        >>> inputs = processor(text=prompt, images=image, return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> generated_ids = model.generate(**inputs, max_new_tokens=7)
        >>> generation_text = processor.batch_decode(generated_ids[:, -7:], skip_special_tokens=True)
        >>> print(generation_text[0])
        A blue bus parked on the side of a road.
        ```"""

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = inputs_embeds.shape
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        seq_length_with_past = seq_length
        past_key_values_length = 0

        if past_key_values is not None:
            past_key_values_length = past_key_values[0][0].shape[2]
            seq_length_with_past = seq_length_with_past + past_key_values_length

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0)

        if inputs_embeds is None:
            inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
            if image_patches is not None and past_key_values is None:
                patch_embeddings = [
                    self.vision_embed_tokens(patch.to(self.vision_embed_tokens.weight.dtype))
                    .squeeze(0)
                    .to(inputs_embeds.device)
                    for patch in image_patches
                ]
                inputs_embeds = self.gather_continuous_embeddings(
                    word_embeddings=inputs_embeds,
                    continuous_embeddings=patch_embeddings,
                    image_patch_input_indices=image_patches_indices,
                )

        outputs = self.language_model(
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            labels=labels,
            use_cache=use_cache,
            return_dict=return_dict,
        )

        return outputs
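
    # A minimal sketch (illustration only, assuming `ids`, `patches`, and `idx` came from
    # `FuyuProcessor`) of incremental decoding with the forward above: the first call
    # consumes the prompt and merges the image patches; later calls feed only the newest
    # token plus the cache, so the patch merge is skipped (`past_key_values` is not None).
    #
    #     out = model(input_ids=ids, image_patches=patches,
    #                 image_patches_indices=idx, use_cache=True)
    #     next_id = out.logits[:, -1:].argmax(-1)
    #     out = model(input_ids=next_id, past_key_values=out.past_key_values, use_cache=True)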

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        attention_mask=None,
        inputs_embeds=None,
        image_patches=None,
        image_patches_indices=None,
        **kwargs,
    ):
        if past_key_values:
            input_ids = input_ids[:, -1:]

        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -1].unsqueeze(-1)

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "position_ids": position_ids,
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
                # image inputs are only needed on the first forward pass; afterwards their
                # embeddings are already part of the key/value cache
                "image_patches_indices": image_patches_indices if past_key_values is None else None,
                "image_patches": image_patches if past_key_values is None else None,
            }
        )
        return model_inputs
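
    # A minimal sketch (hypothetical `ids`/`mask`/`patches`/`idx`/`cache` values) of how
    # `generate` drives the method above. On the first step the full prompt and image
    # inputs are forwarded; once a cache exists, only the last token is kept and the image
    # inputs are dropped because their embeddings already live in the key/value cache:
    #
    #     step0 = model.prepare_inputs_for_generation(
    #         ids, attention_mask=mask, image_patches=patches, image_patches_indices=idx
    #     )
    #     # step0["input_ids"] is the full prompt; step0["image_patches"] is not None
    #     step1 = model.prepare_inputs_for_generation(ids, past_key_values=cache, attention_mask=mask)
    #     # step1["input_ids"] has shape (batch, 1); step1["image_patches"] is None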