# coding=utf-8
# Copyright 2023 The Salesforce Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch InstructBLIP model."""

import math
from dataclasses import dataclass
from typing import Any, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPooling,
    BaseModelOutputWithPoolingAndCrossAttentions,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
    ModelOutput,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ..auto import AutoModelForCausalLM, AutoModelForSeq2SeqLM
from .configuration_instructblip import InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "Salesforce/instructblip-flan-t5-xl"


from ..deprecated._archive_maps import INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402
@dataclass
|
|
# Copied from transformers.models.blip_2.modeling_blip_2.Blip2ForConditionalGenerationModelOutput with Blip2->InstructBlip
|
|
class InstructBlipForConditionalGenerationModelOutput(ModelOutput):
|
|
"""
|
|
Class defining the outputs of [`InstructBlipForConditionalGeneration`].
|
|
|
|
Args:
|
|
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
|
|
Language modeling loss from the language model.
|
|
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
|
|
Prediction scores of the language modeling head of the language model.
|
|
vision_outputs (`BaseModelOutputWithPooling`):
|
|
Outputs of the vision encoder.
|
|
qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`):
|
|
Outputs of the Q-Former (Querying Transformer).
|
|
language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`):
|
|
Outputs of the language model.
|
|
"""
|
|
|
|
loss: Optional[Tuple[torch.FloatTensor]] = None
|
|
logits: Optional[Tuple[torch.FloatTensor]] = None
|
|
vision_outputs: Optional[torch.FloatTensor] = None
|
|
qformer_outputs: Optional[Tuple[torch.FloatTensor]] = None
|
|
language_model_outputs: Optional[Tuple[torch.FloatTensor]] = None
|
|
|
|
def to_tuple(self) -> Tuple[Any]:
|
|
return tuple(
|
|
self[k]
|
|
if k not in ["vision_outputs", "qformer_outputs", "language_model_outputs"]
|
|
else getattr(self, k).to_tuple()
|
|
for k in self.keys()
|
|
)
|
|
|
|
|
|
# Copied from transformers.models.blip.modeling_blip.BlipVisionEmbeddings with Blip->InstructBlip
|
|
class InstructBlipVisionEmbeddings(nn.Module):
|
|
def __init__(self, config: InstructBlipVisionConfig):
|
|
super().__init__()
|
|
self.config = config
|
|
self.embed_dim = config.hidden_size
|
|
self.image_size = config.image_size
|
|
self.patch_size = config.patch_size
|
|
|
|
self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim))
|
|
|
|
self.patch_embedding = nn.Conv2d(
|
|
in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
|
|
)
|
|
|
|
self.num_patches = (self.image_size // self.patch_size) ** 2
|
|
self.num_positions = self.num_patches + 1
|
|
|
|
self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))
|
|
|
|
def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
|
|
batch_size = pixel_values.shape[0]
|
|
target_dtype = self.patch_embedding.weight.dtype
|
|
patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
|
|
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
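        # (batch_size, embed_dim, grid, grid) -> (batch_size, num_patches, embed_dim)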
|
|
|
|
class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
|
|
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
|
|
embeddings = embeddings + self.position_embedding[:, : embeddings.size(1), :].to(target_dtype)
|
|
return embeddings
|
|
|
|
|
|
# Copied from transformers.models.blip_2.modeling_blip_2.Blip2Attention with Blip2->InstructBlip
|
|
class InstructBlipAttention(nn.Module):
|
|
"""Multi-headed attention from 'Attention Is All You Need' paper"""
|
|
|
|
def __init__(self, config):
|
|
super().__init__()
|
|
self.config = config
|
|
self.embed_dim = config.hidden_size
|
|
self.num_heads = config.num_attention_heads
|
|
self.head_dim = self.embed_dim // self.num_heads
|
|
if self.head_dim * self.num_heads != self.embed_dim:
|
|
raise ValueError(
|
|
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
|
|
f" {self.num_heads})."
|
|
)
|
|
self.scale = self.head_dim**-0.5
|
|
self.dropout = nn.Dropout(config.attention_dropout)
|
|
|
|
# small tweak here compared to CLIP, no bias here
|
|
self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=False)
|
|
|
|
if config.qkv_bias:
|
|
q_bias = nn.Parameter(torch.zeros(self.embed_dim))
|
|
v_bias = nn.Parameter(torch.zeros(self.embed_dim))
|
|
else:
|
|
q_bias = None
|
|
v_bias = None
|
|
|
|
if q_bias is not None:
|
|
qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
|
|
self.qkv.bias = nn.Parameter(qkv_bias)
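            # the fused bias gives the query and value projections learnable offsets while the key slice
            # stays at zero, matching the BLIP-2 vision attention this class is copied from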
|
|
|
|
self.projection = nn.Linear(self.embed_dim, self.embed_dim)
|
|
|
|
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
|
|
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
|
|
|
|
def forward(
|
|
self,
|
|
hidden_states: torch.Tensor,
|
|
head_mask: Optional[torch.Tensor] = None,
|
|
output_attentions: Optional[bool] = False,
|
|
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
|
"""Input shape: Batch x Time x Channel"""
|
|
|
|
bsz, tgt_len, embed_dim = hidden_states.size()
|
|
|
|
mixed_qkv = self.qkv(hidden_states)
|
|
|
|
mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads).permute(
|
|
2, 0, 3, 1, 4
|
|
)
|
|
query_states, key_states, value_states = mixed_qkv[0], mixed_qkv[1], mixed_qkv[2]
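        # query/key/value each have shape (batch_size, num_heads, seq_len, head_dim)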
|
|
|
|
# Take the dot product between "query" and "key" to get the raw attention scores.
|
|
attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2))
|
|
|
|
attention_scores = attention_scores * self.scale
|
|
|
|
# Normalize the attention scores to probabilities.
|
|
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
|
|
|
|
# This is actually dropping out entire tokens to attend to, which might
|
|
# seem a bit unusual, but is taken from the original Transformer paper.
|
|
attention_probs = self.dropout(attention_probs)
|
|
|
|
# Mask heads if we want to
|
|
if head_mask is not None:
|
|
attention_probs = attention_probs * head_mask
|
|
|
|
context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3)
|
|
|
|
new_context_layer_shape = context_layer.size()[:-2] + (self.embed_dim,)
|
|
context_layer = context_layer.reshape(new_context_layer_shape)
|
|
|
|
output = self.projection(context_layer)
|
|
|
|
outputs = (output, attention_probs) if output_attentions else (output, None)
|
|
|
|
return outputs
|
|
|
|
|
|
# Copied from transformers.models.blip.modeling_blip.BlipMLP
|
|
class InstructBlipMLP(nn.Module):
|
|
def __init__(self, config):
|
|
super().__init__()
|
|
self.config = config
|
|
self.activation_fn = ACT2FN[config.hidden_act]
|
|
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
|
|
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
|
|
|
|
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
|
hidden_states = self.fc1(hidden_states)
|
|
hidden_states = self.activation_fn(hidden_states)
|
|
hidden_states = self.fc2(hidden_states)
|
|
return hidden_states
|
|
|
|
|
|
# Copied from transformers.models.blip.modeling_blip.BlipEncoderLayer with Blip->InstructBlip
|
|
class InstructBlipEncoderLayer(nn.Module):
|
|
def __init__(self, config: InstructBlipConfig):
|
|
super().__init__()
|
|
self.embed_dim = config.hidden_size
|
|
self.self_attn = InstructBlipAttention(config)
|
|
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
|
|
self.mlp = InstructBlipMLP(config)
|
|
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
|
|
|
|
def forward(
|
|
self,
|
|
hidden_states: torch.Tensor,
|
|
attention_mask: torch.Tensor,
|
|
output_attentions: Optional[bool] = False,
|
|
) -> Tuple[torch.FloatTensor]:
|
|
"""
|
|
Args:
|
|
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
|
|
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
|
|
output_attentions (`bool`, *optional*):
|
|
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
|
returned tensors for more detail.
|
|
"""
|
|
residual = hidden_states
|
|
|
|
hidden_states = self.layer_norm1(hidden_states)
|
|
hidden_states, attn_weights = self.self_attn(
|
|
hidden_states=hidden_states,
|
|
head_mask=attention_mask,
|
|
output_attentions=output_attentions,
|
|
)
|
|
hidden_states = hidden_states + residual
|
|
residual = hidden_states
|
|
hidden_states = self.layer_norm2(hidden_states)
|
|
hidden_states = self.mlp(hidden_states)
|
|
|
|
hidden_states = hidden_states + residual
|
|
|
|
outputs = (hidden_states,)
|
|
|
|
if output_attentions:
|
|
outputs += (attn_weights,)
|
|
|
|
return outputs
|
|
|
|
|
|
class InstructBlipPreTrainedModel(PreTrainedModel):
|
|
"""
|
|
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
|
models.
|
|
"""
|
|
|
|
config_class = InstructBlipConfig
|
|
base_model_prefix = "blip"
|
|
supports_gradient_checkpointing = True
|
|
_no_split_modules = [
|
|
"InstructBlipQFormerEmbeddings",
|
|
"InstructBlipAttention",
|
|
"InstructBlipQFormerMultiHeadAttention",
|
|
"InstructBlipQFormerSelfOutput",
|
|
]
|
|
_keep_in_fp32_modules = []
|
|
|
|
# Copied from transformers.models.blip_2.modeling_blip_2.Blip2PreTrainedModel._init_weights with Blip2->InstructBlip
|
|
def _init_weights(self, module):
|
|
"""Initialize the weights"""
|
|
factor = self.config.initializer_range
|
|
if isinstance(module, nn.Conv2d) or isinstance(module, nn.Embedding) or isinstance(module, nn.Linear):
|
|
module.weight.data.normal_(mean=0.0, std=factor)
|
|
if hasattr(module, "bias") and module.bias is not None:
|
|
module.bias.data.zero_()
|
|
|
|
if isinstance(module, InstructBlipVisionEmbeddings):
|
|
if hasattr(self.config, "vision_config"):
|
|
factor = self.config.vision_config.initializer_range
|
|
nn.init.trunc_normal_(module.position_embedding, mean=0.0, std=factor)
|
|
nn.init.trunc_normal_(module.class_embedding, mean=0.0, std=factor)
|
|
|
|
elif isinstance(module, nn.LayerNorm):
|
|
module.bias.data.zero_()
|
|
module.weight.data.fill_(1.0)
|
|
elif isinstance(module, nn.Linear) and module.bias is not None:
|
|
module.bias.data.zero_()
|
|
|
|
|
|
INSTRUCTBLIP_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`InstructBlipConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
INSTRUCTBLIP_VISION_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`InstructBlipProcessor`]. See
            [`InstructBlipProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
INSTRUCTBLIP_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`InstructBlipProcessor`]. See
            [`InstructBlipProcessor.__call__`] for details.

        qformer_input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of input sequence tokens in the vocabulary of the Q-Former. Input tokens can optionally be provided
            to serve as text prompt, which the Q-Former model will encode.

            Indices can be obtained using [`InstructBlipProcessor`]. See [`InstructBlipProcessor.__call__`] for
            details.

            [What are input IDs?](../glossary#input-ids)

        qformer_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of input sequence tokens in the vocabulary of the language model. Input tokens can optionally be
            provided to serve as text prompt, which the language model can continue.

            Indices can be obtained using [`InstructBlipProcessor`]. See [`InstructBlipProcessor.__call__`] for
            details.

            [What are input IDs?](../glossary#input-ids)

        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary of the language model. Only relevant in case an
            encoder-decoder language model (like T5) is used.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids)

        decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.

            Only relevant in case an encoder-decoder language model (like T5) is used.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
# Copied from transformers.models.blip.modeling_blip.BlipEncoder with Blip->InstructBlip
|
|
class InstructBlipEncoder(nn.Module):
|
|
"""
|
|
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
|
|
[`InstructBlipEncoderLayer`].
|
|
|
|
Args:
|
|
config (`InstructBlipConfig`):
|
|
The corresponding vision configuration for the `InstructBlipEncoder`.
|
|
"""
|
|
|
|
def __init__(self, config: InstructBlipConfig):
|
|
super().__init__()
|
|
self.config = config
|
|
self.layers = nn.ModuleList([InstructBlipEncoderLayer(config) for _ in range(config.num_hidden_layers)])
|
|
self.gradient_checkpointing = False
|
|
|
|
def forward(
|
|
self,
|
|
inputs_embeds,
|
|
attention_mask: Optional[torch.Tensor] = None,
|
|
output_attentions: Optional[bool] = None,
|
|
output_hidden_states: Optional[bool] = None,
|
|
return_dict: Optional[bool] = None,
|
|
) -> Union[Tuple, BaseModelOutput]:
|
|
r"""
|
|
Args:
|
|
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
|
|
Embedded representation of the inputs. Should be float, not int tokens.
|
|
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
|
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
|
|
|
- 1 for tokens that are **not masked**,
|
|
- 0 for tokens that are **masked**.
|
|
|
|
[What are attention masks?](../glossary#attention-mask)
|
|
output_attentions (`bool`, *optional*):
|
|
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
|
returned tensors for more detail.
|
|
output_hidden_states (`bool`, *optional*):
|
|
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
|
|
for more detail.
|
|
return_dict (`bool`, *optional*):
|
|
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
|
"""
|
|
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
|
output_hidden_states = (
|
|
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
|
)
|
|
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
|
|
|
encoder_states = () if output_hidden_states else None
|
|
all_attentions = () if output_attentions else None
|
|
|
|
hidden_states = inputs_embeds
|
|
for idx, encoder_layer in enumerate(self.layers):
|
|
if output_hidden_states:
|
|
encoder_states = encoder_states + (hidden_states,)
|
|
if self.gradient_checkpointing and self.training:
|
|
layer_outputs = self._gradient_checkpointing_func(
|
|
encoder_layer.__call__,
|
|
hidden_states,
|
|
attention_mask,
|
|
output_attentions,
|
|
)
|
|
else:
|
|
layer_outputs = encoder_layer(
|
|
hidden_states,
|
|
attention_mask,
|
|
output_attentions=output_attentions,
|
|
)
|
|
|
|
hidden_states = layer_outputs[0]
|
|
|
|
if output_attentions:
|
|
all_attentions = all_attentions + (layer_outputs[1],)
|
|
|
|
if output_hidden_states:
|
|
encoder_states = encoder_states + (hidden_states,)
|
|
|
|
if not return_dict:
|
|
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
|
|
return BaseModelOutput(
|
|
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
|
|
)
|
|
|
|
|
|
# Copied from transformers.models.blip.modeling_blip.BlipVisionModel with Blip->InstructBlip, BLIP->INSTRUCTBLIP
|
|
class InstructBlipVisionModel(InstructBlipPreTrainedModel):
|
|
main_input_name = "pixel_values"
|
|
config_class = InstructBlipVisionConfig
|
|
|
|
def __init__(self, config: InstructBlipVisionConfig):
|
|
super().__init__(config)
|
|
self.config = config
|
|
embed_dim = config.hidden_size
|
|
|
|
self.embeddings = InstructBlipVisionEmbeddings(config)
|
|
self.encoder = InstructBlipEncoder(config)
|
|
self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
|
|
|
|
self.post_init()
|
|
|
|
@add_start_docstrings_to_model_forward(INSTRUCTBLIP_VISION_INPUTS_DOCSTRING)
|
|
@replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=InstructBlipVisionConfig)
|
|
def forward(
|
|
self,
|
|
pixel_values: Optional[torch.FloatTensor] = None,
|
|
output_attentions: Optional[bool] = None,
|
|
output_hidden_states: Optional[bool] = None,
|
|
return_dict: Optional[bool] = None,
|
|
) -> Union[Tuple, BaseModelOutputWithPooling]:
|
|
r"""
|
|
Returns:
|
|
|
|
"""
|
|
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
|
output_hidden_states = (
|
|
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
|
)
|
|
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
|
|
|
if pixel_values is None:
|
|
raise ValueError("You have to specify pixel_values")
|
|
|
|
hidden_states = self.embeddings(pixel_values)
|
|
|
|
encoder_outputs = self.encoder(
|
|
inputs_embeds=hidden_states,
|
|
output_attentions=output_attentions,
|
|
output_hidden_states=output_hidden_states,
|
|
return_dict=return_dict,
|
|
)
|
|
|
|
last_hidden_state = encoder_outputs[0]
|
|
last_hidden_state = self.post_layernorm(last_hidden_state)
|
|
|
|
pooled_output = last_hidden_state[:, 0, :]
|
|
pooled_output = self.post_layernorm(pooled_output)
|
|
|
|
if not return_dict:
|
|
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
|
|
|
|
return BaseModelOutputWithPooling(
|
|
last_hidden_state=last_hidden_state,
|
|
pooler_output=pooled_output,
|
|
hidden_states=encoder_outputs.hidden_states,
|
|
attentions=encoder_outputs.attentions,
|
|
)
|
|
|
|
def get_input_embeddings(self):
|
|
return self.embeddings
|
|
|
|
|
|
class InstructBlipQFormerMultiHeadAttention(nn.Module):
|
|
def __init__(self, config, is_cross_attention=False):
|
|
super().__init__()
|
|
self.config = config
|
|
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
|
|
raise ValueError(
|
|
"The hidden size (%d) is not a multiple of the number of attention heads (%d)"
|
|
% (config.hidden_size, config.num_attention_heads)
|
|
)
|
|
|
|
self.num_attention_heads = config.num_attention_heads
|
|
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
|
|
self.all_head_size = self.num_attention_heads * self.attention_head_size
|
|
|
|
self.query = nn.Linear(config.hidden_size, self.all_head_size)
|
|
if is_cross_attention:
|
|
self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size)
|
|
self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size)
|
|
else:
|
|
self.key = nn.Linear(config.hidden_size, self.all_head_size)
|
|
self.value = nn.Linear(config.hidden_size, self.all_head_size)
|
|
|
|
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
|
|
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
|
|
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
|
|
self.max_position_embeddings = config.max_position_embeddings
|
|
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
|
|
self.save_attention = False
|
|
|
|
def save_attn_gradients(self, attn_gradients):
|
|
self.attn_gradients = attn_gradients
|
|
|
|
def get_attn_gradients(self):
|
|
return self.attn_gradients
|
|
|
|
def save_attention_map(self, attention_map):
|
|
self.attention_map = attention_map
|
|
|
|
def get_attention_map(self):
|
|
return self.attention_map
|
|
|
|
def transpose_for_scores(self, x):
|
|
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
|
|
x = x.view(*new_x_shape)
|
|
return x.permute(0, 2, 1, 3)
|
|
|
|
def forward(
|
|
self,
|
|
hidden_states,
|
|
attention_mask=None,
|
|
head_mask=None,
|
|
encoder_hidden_states=None,
|
|
encoder_attention_mask=None,
|
|
past_key_value=None,
|
|
output_attentions=False,
|
|
):
|
|
# If this is instantiated as a cross-attention module, the keys
|
|
# and values come from an encoder; the attention mask needs to be
|
|
# such that the encoder's padding tokens are not attended to.
|
|
is_cross_attention = encoder_hidden_states is not None
|
|
|
|
if is_cross_attention:
|
|
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
|
|
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
|
|
attention_mask = encoder_attention_mask
|
|
elif past_key_value is not None:
|
|
key_layer = self.transpose_for_scores(self.key(hidden_states))
|
|
value_layer = self.transpose_for_scores(self.value(hidden_states))
|
|
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
|
|
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
|
|
else:
|
|
key_layer = self.transpose_for_scores(self.key(hidden_states))
|
|
value_layer = self.transpose_for_scores(self.value(hidden_states))
|
|
|
|
mixed_query_layer = self.query(hidden_states)
|
|
|
|
query_layer = self.transpose_for_scores(mixed_query_layer)
|
|
|
|
past_key_value = (key_layer, value_layer)
|
|
|
|
# Take the dot product between "query" and "key" to get the raw attention scores.
|
|
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
|
|
|
|
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
|
|
seq_length = hidden_states.size()[1]
|
|
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
|
|
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
|
|
distance = position_ids_l - position_ids_r
|
|
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
|
|
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
|
|
|
|
if self.position_embedding_type == "relative_key":
|
|
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
|
|
attention_scores = attention_scores + relative_position_scores
|
|
elif self.position_embedding_type == "relative_key_query":
|
|
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
|
|
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
|
|
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
|
|
|
|
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
|
|
attention_scores_dtype = attention_scores.dtype
|
|
|
|
if attention_mask is not None:
|
|
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
|
|
attention_scores = attention_scores + attention_mask
|
|
|
|
# Normalize the attention scores to probabilities.
|
|
attention_probs = nn.Softmax(dim=-1)(attention_scores).to(attention_scores_dtype)
|
|
|
|
if is_cross_attention and self.save_attention:
|
|
self.save_attention_map(attention_probs)
|
|
attention_probs.register_hook(self.save_attn_gradients)
|
|
|
|
# This is actually dropping out entire tokens to attend to, which might
|
|
# seem a bit unusual, but is taken from the original Transformer paper.
|
|
attention_probs_dropped = self.dropout(attention_probs)
|
|
|
|
# Mask heads if we want to
|
|
if head_mask is not None:
|
|
attention_probs_dropped = attention_probs_dropped * head_mask
|
|
|
|
context_layer = torch.matmul(attention_probs_dropped, value_layer)
|
|
|
|
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
|
|
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
|
|
context_layer = context_layer.view(*new_context_layer_shape)
|
|
|
|
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
|
|
|
|
outputs = outputs + (past_key_value,)
|
|
return outputs
|
|
|
|
|
|
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->InstructBlipQFormer
|
|
class InstructBlipQFormerSelfOutput(nn.Module):
|
|
def __init__(self, config):
|
|
super().__init__()
|
|
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
|
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
|
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
|
|
|
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
|
|
hidden_states = self.dense(hidden_states)
|
|
hidden_states = self.dropout(hidden_states)
|
|
hidden_states = self.LayerNorm(hidden_states + input_tensor)
|
|
return hidden_states
|
|
|
|
|
|
# Copied from transformers.models.blip_2.modeling_blip_2.Blip2QFormerAttention with Blip2->InstructBlip
|
|
class InstructBlipQFormerAttention(nn.Module):
|
|
def __init__(self, config, is_cross_attention=False):
|
|
super().__init__()
|
|
self.attention = InstructBlipQFormerMultiHeadAttention(config, is_cross_attention)
|
|
self.output = InstructBlipQFormerSelfOutput(config)
|
|
self.pruned_heads = set()
|
|
|
|
def prune_heads(self, heads):
|
|
if len(heads) == 0:
|
|
return
|
|
heads, index = find_pruneable_heads_and_indices(
|
|
heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
|
|
)
|
|
|
|
# Prune linear layers
|
|
self.attention.query = prune_linear_layer(self.attention.query, index)
|
|
self.attention.key = prune_linear_layer(self.attention.key, index)
|
|
self.attention.value = prune_linear_layer(self.attention.value, index)
|
|
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
|
|
|
|
# Update hyper params and store pruned heads
|
|
self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
|
|
self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
|
|
self.pruned_heads = self.pruned_heads.union(heads)
|
|
|
|
def forward(
|
|
self,
|
|
hidden_states: torch.Tensor,
|
|
attention_mask: Optional[torch.FloatTensor] = None,
|
|
head_mask: Optional[torch.FloatTensor] = None,
|
|
encoder_hidden_states: Optional[torch.FloatTensor] = None,
|
|
encoder_attention_mask: Optional[torch.FloatTensor] = None,
|
|
past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
|
|
output_attentions: Optional[bool] = False,
|
|
) -> Tuple[torch.Tensor]:
|
|
self_outputs = self.attention(
|
|
hidden_states,
|
|
attention_mask,
|
|
head_mask,
|
|
encoder_hidden_states,
|
|
encoder_attention_mask,
|
|
past_key_value,
|
|
output_attentions,
|
|
)
|
|
attention_output = self.output(self_outputs[0], hidden_states)
|
|
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
|
|
return outputs
|
|
|
|
|
|
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->InstructBlipQFormer
|
|
class InstructBlipQFormerIntermediate(nn.Module):
|
|
def __init__(self, config):
|
|
super().__init__()
|
|
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
|
|
if isinstance(config.hidden_act, str):
|
|
self.intermediate_act_fn = ACT2FN[config.hidden_act]
|
|
else:
|
|
self.intermediate_act_fn = config.hidden_act
|
|
|
|
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
|
hidden_states = self.dense(hidden_states)
|
|
hidden_states = self.intermediate_act_fn(hidden_states)
|
|
return hidden_states
|
|
|
|
|
|
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->InstructBlipQFormer
|
|
class InstructBlipQFormerOutput(nn.Module):
|
|
def __init__(self, config):
|
|
super().__init__()
|
|
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
|
|
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
|
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
|
|
|
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
|
|
hidden_states = self.dense(hidden_states)
|
|
hidden_states = self.dropout(hidden_states)
|
|
hidden_states = self.LayerNorm(hidden_states + input_tensor)
|
|
return hidden_states
|
|
|
|
|
|
class InstructBlipQFormerLayer(nn.Module):
|
|
def __init__(self, config, layer_idx):
|
|
super().__init__()
|
|
self.chunk_size_feed_forward = config.chunk_size_feed_forward
|
|
self.seq_len_dim = 1
|
|
self.attention = InstructBlipQFormerAttention(config)
|
|
|
|
self.layer_idx = layer_idx
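        # cross-attention to the image features is only instantiated every `config.cross_attention_frequency`-th
        # layer; the remaining layers run self-attention (and feed-forward) only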
|
|
|
|
if layer_idx % config.cross_attention_frequency == 0:
|
|
self.crossattention = InstructBlipQFormerAttention(config, is_cross_attention=True)
|
|
self.has_cross_attention = True
|
|
else:
|
|
self.has_cross_attention = False
|
|
|
|
self.intermediate = InstructBlipQFormerIntermediate(config)
|
|
self.output = InstructBlipQFormerOutput(config)
|
|
|
|
self.intermediate_query = InstructBlipQFormerIntermediate(config)
|
|
self.output_query = InstructBlipQFormerOutput(config)
|
|
|
|
def forward(
|
|
self,
|
|
hidden_states,
|
|
attention_mask=None,
|
|
head_mask=None,
|
|
encoder_hidden_states=None,
|
|
encoder_attention_mask=None,
|
|
past_key_value=None,
|
|
output_attentions=False,
|
|
query_length=0,
|
|
):
|
|
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
|
|
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
|
|
self_attention_outputs = self.attention(
|
|
hidden_states,
|
|
attention_mask,
|
|
head_mask,
|
|
output_attentions=output_attentions,
|
|
past_key_value=self_attn_past_key_value,
|
|
)
|
|
attention_output = self_attention_outputs[0]
|
|
outputs = self_attention_outputs[1:-1]
|
|
|
|
present_key_value = self_attention_outputs[-1]
|
|
|
|
if query_length > 0:
|
|
query_attention_output = attention_output[:, :query_length, :]
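            # the first `query_length` positions are the learnable query tokens; only they go through
            # cross-attention to the image features, while any instruction-text positions are handled
            # by the plain feed-forward branch further below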
|
|
|
|
if self.has_cross_attention:
|
|
if encoder_hidden_states is None:
|
|
raise ValueError("encoder_hidden_states must be given for cross-attention layers")
|
|
cross_attention_outputs = self.crossattention(
|
|
query_attention_output,
|
|
attention_mask,
|
|
head_mask,
|
|
encoder_hidden_states,
|
|
encoder_attention_mask,
|
|
output_attentions=output_attentions,
|
|
)
|
|
query_attention_output = cross_attention_outputs[0]
|
|
# add cross attentions if we output attention weights
|
|
outputs = outputs + cross_attention_outputs[1:-1]
|
|
|
|
layer_output = apply_chunking_to_forward(
|
|
self.feed_forward_chunk_query,
|
|
self.chunk_size_feed_forward,
|
|
self.seq_len_dim,
|
|
query_attention_output,
|
|
)
|
|
|
|
if attention_output.shape[1] > query_length:
|
|
layer_output_text = apply_chunking_to_forward(
|
|
self.feed_forward_chunk,
|
|
self.chunk_size_feed_forward,
|
|
self.seq_len_dim,
|
|
attention_output[:, query_length:, :],
|
|
)
|
|
layer_output = torch.cat([layer_output, layer_output_text], dim=1)
|
|
else:
|
|
layer_output = apply_chunking_to_forward(
|
|
self.feed_forward_chunk,
|
|
self.chunk_size_feed_forward,
|
|
self.seq_len_dim,
|
|
attention_output,
|
|
)
|
|
outputs = (layer_output,) + outputs
|
|
|
|
outputs = outputs + (present_key_value,)
|
|
|
|
return outputs
|
|
|
|
def feed_forward_chunk(self, attention_output):
|
|
intermediate_output = self.intermediate(attention_output)
|
|
layer_output = self.output(intermediate_output, attention_output)
|
|
return layer_output
|
|
|
|
def feed_forward_chunk_query(self, attention_output):
|
|
intermediate_output = self.intermediate_query(attention_output)
|
|
layer_output = self.output_query(intermediate_output, attention_output)
|
|
return layer_output
|
|
|
|
|
|
# Copied from transformers.models.blip_2.modeling_blip_2.Blip2QFormerEncoder with Blip2->InstructBlip
|
|
class InstructBlipQFormerEncoder(nn.Module):
|
|
def __init__(self, config):
|
|
super().__init__()
|
|
self.config = config
|
|
self.layer = nn.ModuleList(
|
|
[InstructBlipQFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
|
|
)
|
|
self.gradient_checkpointing = False
|
|
|
|
def forward(
|
|
self,
|
|
hidden_states,
|
|
attention_mask=None,
|
|
head_mask=None,
|
|
encoder_hidden_states=None,
|
|
encoder_attention_mask=None,
|
|
past_key_values=None,
|
|
use_cache=None,
|
|
output_attentions=False,
|
|
output_hidden_states=False,
|
|
return_dict=True,
|
|
query_length=0,
|
|
):
|
|
all_hidden_states = () if output_hidden_states else None
|
|
all_self_attentions = () if output_attentions else None
|
|
all_cross_attentions = () if output_attentions else None
|
|
|
|
next_decoder_cache = () if use_cache else None
|
|
|
|
for i in range(self.config.num_hidden_layers):
|
|
layer_module = self.layer[i]
|
|
if output_hidden_states:
|
|
all_hidden_states = all_hidden_states + (hidden_states,)
|
|
|
|
layer_head_mask = head_mask[i] if head_mask is not None else None
|
|
past_key_value = past_key_values[i] if past_key_values is not None else None
|
|
|
|
if getattr(self.config, "gradient_checkpointing", False) and self.training:
|
|
if use_cache:
|
|
logger.warning(
|
|
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
|
|
)
|
|
use_cache = False
|
|
layer_outputs = self._gradient_checkpointing_func(
|
|
layer_module.__call__,
|
|
hidden_states,
|
|
attention_mask,
|
|
layer_head_mask,
|
|
encoder_hidden_states,
|
|
encoder_attention_mask,
|
|
)
|
|
else:
|
|
layer_outputs = layer_module(
|
|
hidden_states,
|
|
attention_mask,
|
|
layer_head_mask,
|
|
encoder_hidden_states,
|
|
encoder_attention_mask,
|
|
past_key_value,
|
|
output_attentions,
|
|
query_length,
|
|
)
|
|
|
|
hidden_states = layer_outputs[0]
|
|
if use_cache:
|
|
next_decoder_cache += (layer_outputs[-1],)
|
|
if output_attentions:
|
|
all_self_attentions = all_self_attentions + (layer_outputs[1],)
|
|
if layer_module.has_cross_attention:
|
|
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
|
|
|
|
if output_hidden_states:
|
|
all_hidden_states = all_hidden_states + (hidden_states,)
|
|
|
|
if not return_dict:
|
|
return tuple(
|
|
v
|
|
for v in [
|
|
hidden_states,
|
|
next_decoder_cache,
|
|
all_hidden_states,
|
|
all_self_attentions,
|
|
all_cross_attentions,
|
|
]
|
|
if v is not None
|
|
)
|
|
return BaseModelOutputWithPastAndCrossAttentions(
|
|
last_hidden_state=hidden_states,
|
|
past_key_values=next_decoder_cache,
|
|
hidden_states=all_hidden_states,
|
|
attentions=all_self_attentions,
|
|
cross_attentions=all_cross_attentions,
|
|
)
|
|
|
|
|
|
class InstructBlipQFormerEmbeddings(nn.Module):
|
|
"""Construct the embeddings from word and position embeddings."""
|
|
|
|
def __init__(self, config):
|
|
super().__init__()
|
|
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
|
|
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
|
|
|
|
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
|
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
|
|
|
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
|
|
self.register_buffer(
|
|
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
|
|
)
|
|
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
|
|
|
|
self.config = config
|
|
|
|
def forward(
|
|
self,
|
|
input_ids=None,
|
|
position_ids=None,
|
|
query_embeds=None,
|
|
past_key_values_length=0,
|
|
):
|
|
if input_ids is not None:
|
|
seq_length = input_ids.size()[1]
|
|
else:
|
|
seq_length = 0
|
|
|
|
if position_ids is None:
|
|
position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length].clone()
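            # when cached key/values are used, position ids for the new tokens continue from the cached length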
|
|
|
|
if input_ids is not None:
|
|
embeddings = self.word_embeddings(input_ids)
|
|
if self.position_embedding_type == "absolute":
|
|
position_embeddings = self.position_embeddings(position_ids.to(embeddings.device))
|
|
embeddings = embeddings + position_embeddings
|
|
|
|
if query_embeds is not None:
|
|
embeddings = torch.cat((query_embeds, embeddings), dim=1)
|
|
else:
|
|
embeddings = query_embeds
|
|
|
|
embeddings = embeddings.to(self.layernorm.weight.dtype)
|
|
embeddings = self.layernorm(embeddings)
|
|
embeddings = self.dropout(embeddings)
|
|
return embeddings
|
|
|
|
|
|
class InstructBlipQFormerModel(InstructBlipPreTrainedModel):
|
|
"""
|
|
Querying Transformer (Q-Former), used in InstructBLIP. Slightly modified from BLIP-2 as it also takes the
|
|
instruction as input.
|
|
"""
|
|
|
|
def __init__(self, config: InstructBlipQFormerConfig):
|
|
super().__init__(config)
|
|
self.config = config
|
|
|
|
self.embeddings = InstructBlipQFormerEmbeddings(config)
|
|
|
|
self.encoder = InstructBlipQFormerEncoder(config)
|
|
|
|
self.post_init()
|
|
|
|
def get_input_embeddings(self):
|
|
return self.embeddings.word_embeddings
|
|
|
|
def set_input_embeddings(self, value):
|
|
self.embeddings.word_embeddings = value
|
|
|
|
def _prune_heads(self, heads_to_prune):
|
|
"""
|
|
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
|
|
class PreTrainedModel
|
|
"""
|
|
for layer, heads in heads_to_prune.items():
|
|
self.encoder.layer[layer].attention.prune_heads(heads)
|
|
|
|
def get_extended_attention_mask(
|
|
self,
|
|
attention_mask: torch.Tensor,
|
|
input_shape: Tuple[int],
|
|
device: torch.device,
|
|
has_query: bool = False,
|
|
) -> torch.Tensor:
|
|
"""
|
|
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
|
|
|
|
Arguments:
|
|
attention_mask (`torch.Tensor`):
|
|
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
|
|
input_shape (`Tuple[int]`):
|
|
The shape of the input to the model.
|
|
device: (`torch.device`):
|
|
The device of the input to the model.
|
|
|
|
Returns:
|
|
            `torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
|
|
"""
|
|
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
|
|
# ourselves in which case we just need to make it broadcastable to all heads.
|
|
if attention_mask.dim() == 3:
|
|
extended_attention_mask = attention_mask[:, None, :, :]
|
|
elif attention_mask.dim() == 2:
|
|
# Provided a padding mask of dimensions [batch_size, seq_length]
|
|
# - the model is an encoder, so make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
|
|
extended_attention_mask = attention_mask[:, None, None, :]
|
|
else:
|
|
raise ValueError(
|
|
f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})",
|
|
)
|
|
|
|
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
|
|
# masked positions, this operation will create a tensor which is 0.0 for
|
|
# positions we want to attend and -10000.0 for masked positions.
|
|
# Since we are adding it to the raw scores before the softmax, this is
|
|
# effectively the same as removing these entirely.
|
|
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
|
|
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
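        # e.g. a mask value of 1 becomes 0.0 (position is attended to) and 0 becomes -10000.0,
        # which drives that position's softmax weight to ~0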
|
|
return extended_attention_mask
|
|
|
|
def forward(
|
|
self,
|
|
input_ids: torch.LongTensor,
|
|
attention_mask: Optional[torch.FloatTensor] = None,
|
|
position_ids: Optional[torch.LongTensor] = None,
|
|
query_embeds: Optional[torch.Tensor] = None,
|
|
head_mask: Optional[torch.FloatTensor] = None,
|
|
encoder_hidden_states: Optional[torch.FloatTensor] = None,
|
|
encoder_attention_mask: Optional[torch.FloatTensor] = None,
|
|
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
|
|
use_cache: Optional[bool] = None,
|
|
output_attentions: Optional[bool] = None,
|
|
output_hidden_states: Optional[bool] = None,
|
|
return_dict: Optional[bool] = None,
|
|
) -> Union[Tuple[torch.FloatTensor], BaseModelOutputWithPoolingAndCrossAttentions]:
|
|
r"""
|
|
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
|
|
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
|
|
the model is configured as a decoder.
|
|
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
|
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
|
|
the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
|
|
- 1 for tokens that are **not masked**,
|
|
- 0 for tokens that are **masked**.
|
|
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of:
|
|
shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and
|
|
value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are
|
|
used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key
|
|
value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape
|
|
`(batch_size, sequence_length)`.
|
|
use_cache (`bool`, *optional*):
|
|
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
|
|
`past_key_values`).
|
|
"""
|
|
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
|
output_hidden_states = (
|
|
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
|
)
|
|
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
|
|
|
if input_ids is None and query_embeds is None:
|
|
raise ValueError("You have to specify query_embeds when input_ids is None")
|
|
|
|
# past_key_values_length
|
|
past_key_values_length = (
|
|
past_key_values[0][0].shape[2] - self.config.query_length if past_key_values is not None else 0
|
|
)
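        # the cached key/value length includes the query tokens, so `config.query_length` is subtracted
        # to obtain the position offset of the text tokens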
|
|
|
|
query_length = query_embeds.shape[1] if query_embeds is not None else 0
|
|
|
|
embedding_output = self.embeddings(
|
|
input_ids=input_ids,
|
|
position_ids=position_ids,
|
|
query_embeds=query_embeds,
|
|
past_key_values_length=past_key_values_length,
|
|
)
|
|
|
|
input_shape = embedding_output.size()[:-1]
|
|
batch_size, seq_length = input_shape
|
|
device = embedding_output.device
|
|
|
|
if attention_mask is None:
|
|
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
|
|
|
|
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
|
|
# ourselves in which case we just need to make it broadcastable to all heads.
|
|
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)
|
|
|
|
# If a 2D or 3D attention mask is provided for the cross-attention
|
|
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
|
|
if encoder_hidden_states is not None:
|
|
if isinstance(encoder_hidden_states, list):
|
|
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
|
|
else:
|
|
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
|
|
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
|
|
|
|
if isinstance(encoder_attention_mask, list):
|
|
encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
|
|
elif encoder_attention_mask is None:
|
|
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
|
|
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
|
|
else:
|
|
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
|
|
else:
|
|
encoder_extended_attention_mask = None
|
|
|
|
# Prepare head mask if needed
|
|
# 1.0 in head_mask indicate we keep the head
|
|
# attention_probs has shape bsz x n_heads x N x N
|
|
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
|
|
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
|
|
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
|
|
|
|
encoder_outputs = self.encoder(
|
|
embedding_output,
|
|
attention_mask=extended_attention_mask,
|
|
head_mask=head_mask,
|
|
encoder_hidden_states=encoder_hidden_states,
|
|
encoder_attention_mask=encoder_extended_attention_mask,
|
|
past_key_values=past_key_values,
|
|
use_cache=use_cache,
|
|
output_attentions=output_attentions,
|
|
output_hidden_states=output_hidden_states,
|
|
return_dict=return_dict,
|
|
query_length=query_length,
|
|
)
|
|
sequence_output = encoder_outputs[0]
|
|
pooled_output = sequence_output[:, 0, :]
|
|
|
|
if not return_dict:
|
|
return (sequence_output, pooled_output) + encoder_outputs[1:]
|
|
|
|
return BaseModelOutputWithPoolingAndCrossAttentions(
|
|
last_hidden_state=sequence_output,
|
|
pooler_output=pooled_output,
|
|
past_key_values=encoder_outputs.past_key_values,
|
|
hidden_states=encoder_outputs.hidden_states,
|
|
attentions=encoder_outputs.attentions,
|
|
cross_attentions=encoder_outputs.cross_attentions,
|
|
)
|
|
|
|
|
|
@add_start_docstrings(
|
|
"""
|
|
InstructBLIP Model for generating text given an image and an optional text prompt. The model consists of a vision
|
|
encoder, Querying Transformer (Q-Former) and a language model.
|
|
|
|
One can optionally pass `input_ids` to the model, which serve as a text prompt, to make the language model continue
|
|
the prompt. Otherwise, the language model starts generating text from the [BOS] (beginning-of-sequence) token.
|
|
""",
|
|
INSTRUCTBLIP_START_DOCSTRING,
|
|
)
|
|
class InstructBlipForConditionalGeneration(InstructBlipPreTrainedModel):
|
|
config_class = InstructBlipConfig
|
|
main_input_name = "pixel_values"
|
|
|
|
def __init__(self, config: InstructBlipConfig):
|
|
super().__init__(config)
|
|
|
|
self.vision_model = InstructBlipVisionModel(config.vision_config)
|
|
|
|
self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size))
|
|
self.qformer = InstructBlipQFormerModel(config.qformer_config)
|
|
|
|
self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size)
|
|
|
|
if config.use_decoder_only_language_model:
|
|
language_model = AutoModelForCausalLM.from_config(config.text_config)
|
|
else:
|
|
language_model = AutoModelForSeq2SeqLM.from_config(config.text_config)
|
|
|
|
if language_model._no_split_modules is not None:
|
|
self._no_split_modules.extend(language_model._no_split_modules)
|
|
|
|
if language_model._keep_in_fp32_modules is not None:
|
|
self._keep_in_fp32_modules.extend(language_model._keep_in_fp32_modules)
|
|
|
|
self.language_model = language_model
|
|
|
|
# Initialize weights and apply final processing
|
|
self.post_init()
|
|
|
|
def get_input_embeddings(self):
|
|
return self.language_model.get_input_embeddings()
|
|
|
|
def set_input_embeddings(self, value):
|
|
self.language_model.set_input_embeddings(value)
|
|
|
|
def set_output_embeddings(self, new_embeddings):
|
|
self.language_model.set_output_embeddings(new_embeddings)
|
|
|
|
def get_output_embeddings(self) -> nn.Module:
|
|
return self.language_model.get_output_embeddings()
|
|
|
|
def get_encoder(self):
|
|
return self.language_model.get_encoder()
|
|
|
|
def get_decoder(self):
|
|
return self.language_model.get_decoder()
|
|
|
|
def _tie_weights(self):
|
|
if not self.config.use_decoder_only_language_model:
|
|
self.language_model.encoder.embed_tokens = self.language_model.shared
|
|
self.language_model.decoder.embed_tokens = self.language_model.shared
|
|
|
|
def _preprocess_accelerate(self):
|
|
r"""
|
|
Some pre-processing hacks to make the model `accelerate` compatible. Check
|
|
https://github.com/huggingface/transformers/pull/21707 for more details.
|
|
"""
|
|
hf_device_map = self.hf_device_map
|
|
|
|
if len(hf_device_map) > 1 and "language_model" not in hf_device_map and torch.cuda.device_count() > 1:
|
|
# warn users about unexpected behavior when using multi-GPU + InstructBLIP + `accelerate`.
|
|
logger.warning(
|
|
"The `language_model` is not in the `hf_device_map` dictionary and you are running your script"
|
|
" in a multi-GPU environment. this may lead to unexpected behavior when using `accelerate`."
|
|
" Please pass a `device_map` that contains `language_model` to remove this warning."
|
|
" Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for"
|
|
" more details on creating a `device_map` for large models.",
|
|
)
|
|
|
|
if hasattr(self.language_model, "_hf_hook"):
|
|
self.language_model._hf_hook.io_same_device = True # For `generate` compatibility
|
|
|
|
@add_start_docstrings_to_model_forward(INSTRUCTBLIP_INPUTS_DOCSTRING)
|
|
@replace_return_docstrings(
|
|
output_type=InstructBlipForConditionalGenerationModelOutput, config_class=InstructBlipVisionConfig
|
|
)
|
|
def forward(
|
|
self,
|
|
pixel_values: torch.FloatTensor,
|
|
qformer_input_ids: torch.FloatTensor,
|
|
qformer_attention_mask: Optional[torch.LongTensor] = None,
|
|
input_ids: Optional[torch.FloatTensor] = None,
|
|
attention_mask: Optional[torch.LongTensor] = None,
|
|
decoder_input_ids: Optional[torch.LongTensor] = None,
|
|
decoder_attention_mask: Optional[torch.LongTensor] = None,
|
|
output_attentions: Optional[bool] = None,
|
|
output_hidden_states: Optional[bool] = None,
|
|
labels: Optional[torch.LongTensor] = None,
|
|
return_dict: Optional[bool] = None,
|
|
) -> Union[Tuple, InstructBlipForConditionalGenerationModelOutput]:
|
|
r"""
|
|
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
|
Labels for computing the language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size -
|
|
1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
|
|
config.vocab_size]`
|
|
|
|
Returns:
|
|
|
|
Examples:
|
|
|
|
```python
|
|
>>> from transformers import InstructBlipProcessor, InstructBlipForConditionalGeneration
|
|
>>> import torch
|
|
>>> from PIL import Image
|
|
>>> import requests
|
|
|
|
>>> model = InstructBlipForConditionalGeneration.from_pretrained("Salesforce/instructblip-vicuna-7b")
|
|
>>> processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
|
|
|
|
>>> device = "cuda" if torch.cuda.is_available() else "cpu"
|
|
>>> model.to(device) # doctest: +IGNORE_RESULT
|
|
|
|
>>> url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
|
|
>>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
|
|
>>> prompt = "What is unusual about this image?"
|
|
>>> inputs = processor(images=image, text=prompt, return_tensors="pt").to(device)
|
|
|
|
>>> outputs = model.generate(
|
|
... **inputs,
|
|
... do_sample=False,
|
|
... num_beams=5,
|
|
... max_length=256,
|
|
... min_length=1,
|
|
... top_p=0.9,
|
|
... repetition_penalty=1.5,
|
|
... length_penalty=1.0,
|
|
... temperature=1,
|
|
... )
|
|
>>> generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
|
|
>>> print(generated_text)
|
|
The unusual aspect of this image is that a man is ironing clothes on the back of a yellow SUV, which is parked in the middle of a busy city street. This is an unconventional approach to ironing clothes, as it requires the man to balance himself and his ironing equipment on top of the vehicle while navigating through traffic. Additionally, the presence of taxis and other vehicles in the scene further emphasizes the unusual nature of this situation.
|
|
```"""
|
|
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # step 1: forward the images through the vision encoder,
        # to get image embeddings of shape (batch_size, seq_len, hidden_size)
        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        image_embeds = vision_outputs[0]
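        # image_embeds has shape (batch_size, num_patches + 1, vision_hidden_size); the extra position is the
        # vision encoder's class embedding. The exact sizes depend on the checkpoint's vision config (e.g. the
        # released ViT-g/14 checkpoints use a 1408-dim hidden state), so treat concrete numbers as illustrative.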

        # step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention
        image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)

        # difference with BLIP-2 here: we also feed the instruction prompt to the Q-Former
        query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
        query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device)
        if qformer_attention_mask is None:
            qformer_attention_mask = torch.ones_like(qformer_input_ids)
        qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1)
        query_outputs = self.qformer(
            input_ids=qformer_input_ids,
            attention_mask=qformer_attention_mask,
            query_embeds=query_tokens,
            encoder_hidden_states=image_embeds,
            encoder_attention_mask=image_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        query_output = query_outputs[0][:, : query_tokens.size(1), :]
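        # The Q-Former sequence is [query tokens | instruction tokens]; the slice above keeps only the query-token
        # positions, so query_output has shape (batch_size, num_query_tokens, qformer_hidden_size). The released
        # checkpoints use 32 query tokens, but that value comes from `config.num_query_tokens`.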

        # step 3: use the language model, conditioned on the query outputs and the prompt
        language_model_inputs = self.language_projection(query_output)
        language_model_attention_mask = torch.ones(
            language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device
        )

        inputs_embeds = self.language_model.get_input_embeddings()(input_ids)

        inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1)

        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        attention_mask = torch.cat([language_model_attention_mask.to(attention_mask.device), attention_mask], dim=1)
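        # At this point the language model input is laid out as [projected query embeddings | prompt token
        # embeddings], and attention_mask covers the same layout: all-ones over the query positions followed by
        # the user-provided (or all-ones) mask over the prompt tokens.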

        if self.config.use_decoder_only_language_model:
            outputs = self.language_model(
                inputs_embeds=inputs_embeds,
                attention_mask=attention_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
            logits = outputs.logits if return_dict else outputs[0]
            loss = None
            # we compute the loss here since we need to take into account the sequence length of the query embeds
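            # (labels only cover the text tokens, so the logits over the prepended query positions are cropped
            # away before the usual causal shift: logits at position t are scored against the label at t + 1)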
            if labels is not None:
                labels = labels.to(logits.device)
                logits = logits[:, -labels.size(1) :, :]
                # Shift so that tokens < n predict n
                shift_logits = logits[..., :-1, :].contiguous()
                shift_labels = labels[..., 1:].contiguous().to(logits.device)

                # Flatten the tokens
                loss_fct = CrossEntropyLoss(reduction="mean")

                loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1))
        else:
            outputs = self.language_model(
                inputs_embeds=inputs_embeds,
                attention_mask=attention_mask,
                decoder_input_ids=decoder_input_ids,
                decoder_attention_mask=decoder_attention_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
                labels=labels,
            )
            loss = outputs.loss if return_dict else outputs[0]
            logits = outputs.logits if return_dict else outputs[1]
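            # For encoder-decoder language models (e.g. Flan-T5) the query embeddings only extend the encoder
            # input, so labels stay aligned with the decoder sequence and the model's built-in loss can be used.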

        if not return_dict:
            output = (logits, vision_outputs, query_outputs, outputs)
            return ((loss,) + output) if loss is not None else output

        return InstructBlipForConditionalGenerationModelOutput(
            loss=loss,
            logits=logits,
            vision_outputs=vision_outputs,
            qformer_outputs=query_outputs,
            language_model_outputs=outputs,
        )

    @torch.no_grad()
    def generate(
        self,
        pixel_values: torch.FloatTensor,
        qformer_input_ids: Optional[torch.LongTensor] = None,
        qformer_attention_mask: Optional[torch.LongTensor] = None,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        **generate_kwargs,
    ) -> torch.LongTensor:
        """
        Overrides the `generate` method to use the model as a conditional generator.

        Args:
            pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)):
                Input images to be processed.
            qformer_input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
                The sequence used as a prompt to be fed to the Q-Former module.
            qformer_attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
                Mask to avoid performing attention on padding token indices.
            input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
                The sequence used as a prompt for the generation.
            attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
                Mask to avoid performing attention on padding token indices.

        Returns:
            `torch.LongTensor`: The generated token ids (or a generation output object when flags such as
            `return_dict_in_generate=True` are passed), which can be decoded with the processor.
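
        Examples:

        The snippet below is an illustrative sketch; the checkpoint, prompt, and decoding settings are assumptions
        and the decoded text will vary.

        ```python
        >>> from transformers import InstructBlipProcessor, InstructBlipForConditionalGeneration
        >>> from PIL import Image
        >>> import requests

        >>> processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
        >>> model = InstructBlipForConditionalGeneration.from_pretrained("Salesforce/instructblip-flan-t5-xl")

        >>> url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
        >>> inputs = processor(images=image, text="Describe the image briefly.", return_tensors="pt")

        >>> outputs = model.generate(**inputs, max_new_tokens=30)
        >>> caption = processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
        ```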
        """
        if hasattr(self, "hf_device_map"):
            # preprocess for `accelerate`
            self._preprocess_accelerate()

        batch_size = pixel_values.shape[0]
        image_embeds = self.vision_model(pixel_values, return_dict=True).last_hidden_state

        image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)

        query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
        query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device)
        if qformer_attention_mask is None:
            qformer_attention_mask = torch.ones_like(qformer_input_ids)
        qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1)
        query_outputs = self.qformer(
            input_ids=qformer_input_ids,
            attention_mask=qformer_attention_mask,
            query_embeds=query_tokens,
            encoder_hidden_states=image_embeds,
            encoder_attention_mask=image_attention_mask,
            return_dict=True,
        )
        query_output = query_outputs.last_hidden_state[:, : query_tokens.size(1), :]

        language_model_inputs = self.language_projection(query_output)
        language_attention_mask = torch.ones(
            language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device
        )

        if input_ids is None:
            input_ids = (
                torch.LongTensor([[self.config.text_config.bos_token_id]])
                .repeat(batch_size, 1)
                .to(image_embeds.device)
            )
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        attention_mask = torch.cat([language_attention_mask, attention_mask.to(language_attention_mask.device)], dim=1)

        # concatenate query embeddings with prompt embeddings
        inputs_embeds = self.get_input_embeddings()(input_ids)
        inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1)
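        # As in `forward`, inputs_embeds is now (batch_size, num_query_tokens + prompt_length, hidden_size).
        # For encoder-decoder language models these embeddings form the encoder input and generation happens in
        # the decoder; for decoder-only models they are the prefix of the generated sequence.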

        # add the length of the prepended query embeddings to max_length, so that the final max_length is counted
        # only on token embeds
        # -1 is to account for the BOS prepended after `generate`.
        if not self.language_model.config.is_encoder_decoder:
            generate_kwargs["max_length"] = generate_kwargs.get("max_length", 20) + language_model_inputs.shape[1] - 1
            generate_kwargs["min_length"] = generate_kwargs.get("min_length", 0) + language_model_inputs.shape[1]

        outputs = self.language_model.generate(
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            **generate_kwargs,
        )

        # this is a temporary workaround to be consistent with other generation models and
        # have BOS as the first token, even though under the hood we are calling LM with embeds
        if not self.language_model.config.is_encoder_decoder:
            # the InstructBLIP authors used inconsistent tokenizer/model files during training,
            # with the tokenizer's bos token being set to </s> which has ID=2,
            # whereas the model's text config has bos token id = 0
            bos_token_id = (
                2
                if self.config.text_config.architectures[0] == "LLaMAForCausalLM"
                else self.config.text_config.bos_token_id
            )
            bos_tokens = torch.LongTensor([[bos_token_id]]).repeat(batch_size, 1).to(image_embeds.device)
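            # `generate` returns a plain LongTensor of token ids by default, but a ModelOutput with a `.sequences`
            # field when flags such as `return_dict_in_generate=True` are passed; prepend the BOS in both cases.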
            if not isinstance(outputs, torch.Tensor):
                outputs.sequences = torch.cat([bos_tokens, outputs.sequences], dim=-1)
            else:
                outputs = torch.cat([bos_tokens, outputs], dim=-1)

        return outputs