# coding=utf-8
# Copyright 2023 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Llava model."""

from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn

from transformers.activations import ACT2FN
from transformers.generation import GenerationMixin
from transformers.modeling_outputs import ModelOutput
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_torchdynamo_compiling,
    logging,
    replace_return_docstrings,
)
from transformers.utils.deprecation import deprecate_kwarg
from transformers.models.auto import AutoModel, AutoModelForCausalLM

from .configuration_llava import LlavaConfig


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "LlavaConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "llava-hf/llava-1.5-7b-hf"


@dataclass
class LlavaCausalLMOutputWithPast(ModelOutput):
    """
    Base class for Llava causal language model (or autoregressive) outputs.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss (for next-token prediction).
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
            `past_key_values` input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        image_hidden_states (`torch.FloatTensor`, *optional*):
            A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
            image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    past_key_values: Optional[List[torch.FloatTensor]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    image_hidden_states: Optional[torch.FloatTensor] = None


class LlavaMultiModalProjector(nn.Module):
    def __init__(self, config: LlavaConfig):
        super().__init__()
        # We have hidden_size * the number of vision feature layers
        num_feature_layers = 1 if isinstance(config.vision_feature_layer, int) else len(config.vision_feature_layer)
        self.linear_1 = nn.Linear(
            config.vision_config.hidden_size * num_feature_layers,
            config.text_config.hidden_size,
            bias=config.multimodal_projector_bias,
        )
        self.act = ACT2FN[config.projector_hidden_act]
        self.linear_2 = nn.Linear(
            config.text_config.hidden_size, config.text_config.hidden_size, bias=config.multimodal_projector_bias
        )

    def forward(self, image_features):
        hidden_states = self.linear_1(image_features)
        hidden_states = self.act(hidden_states)
        hidden_states = self.linear_2(hidden_states)
        return hidden_states
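

# A minimal, illustrative sketch of what the projector computes (hypothetical shapes,
# assuming a default `LlavaConfig`; kept as a comment so nothing runs at import time):
#
#     config = LlavaConfig()
#     projector = LlavaMultiModalProjector(config)
#     feats = torch.randn(2, 576, config.vision_config.hidden_size)  # vision tower patch features
#     projected = projector(feats)  # -> (2, 576, config.text_config.hidden_size)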


LLAVA_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`LlavaConfig`] or [`LlavaVisionConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


@add_start_docstrings(
    "The bare Llava Model outputting raw hidden-states without any specific head on top.",
    LLAVA_START_DOCSTRING,
)
class LlavaPreTrainedModel(PreTrainedModel):
    config_class = LlavaConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["LlavaVisionAttention"]
    _skip_keys_device_placement = "past_key_values"
    _supports_cache_class = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_quantized_cache = True
    _supports_static_cache = True

    def _init_weights(self, module):
        # Important: this ported version of Llava isn't meant for training from scratch, only for
        # inference and fine-tuning, so the proper init-weights code has been removed. The original
        # codebase (https://github.com/haotian-liu/LLaVA/tree/main/llava) should serve for that purpose.
        std = (
            self.config.initializer_range
            if hasattr(self.config, "initializer_range")
            else self.config.text_config.initializer_range
        )

        if hasattr(module, "class_embedding"):
            module.class_embedding.data.normal_(mean=0.0, std=std)

        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()


LLAVA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            The tensors corresponding to the input images. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details ([`LlavaProcessor`] uses
            [`CLIPImageProcessor`] for processing images).
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify it to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        vision_feature_layer (`Union[int, List[int]]`, *optional*, defaults to -2):
            The index of the layer to select the vision feature. If multiple indices are provided,
            the vision features of the corresponding indices will be concatenated to form the
            final vision features.
        vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
            The feature selection strategy used to select the vision feature from the vision backbone.
            Can be one of `"default"` or `"full"`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
            Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
            this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
            the complete sequence length.
"""


@add_start_docstrings(
    """The LLAVA model, which consists of a vision backbone and a language model.""",
    LLAVA_START_DOCSTRING,
)
class LlavaForConditionalGeneration(LlavaPreTrainedModel, GenerationMixin):
    def __init__(self, config: LlavaConfig):
        super().__init__(config)
        self.vision_tower = AutoModel.from_config(config.vision_config)

        self.multi_modal_projector = LlavaMultiModalProjector(config)
        self.vocab_size = config.text_config.vocab_size
        self.language_model = AutoModelForCausalLM.from_config(config.text_config)

        if self.language_model._tied_weights_keys is not None:
            self._tied_weights_keys = [f"language_model.{k}" for k in self.language_model._tied_weights_keys]

        self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1

        self.post_init()

    def get_input_embeddings(self):
        return self.language_model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.language_model.set_input_embeddings(value)

    def get_output_embeddings(self):
        return self.language_model.get_output_embeddings()

    def set_output_embeddings(self, new_embeddings):
        self.language_model.set_output_embeddings(new_embeddings)

    def set_decoder(self, decoder):
        self.language_model.set_decoder(decoder)

    def get_decoder(self):
        return self.language_model.get_decoder()

    def get_image_features(
        self,
        pixel_values: torch.FloatTensor,
        vision_feature_layer: Union[int, List[int]],
        vision_feature_select_strategy: str,
        **kwargs,
    ):
        """
        Obtains image last hidden states from the vision tower and applies multimodal projection.

        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`):
                The tensors corresponding to the input images.
            vision_feature_layer (`Union[int, List[int]]`):
                The index of the layer to select the vision feature. If multiple indices are provided,
                the vision features of the corresponding indices will be concatenated to form the
                final vision features.
            vision_feature_select_strategy (`str`):
                The feature selection strategy used to select the vision feature from the vision backbone.
                Can be one of `"default"` or `"full"`.
        Returns:
            image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`.
        """
        if vision_feature_select_strategy not in ["default", "full"]:
            raise ValueError(f"Unexpected select feature strategy: {vision_feature_select_strategy}")

        kwargs = {k: v for k, v in kwargs.items() if v is not None}
        # This is not memory efficient at all: `output_hidden_states=True` keeps all the hidden states.
        image_outputs = self.vision_tower(pixel_values, output_hidden_states=True, **kwargs)

        # If we have one vision feature layer, return the corresponding hidden states;
        # otherwise, select the hidden states of each feature layer and concatenate them.
        if isinstance(vision_feature_layer, int):
            selected_image_feature = image_outputs.hidden_states[vision_feature_layer]
            if vision_feature_select_strategy == "default":
                selected_image_feature = selected_image_feature[:, 1:]
        else:
            hs_pool = [image_outputs.hidden_states[layer_idx] for layer_idx in vision_feature_layer]
            # For "default", crop the CLS token from each hidden state in the pool
            if vision_feature_select_strategy == "default":
                hs_pool = [hs[:, 1:] for hs in hs_pool]
            selected_image_feature = torch.cat(hs_pool, dim=-1)

        image_features = self.multi_modal_projector(selected_image_feature)
        return image_features
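
    # Illustrative call sketch (hypothetical shapes, assuming a CLIP ViT-L/14-336 vision
    # tower whose output sequence is 1 CLS token + 576 patch tokens):
    #
    #     feats = model.get_image_features(
    #         pixel_values,  # (num_images, 3, 336, 336)
    #         vision_feature_layer=-2,  # penultimate layer, the usual Llava default
    #         vision_feature_select_strategy="default",  # drops the CLS token
    #     )
    #     # feats: (num_images, 576, config.text_config.hidden_size)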

    def _merge_input_ids_with_image_features(self, image_features, inputs_embeds, input_ids, attention_mask, labels):
        num_images, num_image_patches, embed_dim = image_features.shape
        batch_size, sequence_length = input_ids.shape
        left_padding = not torch.sum(input_ids[:, -1] == torch.tensor(self.pad_token_id))
        # 1. Create a mask to know where special image tokens are
        special_image_token_mask = input_ids == self.config.image_token_index
        num_special_image_tokens = torch.sum(special_image_token_mask, dim=-1)
        # Compute the maximum embed dimension
        max_embed_dim = (num_special_image_tokens.max() * (num_image_patches - 1)) + sequence_length
        batch_indices, non_image_indices = torch.where(input_ids != self.config.image_token_index)

        # 2. Compute the positions where text should be written
        # Calculate new positions for text tokens in the merged image-text sequence.
        # `special_image_token_mask` identifies image tokens. Each image token will be replaced by
        # `num_image_patches - 1` additional tokens.
        # `torch.cumsum` computes how each image token shifts subsequent text token positions.
        # - 1 to adjust for zero-based indexing, as `cumsum` inherently increases indices by one.
        new_token_positions = torch.cumsum((special_image_token_mask * (num_image_patches - 1) + 1), -1) - 1
        nb_image_pad = max_embed_dim - 1 - new_token_positions[:, -1]
        if left_padding:
            new_token_positions += nb_image_pad[:, None]  # offset for left padding
        text_to_overwrite = new_token_positions[batch_indices, non_image_indices]

        # 3. Create the full embedding, already padded to the maximum position
        final_embedding = torch.zeros(
            batch_size, max_embed_dim, embed_dim, dtype=inputs_embeds.dtype, device=inputs_embeds.device
        )
        final_attention_mask = torch.zeros(
            batch_size, max_embed_dim, dtype=attention_mask.dtype, device=inputs_embeds.device
        )
        if labels is not None:
            final_labels = torch.full(
                (batch_size, max_embed_dim), self.config.ignore_index, dtype=input_ids.dtype, device=input_ids.device
            )
        # In case the vision model or the language model has been offloaded to CPU, we need to manually
        # set the corresponding tensors into their correct target device.
        target_device = inputs_embeds.device
        batch_indices, non_image_indices, text_to_overwrite = (
            batch_indices.to(target_device),
            non_image_indices.to(target_device),
            text_to_overwrite.to(target_device),
        )
        attention_mask = attention_mask.to(target_device)

        # 4. Fill the embeddings based on the mask. If we have ["hey", "<image>", "how", "are"]
        # we need to index copy on [0, 577, 578, 579] for the text and [1:576] for the image features
        final_embedding[batch_indices, text_to_overwrite] = inputs_embeds[batch_indices, non_image_indices]
        final_attention_mask[batch_indices, text_to_overwrite] = attention_mask[batch_indices, non_image_indices]
        if labels is not None:
            final_labels[batch_indices, text_to_overwrite] = labels[batch_indices, non_image_indices]

        # 5. Fill the embeddings corresponding to the images. Anything that is not `text_positions` needs filling (#29835)
        image_to_overwrite = torch.full(
            (batch_size, max_embed_dim), True, dtype=torch.bool, device=inputs_embeds.device
        )
        image_to_overwrite[batch_indices, text_to_overwrite] = False
        image_to_overwrite &= image_to_overwrite.cumsum(-1) - 1 >= nb_image_pad[:, None].to(target_device)

        if image_to_overwrite.sum() != image_features.shape[:-1].numel():
            raise ValueError(
                f"The inputs provided to the model are wrong. The number of image tokens is {torch.sum(special_image_token_mask)} while"
                f" the number of images given to the model is {num_images}. This prevents correct indexing and breaks batch generation."
            )

        final_embedding[image_to_overwrite] = image_features.contiguous().reshape(-1, embed_dim).to(target_device)
        final_attention_mask |= image_to_overwrite
        position_ids = (final_attention_mask.cumsum(-1) - 1).masked_fill_((final_attention_mask == 0), 1)

        # 6. Mask out the embedding at padding positions, as we later use the past_key_value value to determine the non-attended tokens.
        batch_indices, pad_indices = torch.where(input_ids == self.pad_token_id)
        indices_to_mask = new_token_positions[batch_indices, pad_indices]

        final_embedding[batch_indices, indices_to_mask] = 0

        if labels is None:
            final_labels = None

        return final_embedding, final_attention_mask, final_labels, position_ids
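
    # Toy walkthrough of the position arithmetic above (hypothetical numbers): with
    # input_ids = ["hey", "<image>", "how"] and num_image_patches = 576,
    # torch.cumsum([1, 576, 1]) - 1 = [0, 576, 577], so "hey" stays at position 0,
    # the single <image> token expands into slots 1..576, and "how" lands at 577.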

    # @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    # @add_start_docstrings_to_model_forward(LLAVA_INPUTS_DOCSTRING)
    # @replace_return_docstrings(output_type=LlavaCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        pixel_values: torch.FloatTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        vision_feature_layer: Optional[int] = None,
        vision_feature_select_strategy: Optional[str] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        num_logits_to_keep: int = 0,
    ):
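        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in
                `[0, ..., config.text_config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices
                set to -100 are ignored (masked); the loss is only computed for the tokens with labels in
                `[0, ..., config.text_config.vocab_size]`.

        Returns:
            [`LlavaCausalLMOutputWithPast`] or `tuple(torch.FloatTensor)`

        Example (a minimal inference sketch; the checkpoint name and prompt format follow the usual
        `llava-hf/llava-1.5-7b-hf` usage and are illustrative, not guaranteed by this file):

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, LlavaForConditionalGeneration

        >>> model = LlavaForConditionalGeneration.from_pretrained("llava-hf/llava-1.5-7b-hf")
        >>> processor = AutoProcessor.from_pretrained("llava-hf/llava-1.5-7b-hf")

        >>> prompt = "USER: <image>\nWhat's the content of the image? ASSISTANT:"
        >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, text=prompt, return_tensors="pt")

        >>> generate_ids = model.generate(**inputs, max_new_tokens=15)
        >>> processor.batch_decode(generate_ids, skip_special_tokens=True)[0]
        ```
        """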
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        vision_feature_layer = (
            vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
        )
        vision_feature_select_strategy = (
            vision_feature_select_strategy
            if vision_feature_select_strategy is not None
            else self.config.vision_feature_select_strategy
        )

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if pixel_values is not None and inputs_embeds is not None:
            raise ValueError(
                "You cannot specify both pixel_values and inputs_embeds at the same time; pass input_ids together"
                " with pixel_values instead"
            )

        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)

        image_features = None
        if pixel_values is not None:
            image_features = self.get_image_features(
                pixel_values=pixel_values,
                vision_feature_layer=vision_feature_layer,
                vision_feature_select_strategy=vision_feature_select_strategy,
            )

        # Only merge when image features are present; on cached decoding steps `pixel_values`
        # is None (see `prepare_inputs_for_generation`) and the text embeddings are used as-is.
        if image_features is not None:
            inputs_embeds, attention_mask, labels, position_ids = self._merge_input_ids_with_image_features(
                image_features, inputs_embeds, input_ids, attention_mask, labels
            )
            cache_position = torch.arange(attention_mask.shape[1], device=attention_mask.device)

        outputs = self.language_model(
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
            num_logits_to_keep=num_logits_to_keep,
        )

        logits = outputs[0]

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n; this mirrors the standard upstream Llava loss.
            if attention_mask is not None:
                shift_attention_mask = attention_mask[:, -(logits.shape[1] - 1) :].to(logits.device)
                shift_logits = logits[..., :-1, :][shift_attention_mask != 0].contiguous()
                shift_labels = labels[..., 1:][shift_attention_mask.to(labels.device) != 0].contiguous()
            else:
                shift_logits = logits[..., :-1, :].contiguous()
                shift_labels = labels[..., 1:].contiguous()
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(
                shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1).to(shift_logits.device)
            )

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return LlavaCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=image_features if pixel_values is not None else None,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        inputs_embeds=None,
        pixel_values=None,
        attention_mask=None,
        cache_position=None,
        logits_to_keep=None,
        **kwargs,
    ):
        # Overwritten -- in specific circumstances we don't want to forward image inputs to the model

        model_inputs = self.language_model.prepare_inputs_for_generation(
            input_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            logits_to_keep=logits_to_keep,
            **kwargs,
        )

        if cache_position[0] == 0:
            # Pixel values are only needed on the first (prefill) step. In the cached decoding stage
            # they should be None, because the input ids no longer contain the special image token.
            model_inputs["pixel_values"] = pixel_values

        return model_inputs
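

# A hedged fine-tuning sketch (hypothetical tensors prepared by a processor; `labels`
# uses -100 at positions that should not contribute to the loss):
#
#     out = model(input_ids=input_ids, pixel_values=pixel_values,
#                 attention_mask=attention_mask, labels=labels)
#     out.loss.backward()
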
__all__ = ["LlavaForConditionalGeneration", "LlavaPreTrainedModel"]