Fails to generate with `inputs_embeds`

#18 opened by JaronTHU

I can run the following code successfully with `transformers==4.42.3`:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b-it")
model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-2-9b-it",
    device_map="auto",
    torch_dtype=torch.bfloat16
)

input_text = "Write me a poem about Machine Learning."
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")

outputs = model.generate(**input_ids)
print(tokenizer.decode(outputs[0]))
```

However, when I try to replace `input_ids` with `inputs_embeds`:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b-it")
model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-2-9b-it",
    device_map="auto",
    torch_dtype=torch.bfloat16
)

input_text = "Write me a poem about Machine Learning."
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")

# The only change: convert the token ids to embeddings and pass those instead.
input_ids["inputs_embeds"] = model.get_input_embeddings()(input_ids.pop("input_ids"))

outputs = model.generate(**input_ids)
print(tokenizer.decode(outputs[0]))
```

I get the following error:

```
Traceback (most recent call last):
  File "/workspace/tmp1.py", line 22, in <module>
    outputs = model.generate(**input_ids)
  File "/data/miniconda3/envs/env-3.10.6/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
  File "/usr/local/app/.local/lib/python3.10/site-packages/transformers/generation/utils.py", line 1914, in generate
    result = self._sample(
  File "/usr/local/app/.local/lib/python3.10/site-packages/transformers/generation/utils.py", line 2651, in _sample
    outputs = self(
  File "/data/miniconda3/envs/env-3.10.6/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/data/miniconda3/envs/env-3.10.6/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "/usr/local/app/.local/lib/python3.10/site-packages/transformers/models/gemma2/modeling_gemma2.py", line 1068, in forward
    outputs = self.model(
  File "/data/miniconda3/envs/env-3.10.6/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/data/miniconda3/envs/env-3.10.6/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "/usr/local/app/.local/lib/python3.10/site-packages/transformers/models/gemma2/modeling_gemma2.py", line 908, in forward
    layer_outputs = decoder_layer(
  File "/data/miniconda3/envs/env-3.10.6/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/data/miniconda3/envs/env-3.10.6/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "/usr/local/app/.local/lib/python3.10/site-packages/transformers/models/gemma2/modeling_gemma2.py", line 650, in forward
    hidden_states, self_attn_weights, present_key_value = self.self_attn(
  File "/data/miniconda3/envs/env-3.10.6/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/data/miniconda3/envs/env-3.10.6/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "/usr/local/app/.local/lib/python3.10/site-packages/transformers/models/gemma2/modeling_gemma2.py", line 252, in forward
    key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
  File "/usr/local/app/.local/lib/python3.10/site-packages/transformers/cache_utils.py", line 1071, in update
    return update_fn(
  File "/usr/local/app/.local/lib/python3.10/site-packages/transformers/cache_utils.py", line 1046, in _static_update
    k_out[:, :, cache_position] = key_states
IndexError: index 11 is out of bounds for dimension 0 with size 11
```
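
The failing index (11) equals the reported cache size (11), which looks suspiciously like the length of my embedded prompt, so I suspect the KV cache is being allocated with room for the prompt only and no slots for newly generated tokens. A quick check, just printing the shape of the tensor built above:

```python
# If the sequence length printed here is 11, it matches the failing cache
# size in the traceback, i.e. the cache has no room for generated tokens.
print(input_ids["inputs_embeds"].shape)
```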

This error does not happen if I replace `google/gemma-2-9b-it` with other LLMs, e.g. `mistralai/Mistral-7B-Instruct-v0.3`. How can I fix it?
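
One workaround idea, as an untested sketch: it assumes the root cause is that the `HybridCache` Gemma 2 uses by default gets sized from the prompt length alone, so preallocating the cache with room for the new tokens and handing it to `generate` should avoid the out-of-bounds write. Note that the `HybridCache` constructor arguments may differ between transformers versions.

```python
from transformers import HybridCache

max_new_tokens = 64
prompt_len = input_ids["inputs_embeds"].shape[1]

# Preallocate a cache covering the prompt plus every token we plan to
# generate, so the static update at cache_utils.py:1046 never indexes
# past the end of the cache.
cache = HybridCache(
    config=model.config,
    max_batch_size=1,
    max_cache_len=prompt_len + max_new_tokens,
    device=model.device,
    dtype=model.dtype,
)

outputs = model.generate(
    **input_ids,
    past_key_values=cache,
    max_new_tokens=max_new_tokens,
)
print(tokenizer.decode(outputs[0]))
```

If this really is a bug specific to `transformers==4.42.3`, upgrading to a newer release may also be worth trying.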
