```
Traceback (most recent call last):
  File "/mnt2/wsj/table/basetest/test_single.py", line 243, in <module>
    QWen2VL()
  File "/mnt2/wsj/table/basetest/test_single.py", line 116, in QWen2VL
    generated_ids = model.generate(**inputs, max_new_tokens=512)
  File "/home/turing1/miniconda3/envs/MiniCPMV_wsj/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 116, in decorate_context
    return func(*args, **kwargs)
  File "/home/turing1/miniconda3/envs/MiniCPMV_wsj/lib/python3.10/site-packages/transformers/generation/utils.py", line 2048, in generate
    result = self._sample(
  File "/home/turing1/miniconda3/envs/MiniCPMV_wsj/lib/python3.10/site-packages/transformers/generation/utils.py", line 3044, in _sample
    next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
RuntimeError: CUDA error: device-side assert triggered
Compile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.
```
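The assert fires inside `torch.multinomial`, which usually means the sampling probabilities contained NaN/Inf or negative values by the time they reached the kernel. Device-side asserts are also reported asynchronously, so the frame shown in the traceback may not be the real failure site. As a minimal debugging sketch (assuming you rerun the same `test_single.py`), forcing synchronous kernel launches makes the traceback point at the actual failing call:

```python
import os

# Must be set before torch initializes CUDA, so do it before `import torch`
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"

import torch  # launches now run synchronously: slower, but accurate tracebacks
```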
Try using only one GPU:
```python
import torch
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

# "sequential" fills GPUs in order, so if the first GPU has enough
# memory the whole model lands on a single device
model = Qwen2VLForConditionalGeneration.from_pretrained(
    weight_path,
    torch_dtype=torch.bfloat16,
    device_map="sequential",
)
processor = AutoProcessor.from_pretrained(weight_path)
```
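If the weights still end up sharded across several cards, a blunter option (a sketch, assuming GPU 0 is the target; `CUDA_VISIBLE_DEVICES` must be set before torch initializes CUDA) is to hide the other devices entirely:

```python
import os

# Expose only GPU 0 to this process; set before importing torch
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

import torch
from transformers import Qwen2VLForConditionalGeneration

model = Qwen2VLForConditionalGeneration.from_pretrained(
    weight_path,
    torch_dtype=torch.bfloat16,
    device_map={"": 0},  # place every module on the single visible device
)
```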
```python
from qwen_vl_utils import process_vision_info

device = torch.device('cuda:0')

# Render the chat template to a prompt string and pull out the vision inputs
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
).to(device)

generated_ids = model.generate(**inputs, max_new_tokens=512)
# Drop the prompt tokens so only the newly generated tokens are decoded
generated_ids_trimmed = [
    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
```
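For reference, the snippet above assumes `messages` is already defined in the Qwen2-VL chat format; a minimal example (the image path and prompt text are placeholders, not from the original report) would be:

```python
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": "/path/to/table.png"},  # placeholder path
            {"type": "text", "text": "Recognize the table in this image."},
        ],
    }
]
```

After generation, `print(output_text[0])` shows the decoded answer.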