Calling Llama with transformers

Matching transformers library versions for each Llama release

bash
# llama2
pip install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116
pip install transformers==4.32.1
pip install accelerate==0.22.0
# llama3
pip install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116
pip install transformers==4.35.0
pip install accelerate==0.22.0
# llama3.1
pip install torch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 --index-url https://download.pytorch.org/whl/cu118
pip install transformers==4.43.1
pip install accelerate==0.22.0
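
To verify that the pinned versions are actually active in the current environment, a quick sanity check can help (a generic snippet, not from the original):

python
import torch
import transformers
import accelerate

# Confirm the installed versions match the pins above
print("torch:", torch.__version__)
print("transformers:", transformers.__version__)
print("accelerate:", accelerate.__version__)
# Confirm the CUDA build of torch can see a GPU
print("CUDA available:", torch.cuda.is_available())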

llama2

To be added.
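
The original leaves this section as a placeholder. As a stopgap, here is a minimal, untested sketch following the same pattern as the Llama 3 example below; the model id meta-llama/Llama-2-7b-chat-hf is an assumption, and since the pinned transformers 4.32.1 predates chat templates, the prompt is built with Llama 2's [INST] format by hand:

python
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

model_id = "meta-llama/Llama-2-7b-chat-hf"  # assumed model id, not from the original

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    device_map="cuda",
)

# transformers 4.32 has no apply_chat_template, so build Llama 2's chat prompt manually
prompt = (
    "[INST] <<SYS>>\nYou are an assistant who provides precise and direct answers.\n<</SYS>>\n\n"
    "In the sentence 'A boy is playing football', what is the exact action activity described? "
    "Provide only the exact phrase. [/INST]"
)
input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)

outputs = model.generate(input_ids, max_new_tokens=20, do_sample=False)
# Decode only the newly generated tokens, not the prompt
print(tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True))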

llama3

Meta-Llama-3-8B-Instruct

Works for QA and summarization; example code:

python
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

model_id = "meta-llama/Meta-Llama-3-8B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map='cuda',
)

messages = [
    {"role": "system", "content": "You are an assistant who provides precise and direct answers."},
    {"role": "user", "content": "In the sentence 'A boy is playing football', what is the exact action activity described? Provide only the exact phrase."},
]
input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_tensors="pt"
).to(model.device)

# The Instruct model ends each assistant turn with <|eot_id|>, so stop on either it or the default EOS token
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>")
]

outputs = model.generate(
    input_ids,
    max_new_tokens=20,
    eos_token_id=terminators,
    do_sample=False,  # greedy decoding; temperature/top_p would be ignored here and only trigger a warning
)
response = outputs[0][input_ids.shape[-1]:]
print(tokenizer.decode(response, skip_special_tokens=True))  # prints "playing football"
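
A note on the two terminators: the Instruct model marks the end of each assistant turn with the special token <|eot_id|>, while the tokenizer's default eos_token_id corresponds to <|end_of_text|>. Passing both lets generate stop at the end of the turn instead of running out max_new_tokens, and skip_special_tokens=True strips the stop token from the decoded answer.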

Meta-Llama-3-8B

Works for plain text generation; output quality is mediocre.

python
import transformers
import torch
from transformers import AutoTokenizer
model_id = "/home/mayunchuan/.cache/huggingface/transformers/meta-llama/Meta-Llama-3-8B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="cuda",
    max_length=40,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
)
result = pipeline("Hey how are you doing today?")
print(result)  # prints [{'generated_text': 'Hey how are you doing today? I am doing well. I am a little bit tired because I have been working a lot. I am a little bit tired because I have been working a lot.'}]
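
Meta-Llama-3-8B is the base (pretrained-only) model, so it simply continues the prompt; the repetitive completion above is typical greedy-decoding behavior for base models. For QA or summarization the Instruct variant above is the better fit, and for free-form generation, passing do_sample=True or a repetition_penalty to the pipeline call can reduce the looping.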

llama3.1

Meta-Llama-3.1-8B-Instruct

Works for QA and summarization; the same example code as the llama3 chat model applies:

python
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map='cuda',
)

messages = [
    {"role": "system", "content": "You are an assistant who provides precise and direct answers."},
    {"role": "user", "content": "In the sentence 'A boy is playing football', what is the exact action activity described? Provide only the exact phrase."},
]
input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_tensors="pt"
).to(model.device)

# As with Llama 3, stop on either <|eot_id|> (end of turn) or the default EOS token
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>")
]

outputs = model.generate(
    input_ids,
    max_new_tokens=20,
    eos_token_id=terminators,
    do_sample=False,  # greedy decoding; temperature/top_p would be ignored here and only trigger a warning
)
response = outputs[0][input_ids.shape[-1]:]
print(tokenizer.decode(response, skip_special_tokens=True))  # prints "Playing football."

Another demo also works:

python
import transformers
import torch
from transformers import AutoTokenizer
model_id = "meta-llama/Meta-Llama-3.1-8B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="cuda",
    max_length=35,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
)
result = pipeline("who are you?")
print(result)

The Instruct model can also be called through the pipeline's chat interface:

python
import transformers
import torch

model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"

pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="auto",
)

messages = [
    {"role": "system", "content": "You are an assistant who provides precise and direct answers."},
    {"role": "user", "content": "In the sentence 'A boy is playing football', what is the exact action activity described? Provide only the exact phrase."},
]
outputs = pipeline(
    messages,
    max_new_tokens=256,
)
print(outputs[0]["generated_text"][-1])  # prints {'role': 'assistant', 'content': 'Playing football.'}
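
The sections above also mention summarization; with the chat pipeline the only change is the messages payload. A minimal sketch (the system prompt and the article text are made-up illustrations, not from the original):

python
import transformers
import torch

model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"

pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="auto",
)

# Made-up article text, used only to illustrate the prompt shape
article = (
    "The city council met on Tuesday to discuss the new transit plan. "
    "Members debated funding sources and agreed to hold a public hearing next month."
)
messages = [
    {"role": "system", "content": "You are an assistant who writes concise summaries."},
    {"role": "user", "content": f"Summarize the following text in one sentence: {article}"},
]
outputs = pipeline(messages, max_new_tokens=60)
# The last message in generated_text is the assistant's reply
print(outputs[0]["generated_text"][-1]["content"])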