import gradio
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_community.llms import HuggingFacePipeline
# Alternative 1: use the Zhipu API-backed LLM instead of a local model.
# from ChatGLM_new import zhipu_llm
# model = zhipu_llm

# Alternative 2: let LangChain load the model and build the pipeline in one step.
# model = HuggingFacePipeline.from_model_id(
#     model_id="THUDM/chatglm3-6b",
#     task="text-generation",
#     device=0,
#     model_kwargs={"trust_remote_code": True},
#     pipeline_kwargs={"max_new_tokens": 5000},
# )
model_id = "THUDM/chatglm3-6b"
model = AutoModelForCausalLM.from_pretrained(
model_id,
torch_dtype="auto",
trust_remote_code=True ,
# device="0",
temperature= 0.9,
do_sample= True,
cache_dir="D:/chatglm3-6b/"
).half().cuda()
model = model.eval()
tokenizer = AutoTokenizer.from_pretrained(model_id,
trust_remote_code=True,
cache_dir="D:/chatglm3-6b/")
pipe = pipeline(
"text-generation", model=model, tokenizer=tokenizer, max_new_tokens=8000
)
hf = HuggingFacePipeline(pipeline=pipe)
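# Optional sanity check of the wrapped pipeline before building the chain
# (the test prompt below is an arbitrary assumption; uncomment to try it):
# print(hf.invoke("Hello, please introduce yourself in one sentence."))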
# Pass the user input straight through as the prompt.
prompt = ChatPromptTemplate.from_template("{user_input}")
# Alternative: a chat-style prompt with a pinned system message, e.g. one that
# forces the model to answer every question with "我不知道" ("I don't know"):
# prompt = ChatPromptTemplate.from_messages([
#     ("system", "Remember: for every question, reply only with: 我不知道."),
#     # ("human", "Hello, how are you doing?"),
#     # ("ai", "I'm doing well, thanks!"),
#     ("human", "{user_input}"),
# ])
output_parser = StrOutputParser()
# Compose prompt -> model -> parser into a single runnable (LCEL pipe syntax).
chain = prompt | hf | output_parser
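# Quick smoke test of the full chain (the example input is an assumption;
# uncomment to run it before starting the web UI):
# print(chain.invoke({"user_input": "What is machine learning?"}))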
def greet(user_input):
    # Run the input through the chain and return the model's reply.
    return chain.invoke({"user_input": user_input})
# Expose the chain as a minimal text-in / text-out web UI on all network interfaces.
demo = gradio.Interface(fn=greet, inputs="text", outputs="text")
demo.launch(server_name="0.0.0.0")
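# Sketch of alternative launch options (the values are assumptions): pin a port,
# or set share=True to have Gradio create a temporary public link.
# demo.launch(server_name="0.0.0.0", server_port=7860, share=True)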