# NOTE(review): stray Gitee page banner ("code pull complete, the page will refresh
# automatically") — not Python; commented out so the file parses.
# 代码拉取完成,页面将自动刷新
from transformers import AutoTokenizer, AutoModel
import time
from langchain_community.llms import HuggingFacePipeline
from transformers import pipeline
from langchain.prompts import PromptTemplate
from langchain_community.llms import Tongyi
import ChatGLM
# import os
# os.environ["DASHSCOPE_API_KEY"] = "sk-cc1c8314fdbd43ceaf26ec1824d5dd3b"
# model = Tongyi()
# model= ChatGLM.ChatGLM_LLM()
# tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm3-6b", trust_remote_code=True)
# model = AutoModel.from_pretrained("THUDM/chatglm3-6b", trust_remote_code=True).half().cuda()
# model = model.eval()
# Build a text-generation pipeline backed by the ChatGLM3-6B checkpoint and run a
# single Chinese prompt through it.
#
# FIX: the original assigned the result to a variable named `pipeline`, shadowing the
# `transformers.pipeline` factory imported above — any later call to `pipeline(...)`
# as a factory would have failed. Renamed the instance to `text_generator`.
#
# NOTE(review): ChatGLM3 normally requires `trust_remote_code=True` (commented out
# below) to load its custom modeling code — TODO confirm before running.
text_generator = pipeline(
    "text-generation",
    model="THUDM/chatglm3-6b",
    # device="cuda:0",
    # trust_remote_code=True
)
print(text_generator("我今天考了"))
# model = HuggingFacePipeline(pipeline=pipeline)
# template = """Question: {question}
# Answer: Let's think step by step."""
# prompt = PromptTemplate.from_template(template)
# chain = prompt | model
# question = "What is the result of 1+ 1?"
# print(chain.invoke({"question": question}))
# response, history = model.chat(tokenizer, "你好", history=[])
# print(response)
# response, history = model.chat(tokenizer, "晚上睡不着应该怎么办", history=history)
# print(response)
# Use range() to generate the integer sequence 0 through 9 — 10 numbers in total.
# for i in range(10):
# # 在这里写入需要循环执行的代码
# a = time.time()
# response = chain.invoke({"question": "我今天考了一百分"})
# print(response)
# print(time.time()-a)
# NOTE(review): stray Gitee content-moderation notice scraped into the file — not
# Python; commented out so the file parses.
# 此处可能存在不合适展示的内容,页面不予展示。您可通过相关编辑功能自查并修改。
# 如您确认内容无涉及 不当用语 / 纯广告导流 / 暴力 / 低俗色情 / 侵权 / 盗版 / 虚假 / 无价值内容或违法国家有关法律法规的内容,可点击提交进行申诉,我们将尽快为您处理。