After downloading glm4-9b locally and loading it into LangChain, you hit the same kind of error as with qwen-7b-chat. For GLM, refer to this solution: self-llm/GLM-4/02-GLM-4-9B-chat langchain 接入.md at master · datawhalechina/self-llm · GitHub
```python
# Save this class as LLM.py so it can be imported below.
from langchain.llms.base import LLM
from typing import Any, List, Optional, Dict
from langchain.callbacks.manager import CallbackManagerForLLMRun
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

class ChatGLM4_LLM(LLM):
    # Custom LLM class wrapping a locally stored ChatGLM4 model
    tokenizer: AutoTokenizer = None
    model: AutoModelForCausalLM = None
    gen_kwargs: dict = None

    def __init__(self, model_name_or_path: str, gen_kwargs: dict = None):
        super().__init__()
        print("Loading model from local path...")
        self.tokenizer = AutoTokenizer.from_pretrained(
            model_name_or_path, trust_remote_code=True
        )
        self.model = AutoModelForCausalLM.from_pretrained(
            model_name_or_path,
            torch_dtype=torch.bfloat16,
            trust_remote_code=True,
            device_map="auto"
        ).eval()
        print("Local model loaded")

        if gen_kwargs is None:
            gen_kwargs = {"max_length": 2500, "do_sample": True, "top_k": 1}
        self.gen_kwargs = gen_kwargs

    def _call(self, prompt: str, stop: Optional[List[str]] = None,
              run_manager: Optional[CallbackManagerForLLMRun] = None,
              **kwargs: Any) -> str:
        messages = [{"role": "user", "content": prompt}]
        model_inputs = self.tokenizer.apply_chat_template(
            messages, tokenize=True, return_tensors="pt", return_dict=True, add_generation_prompt=True
        ).to(self.model.device)  # move the inputs onto the model's device
        generated_ids = self.model.generate(**model_inputs, **self.gen_kwargs)
        # strip the prompt tokens so only the newly generated answer gets decoded
        generated_ids = [
            output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs['input_ids'], generated_ids)
        ]
        response = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
        return response

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Return a dict identifying this LLM; essential for caching and tracing."""
        return {
            "model_name": "glm-4-9b-chat",
            "max_length": self.gen_kwargs.get("max_length"),
            "do_sample": self.gen_kwargs.get("do_sample"),
            "top_k": self.gen_kwargs.get("top_k"),
        }

    @property
    def _llm_type(self) -> str:
        return "glm-4-9b-chat"
```
```python
from LLM import ChatGLM4_LLM

gen_kwargs = {"max_length": 2500, "do_sample": True, "top_k": 1}
llm = ChatGLM4_LLM(model_name_or_path="/root/autodl-tmp/ZhipuAI/glm-4-9b-chat", gen_kwargs=gen_kwargs)
print(llm.invoke("你是谁"))
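Because ChatGLM4_LLM implements LangChain's standard LLM interface, it can also be composed into a chain with a prompt template, just like the HuggingFacePipeline examples further below. A minimal sketch, reusing the model path and prompt from this page:

```python
from langchain_core.prompts import ChatPromptTemplate
from LLM import ChatGLM4_LLM

# Sketch: the custom LLM slots into prompt | llm like any other LangChain LLM
llm = ChatGLM4_LLM(model_name_or_path="/root/autodl-tmp/ZhipuAI/glm-4-9b-chat")
prompt = ChatPromptTemplate.from_template("请编写一篇关于{topic}的中文小故事,不超过100字")
chain = prompt | llm
print(chain.invoke({"topic": "小白兔"}))
```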
Using a model already downloaded locally with LangChain; here we use Tongyi Qianwen (Qwen).
GPU memory: 24 GB
Models: qwen1.5-7B-Chat, qwen-7B-Chat
We first tried qwen-7B-Chat, which errors out and cannot be used:
Looking into it, this model type is not supported. However, the supported list contains a "Qwen2" entry, which suggested it should work, so I searched Hugging Face for "Qwen2" and found the corresponding qwen1.5-7B-Chat model:
https://huggingface.co/Qwen/Qwen1.5-7B-Chat
It is essentially a public beta release. In short, LangChain's support for directly loading a local Tongyi Qianwen model is currently not great. Ollama is an alternative (see the sketch below), but its model downloads are very slow and often fail.
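For reference, a minimal sketch of the Ollama route, assuming the Ollama service is running locally and a Qwen chat model has already been pulled (the `qwen:7b-chat` tag here is an assumption; check `ollama list` for what you actually have):

```python
from langchain_community.llms import Ollama

# Assumes a local Ollama server with a pulled Qwen model, e.g. `ollama pull qwen:7b-chat`
llm = Ollama(model="qwen:7b-chat")
print(llm.invoke("你是谁"))
```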
With qwen1.5-7B-Chat, the model does load successfully and produces output, but it is extremely slow:
```python
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import pipeline
from langchain_community.llms import HuggingFacePipeline  # formerly: from langchain import HuggingFacePipeline
from langchain_core.prompts import ChatPromptTemplate

model_path = "/root/autodl-tmp/Qwen1___5-7B-Chat"
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
# device_map="auto" already places the model on the available GPU(s),
# so do not call .to(device) afterwards
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    device_map='auto',
    trust_remote_code=True
).eval()
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    # max_length=4096,
    # max_tokens=4096,
    max_new_tokens=512,
    top_p=1,
    repetition_penalty=1.15
)
llm = HuggingFacePipeline(pipeline=pipe)

prompt = ChatPromptTemplate.from_template("请编写一篇关于{topic}的中文小故事,不超过100字")
chain = prompt | llm
res = chain.invoke({"topic": "小白兔"})
print(res)
```
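One likely cause of the slowness: without an explicit `torch_dtype`, `from_pretrained` loads the weights in float32, and a 7B model in float32 (roughly 28 GB) does not fit in 24 GB of VRAM, so `device_map="auto"` offloads part of the model to CPU. A hedged variant of the load call that keeps everything on the GPU in half precision (dtype choice to be verified on your hardware):

```python
import torch
from transformers import AutoModelForCausalLM

# Sketch: load Qwen1.5-7B-Chat in bfloat16 (~14 GB of weights) so it fits in
# 24 GB of VRAM without CPU offload; use torch.float16 on GPUs without bf16 support.
model = AutoModelForCausalLM.from_pretrained(
    "/root/autodl-tmp/Qwen1___5-7B-Chat",
    torch_dtype=torch.bfloat16,
    device_map="auto",
    trust_remote_code=True,
).eval()
```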
qwen-14b-chat also runs.
To pin specific GPUs, CUDA_VISIBLE_DEVICES must be set at the very top of the script, before the LangChain/transformers imports:
```python
import os
# Must be set before transformers/langchain are imported
os.environ["CUDA_VISIBLE_DEVICES"] = "5,6"

from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import pipeline
from langchain_community.llms import HuggingFacePipeline  # formerly: from langchain import HuggingFacePipeline
from langchain_core.prompts import ChatPromptTemplate


tokenizer = AutoTokenizer.from_pretrained("/home/qwen-14b-chat/",
                                          trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("/home/qwen-14b-chat/",
                                             device_map="auto",
                                             trust_remote_code=True).eval()

pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    # max_length=4096,
    # max_tokens=4096,
    max_new_tokens=512,
    top_p=1,
    repetition_penalty=1.15
)
llm = HuggingFacePipeline(pipeline=pipe)

prompt = ChatPromptTemplate.from_template("请编写一篇关于{topic}的中文小故事,不超过100字")
chain = prompt | llm
res = chain.invoke({"topic": "小白兔"})
print(res)
```