Langchain-Chatchat
123 lines · 4.3 KB
1import sys2from fastchat.conversation import Conversation3from server.model_workers.base import *4from server.utils import get_httpx_client5from fastchat import conversation as conv6import json, httpx7from typing import List, Dict8from configs import logger, log_verbose9
10
class GeminiWorker(ApiModelWorker):
    """Model worker that forwards chat requests to Google's Gemini API
    (generativelanguage.googleapis.com) through the fastchat worker protocol."""

    def __init__(
        self,
        *,
        controller_addr: str = None,
        worker_addr: str = None,
        model_names: List[str] = None,
        **kwargs,
    ):
        """
        Args:
            controller_addr: fastchat controller address.
            worker_addr: address this worker is reachable at.
            model_names: model names this worker serves; defaults to
                ["gemini-api"].
            **kwargs: forwarded to ApiModelWorker.
        """
        # A fresh list is built per call instead of using a mutable default
        # argument (the original `= ["gemini-api"]` list was shared across
        # every instantiation that omitted the parameter).
        if model_names is None:
            model_names = ["gemini-api"]
        kwargs.update(
            model_names=model_names,
            controller_addr=controller_addr,
            worker_addr=worker_addr,
        )
        # Default context length when the caller did not specify one.
        kwargs.setdefault("context_len", 4096)
        super().__init__(**kwargs)
24def create_gemini_messages(self, messages) -> json:25has_history = any(msg['role'] == 'assistant' for msg in messages)26gemini_msg = []27
28for msg in messages:29role = msg['role']30content = msg['content']31if role == 'system':32continue33if has_history:34if role == 'assistant':35role = "model"36transformed_msg = {"role": role, "parts": [{"text": content}]}37else:38if role == 'user':39transformed_msg = {"parts": [{"text": content}]}40
41gemini_msg.append(transformed_msg)42
43msg = dict(contents=gemini_msg)44return msg45
46def do_chat(self, params: ApiChatParams) -> Dict:47params.load_config(self.model_names[0])48data = self.create_gemini_messages(messages=params.messages)49generationConfig = dict(50temperature=params.temperature,51topK=1,52topP=1,53maxOutputTokens=4096,54stopSequences=[]55)56
57data['generationConfig'] = generationConfig58url = "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent" + '?key=' + params.api_key59headers = {60'Content-Type': 'application/json',61}62if log_verbose:63logger.info(f'{self.__class__.__name__}:url: {url}')64logger.info(f'{self.__class__.__name__}:headers: {headers}')65logger.info(f'{self.__class__.__name__}:data: {data}')66
67text = ""68json_string = ""69timeout = httpx.Timeout(60.0)70client = get_httpx_client(timeout=timeout)71with client.stream("POST", url, headers=headers, json=data) as response:72for line in response.iter_lines():73line = line.strip()74if not line or "[DONE]" in line:75continue76
77json_string += line78
79try:80resp = json.loads(json_string)81if 'candidates' in resp:82for candidate in resp['candidates']:83content = candidate.get('content', {})84parts = content.get('parts', [])85for part in parts:86if 'text' in part:87text += part['text']88yield {89"error_code": 0,90"text": text91}92print(text)93except json.JSONDecodeError as e:94print("Failed to decode JSON:", e)95print("Invalid JSON string:", json_string)96
97def get_embeddings(self, params):98print("embedding")99print(params)100
101def make_conv_template(self, conv_template: str = None, model_path: str = None) -> Conversation:102return conv.Conversation(103name=self.model_names[0],104system_message="You are a helpful, respectful and honest assistant.",105messages=[],106roles=["user", "assistant"],107sep="\n### ",108stop_str="###",109)110
111
if __name__ == "__main__":
    # Standalone entry point: run this worker as its own fastchat model
    # worker process on port 21012, registering with a local controller.
    import uvicorn
    from server.utils import MakeFastAPIOffline
    from fastchat.serve.base_model_worker import app

    worker = GeminiWorker(
        controller_addr="http://127.0.0.1:20001",
        worker_addr="http://127.0.0.1:21012",
    )
    # fastchat's worker app looks up a module-level `worker` in
    # fastchat.serve.model_worker, so inject ours there.
    sys.modules["fastchat.serve.model_worker"].worker = worker
    MakeFastAPIOffline(app)
    uvicorn.run(app, port=21012)