Langchain-Chatchat
70 lines · 2.1 KB
1import requests
2import json
3import sys
4from pathlib import Path
5
6root_path = Path(__file__).parent.parent.parent
7sys.path.append(str(root_path))
8from configs.server_config import FSCHAT_MODEL_WORKERS
9from server.utils import api_address, get_model_worker_config
10
11from pprint import pprint
12import random
13from typing import List
14
15
def get_configured_models() -> List[str]:
    """Return the model names configured in FSCHAT_MODEL_WORKERS.

    The "default" entry is a config template, not a real model, so it is
    filtered out.
    """
    return [name for name in FSCHAT_MODEL_WORKERS if name != "default"]
21
22
# Base URL of the API server under test (resolved from server.utils.api_address).
api_base_url = api_address()
24
25
def get_running_models(api="/llm_model/list_models"):
    """POST to the model-list endpoint and return its "data" field.

    Returns an empty list on any non-200 response (best-effort helper for
    the tests below).
    """
    response = requests.post(api_base_url + api)
    if response.status_code != 200:
        return []
    return response.json()["data"]
32
33
def test_running_models(api="/llm_model/list_running_models"):
    """Check that the API reports a non-empty list of currently running models."""
    url = api_base_url + api
    r = requests.post(url)
    assert r.status_code == 200
    # Parse the response body once instead of re-parsing it on every use.
    body = r.json()
    print("\n获取当前正在运行的模型列表:")
    pprint(body)
    assert isinstance(body["data"], list)
    assert len(body["data"]) > 0
42
43
# Using the stop_model feature is not recommended: with the current
# implementation, once a model is stopped it can only be restarted manually.
45# def test_stop_model(api="/llm_model/stop"):
46# url = api_base_url + api
47# r = requests.post(url, json={""})
48
49
def test_change_model(api="/llm_model/change_model"):
    """Switch one running local model to a configured-but-not-running model.

    Picks a random running local (non-online-API) model and a random
    configured model that is not currently running, requests the swap,
    and verifies the new model appears in the running list afterwards.
    """
    url = api_base_url + api

    running_models = get_running_models()
    assert len(running_models) > 0

    model_workers = get_configured_models()

    # Configured models that are not currently running.  (Typo fixed:
    # "availabel" -> "available"; local name only, no caller impact.)
    available_new_models = list(set(model_workers) - set(running_models))
    assert len(available_new_models) > 0
    print(available_new_models)

    # Only local workers can be swapped out; online-API workers are excluded.
    local_models = [x for x in running_models if not get_model_worker_config(x).get("online_api")]
    # Guard: random.choice on an empty list raises an opaque IndexError;
    # fail with an explicit assertion instead.
    assert len(local_models) > 0

    model_name = random.choice(local_models)
    new_model_name = random.choice(available_new_models)
    print(f"\n尝试将模型从 {model_name} 切换到 {new_model_name}")
    r = requests.post(url, json={"model_name": model_name, "new_model_name": new_model_name})
    assert r.status_code == 200

    running_models = get_running_models()
    assert new_model_name in running_models
71