10
from fastapi import FastAPI, Response, Request
11
from fastapi.responses import StreamingResponse
12
from typing import List, Union, Any, Dict, AnyStr
21
def __init__(self, engine: g4f, debug: bool = True, sentry: bool = False,
22
list_ignored_providers: List[str] = None) -> None:
26
self.list_ignored_providers = list_ignored_providers
31
JSONObject = Dict[AnyStr, Any]
33
JSONStructure = Union[JSONArray, JSONObject]
36
async def read_root():
    """Serve the API root: a tiny JSON blob identifying this as the g4f API.

    Returns:
        fastapi.Response with a pretty-printed (indent=4) JSON body.
    """
    payload = {"info": "g4f API"}
    return Response(content=json.dumps(payload, indent=4), media_type="application/json")
40
async def read_root_v1():
    """Serve the /v1 root: directs callers to the usable sub-endpoints.

    Returns:
        fastapi.Response with a pretty-printed (indent=4) JSON body.
    """
    hint = {"info": "Go to /v1/chat/completions or /v1/models."}
    return Response(content=json.dumps(hint, indent=4), media_type="application/json")
43
@self.app.get("/v1/models")
46
for model in g4f.Model.__all__():
47
model_info = (g4f.ModelUtils.convert[model])
52
'owned_by': model_info.base_provider}
54
return Response(content=json.dumps({
56
'data': model_list}, indent=4), media_type="application/json")
58
@self.app.get("/v1/models/{model_name}")
59
async def model_info(model_name: str):
61
model_info = (g4f.ModelUtils.convert[model_name])
63
return Response(content=json.dumps({
67
'owned_by': model_info.base_provider
68
}, indent=4), media_type="application/json")
70
return Response(content=json.dumps({"error": "The model does not exist."}, indent=4), media_type="application/json")
72
@self.app.post("/v1/chat/completions")
73
async def chat_completions(request: Request, item: JSONStructure = None):
75
'model': 'gpt-3.5-turbo',
81
key.decode('utf-8') if isinstance(key, bytes) else key: str(value)
82
for key, value in (item or {}).items()
85
if isinstance(item_data.get('messages'), str):
86
item_data['messages'] = ast.literal_eval(item_data.get('messages'))
88
model = item_data.get('model')
89
stream = True if item_data.get("stream") == "True" else False
90
messages = item_data.get('messages')
91
provider = item_data.get('provider', '').replace('g4f.Provider.', '')
92
provider = provider if provider and provider != "Auto" else None
95
response = g4f.ChatCompletion.create(
100
ignored=self.list_ignored_providers
102
except Exception as e:
104
content = json.dumps({
105
"error": {"message": f"An error occurred while generating the response:\n{e}"},
107
"provider": g4f.get_last_provider(True)
109
return Response(content=content, status_code=500, media_type="application/json")
110
completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
111
completion_timestamp = int(time.time())
118
'id': f'chatcmpl-{completion_id}',
119
'object': 'chat.completion',
120
'created': completion_timestamp,
122
'provider': g4f.get_last_provider(True),
130
'finish_reason': 'stop',
135
'completion_tokens': 0,
140
return Response(content=json.dumps(json_data, indent=4), media_type="application/json")
144
for chunk in response:
146
'id': f'chatcmpl-{completion_id}',
147
'object': 'chat.completion.chunk',
148
'created': completion_timestamp,
150
'provider': g4f.get_last_provider(True),
158
'finish_reason': None,
162
yield f'data: {json.dumps(completion_data)}\n\n'
164
end_completion_data = {
165
'id': f'chatcmpl-{completion_id}',
166
'object': 'chat.completion.chunk',
167
'created': completion_timestamp,
169
'provider': g4f.get_last_provider(True),
174
'finish_reason': 'stop',
178
yield f'data: {json.dumps(end_completion_data)}\n\n'
179
except GeneratorExit:
181
except Exception as e:
183
content = json.dumps({
184
"error": {"message": f"An error occurred while generating the response:\n{e}"},
186
"provider": g4f.get_last_provider(True),
188
yield f'data: {content}'
190
return StreamingResponse(streaming(), media_type="text/event-stream")
192
@self.app.post("/v1/completions")
async def completions():
    """Stub for the legacy /v1/completions endpoint — not implemented yet.

    Returns:
        fastapi.Response carrying a JSON notice that the route is non-functional.
    """
    notice = {'info': 'Not working yet.'}
    return Response(content=json.dumps(notice, indent=4), media_type="application/json")
197
split_ip = ip.split(":")
198
uvicorn.run(app=self.app, host=split_ip[0], port=int(split_ip[1]), use_colors=False)