-
Notifications
You must be signed in to change notification settings - Fork 4
Expand file tree
/
Copy pathmodel_api.py
More file actions
156 lines (137 loc) · 3.9 KB
/
model_api.py
File metadata and controls
156 lines (137 loc) · 3.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
from abc import ABC, abstractmethod
from typing import Optional, TYPE_CHECKING, Union
from .data import BaseInfo
if TYPE_CHECKING:
from litellm import ResponseInputParam
class ModelAPI(ABC):
    """Abstract base class for model backends proxied through litellm.

    Subclasses supply credentials and defaults via :meth:`model_info`; every
    proxy method below merges that info with per-call overrides and forwards
    to the corresponding litellm function (completion / responses / embedding,
    sync and async variants).
    """

    @abstractmethod
    def model_info(self) -> "BaseInfo":
        """Return the connection/model info (``api_key``, ``base_url``,
        ``model``, ``provider``) used to parameterize every litellm call."""
        ...

    def _litellm_kwargs(
        self,
        model: Optional[str],
        custom_llm_provider: Optional[str],
        *,
        base_url_key: str = "base_url",
    ) -> dict:
        """Resolve the keyword arguments common to all litellm calls.

        ``base_url_key`` exists because litellm's embedding API spells the
        endpoint parameter ``api_base`` while chat/responses use ``base_url``.
        Explicit per-call arguments win over the subclass-provided defaults;
        ``""`` and ``"openai"`` are the final fallbacks.
        """
        info = self.model_info()
        return {
            "api_key": info.api_key,
            base_url_key: info.base_url,
            "model": model or info.model or "",
            "custom_llm_provider": custom_llm_provider or info.provider or "openai",
        }

    def completions(
        self,
        **kwargs,
    ):
        """
        Deprecated. Use completion() instead.
        """
        import warnings
        warnings.warn(
            "completions() is deprecated, use completion() instead",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.completion(**kwargs)

    def completion(
        self,
        messages: Optional[list] = None,
        model: Optional[str] = None,
        custom_llm_provider: Optional[str] = None,
        **kwargs,
    ):
        """Synchronous chat completion via ``litellm.completion``."""
        from litellm import completion
        # None (not a shared mutable []) as the default; normalize here.
        return completion(
            **kwargs,
            **self._litellm_kwargs(model, custom_llm_provider),
            messages=messages if messages is not None else [],
        )

    async def acompletion(
        self,
        messages: Optional[list] = None,
        model: Optional[str] = None,
        custom_llm_provider: Optional[str] = None,
        **kwargs,
    ):
        """Async chat completion via ``litellm.acompletion``."""
        from litellm import acompletion
        return await acompletion(
            **kwargs,
            **self._litellm_kwargs(model, custom_llm_provider),
            messages=messages if messages is not None else [],
        )

    def responses(
        self,
        input: Union[str, "ResponseInputParam"],
        model: Optional[str] = None,
        custom_llm_provider: Optional[str] = None,
        **kwargs,
    ):
        """Synchronous Responses API call via ``litellm.responses``."""
        from litellm import responses
        return responses(
            **kwargs,
            **self._litellm_kwargs(model, custom_llm_provider),
            input=input,
        )

    async def aresponses(
        self,
        input: Union[str, "ResponseInputParam"],
        model: Optional[str] = None,
        custom_llm_provider: Optional[str] = None,
        **kwargs,
    ):
        """Async Responses API call via ``litellm.aresponses``."""
        from litellm import aresponses
        return await aresponses(
            **kwargs,
            **self._litellm_kwargs(model, custom_llm_provider),
            input=input,
        )

    def embedding(
        self,
        input: Optional[list] = None,
        model: Optional[str] = None,
        custom_llm_provider: Optional[str] = None,
        **kwargs,
    ):
        """Synchronous embedding via ``litellm.embedding``.

        Note: litellm's embedding API takes ``api_base``, not ``base_url``.
        """
        from litellm import embedding
        return embedding(
            **kwargs,
            **self._litellm_kwargs(model, custom_llm_provider, base_url_key="api_base"),
            input=input if input is not None else [],
        )

    async def aembedding(
        self,
        input: Optional[list] = None,
        model: Optional[str] = None,
        custom_llm_provider: Optional[str] = None,
        **kwargs,
    ):
        """Async embedding via ``litellm.aembedding``.

        Fix: declared ``async def`` for consistency with acompletion/aresponses
        (it previously was a plain ``def`` returning litellm's un-awaited
        coroutine); callers ``await`` it either way, so this is compatible.
        """
        from litellm import aembedding
        return await aembedding(
            **kwargs,
            **self._litellm_kwargs(model, custom_llm_provider, base_url_key="api_base"),
            input=input if input is not None else [],
        )