railtracks.llm
from .content import ToolCall, ToolResponse
from .history import MessageHistory
from .message import AssistantMessage, Message, SystemMessage, ToolMessage, UserMessage
from .model import ModelBase
from .models import (
    AnthropicLLM,
    AzureAILLM,
    CohereLLM,
    GeminiLLM,
    HuggingFaceLLM,
    OllamaLLM,
    OpenAICompatibleProvider,
    OpenAILLM,
    PortKeyLLM,
    # TelusLLM,
)
from .providers import ModelProvider
from .tools import (
    ArrayParameter,
    ObjectParameter,
    Parameter,
    RefParameter,
    Tool,
    UnionParameter,
)

# Public API of railtracks.llm. Previously "AzureAILLM", "GeminiLLM", and
# "CohereLLM" were listed twice; each exported name now appears exactly once.
__all__ = [
    "ModelBase",
    "ToolCall",
    "ToolResponse",
    "UserMessage",
    "SystemMessage",
    "AssistantMessage",
    "Message",
    "ToolMessage",
    "MessageHistory",
    "ModelProvider",
    "Tool",
    "AnthropicLLM",
    "AzureAILLM",
    "CohereLLM",
    "HuggingFaceLLM",
    "OpenAILLM",
    "GeminiLLM",
    "OllamaLLM",
    # "TelusLLM",
    "PortKeyLLM",
    "OpenAICompatibleProvider",
    # Parameter types
    "Parameter",
    "UnionParameter",
    "ArrayParameter",
    "ObjectParameter",
    "RefParameter",
]
class ModelBase(ABC, Generic[_TStream]):
    """
    A simple base that represents the behavior of a model that can be used for chat, structured interactions, and streaming.

    The base class allows for the insertion of hooks that can modify the messages before they are sent to the model,
    response after they are received, and map exceptions that may occur during the interaction.

    All the hooks are optional and can be added or removed as needed.
    """

    def __init__(
        self,
        __pre_hooks: List[Callable[[MessageHistory], MessageHistory]] | None = None,
        __post_hooks: List[Callable[[MessageHistory, Response], Response]]
        | None = None,
        __exception_hooks: List[Callable[[MessageHistory, Exception], None]]
        | None = None,
        stream: _TStream = False,
    ):
        # Normalize each hook argument to a list so the add_*/remove_* helpers
        # can always assume a mutable list is present.
        if __pre_hooks is None:
            pre_hooks: List[Callable[[MessageHistory], MessageHistory]] = []
        else:
            pre_hooks = __pre_hooks

        if __post_hooks is None:
            post_hooks: List[Callable[[MessageHistory, Response], Response]] = []
        else:
            post_hooks = __post_hooks

        if __exception_hooks is None:
            exception_hooks: List[Callable[[MessageHistory, Exception], None]] = []
        else:
            exception_hooks = __exception_hooks

        self._pre_hooks = pre_hooks
        self._post_hooks = post_hooks
        self._exception_hooks = exception_hooks
        # Whether chat-style calls stream; paired with the _TStream type marker
        # that drives the Literal[True]/Literal[False] overloads below.
        self.stream = stream

    def add_pre_hook(self, hook: Callable[[MessageHistory], MessageHistory]) -> None:
        """Adds a pre-hook to modify messages before sending them to the model."""
        self._pre_hooks.append(hook)

    def add_post_hook(
        self, hook: Callable[[MessageHistory, Response], Response]
    ) -> None:
        """Adds a post-hook to modify the response after receiving it from the model."""
        self._post_hooks.append(hook)

    def add_exception_hook(
        self, hook: Callable[[MessageHistory, Exception], None]
    ) -> None:
        """Adds an exception hook to handle exceptions during model interactions."""
        self._exception_hooks.append(hook)

    def remove_pre_hooks(self) -> None:
        """Removes all of the hooks that modify messages before sending them to the model."""
        self._pre_hooks = []

    def remove_post_hooks(self) -> None:
        """Removes all of the hooks that modify the response after receiving it from the model."""
        self._post_hooks = []

    def remove_exception_hooks(self) -> None:
        """Removes all of the hooks that handle exceptions during model interactions."""
        self._exception_hooks = []

    @abstractmethod
    def model_name(self) -> str:
        """
        Returns the name of the model being used.

        It can be treated as a unique identifier for the model when paired with the `model_type`.
        """
        pass

    @abstractmethod
    def model_provider(self) -> ModelProvider:
        """The name of the provider of this model (The Company that owns the model)"""
        pass

    @classmethod
    @abstractmethod
    def model_gateway(cls) -> ModelProvider:
        """
        Gets the API distributor of the model. Not necessarily the same as the model itself.

        E.g. if you are calling an OpenAI LLM through Azure AI foundry.
        """
        pass

    def _run_pre_hooks(self, message_history: MessageHistory) -> MessageHistory:
        """Runs all pre-hooks on the provided message history."""
        # Hooks are applied in registration order, each receiving the previous result.
        for hook in self._pre_hooks:
            message_history = hook(message_history)
        return message_history

    def _run_post_hooks(
        self, message_history: MessageHistory, result: Response
    ) -> Response:
        """Runs all post-hooks on the provided message history and result."""
        for hook in self._post_hooks:
            result = hook(message_history, result)
        return result

    def _run_exception_hooks(
        self, message_history: MessageHistory, exception: Exception
    ) -> None:
        """Runs all exception hooks on the provided message history and exception."""
        for hook in self._exception_hooks:
            hook(message_history, exception)

    def generator_wrapper(
        self,
        generator: Generator[str | Response, None, Response],
        message_history: MessageHistory,
    ) -> Generator[str | Response, None, Response]:
        """
        Wraps a streaming generator so that post-hooks run on the final Response
        while intermediate string chunks pass straight through.
        """
        new_response: Response | None = None
        for g in generator:
            if isinstance(g, Response):
                # NOTE(review): bare attribute access with no effect at this level —
                # possibly forcing a lazy property or leftover debug code; confirm
                # whether it can be removed.
                g.message_info
                new_response = self._run_post_hooks(message_history, g)
                yield new_response

            # NOTE(review): when `g` is a Response this yields it a second time,
            # right after the post-hooked response above — confirm that downstream
            # consumers expect both.
            yield g

        assert new_response is not None, (
            "The generator did not yield a final Response object so nothing could be done."
        )

        return new_response

    @overload
    def chat(self: ModelBase[Literal[False]], messages: MessageHistory) -> Response:
        pass

    @overload
    def chat(
        self: ModelBase[Literal[True]], messages: MessageHistory
    ) -> Generator[str | Response, None, Response]:
        pass

    def chat(
        self, messages: MessageHistory
    ) -> Response | Generator[str | Response, None, Response]:
        """Chat with the model using the provided messages."""

        messages = self._run_pre_hooks(messages)

        try:
            response = self._chat(messages)
        except Exception as e:
            # Let exception hooks observe the failure, then re-raise unchanged.
            self._run_exception_hooks(messages, e)
            raise e

        if isinstance(response, Generator):
            # Streaming case: post-hooks are deferred until the final Response.
            return self.generator_wrapper(response, messages)

        response = self._run_post_hooks(messages, response)
        return response

    @overload
    async def achat(
        self: ModelBase[Literal[False]], messages: MessageHistory
    ) -> Response:
        pass

    @overload
    async def achat(
        self: ModelBase[Literal[True]], messages: MessageHistory
    ) -> Generator[str | Response, None, Response]:
        pass

    async def achat(self, messages: MessageHistory):
        """Asynchronous chat with the model using the provided messages."""
        messages = self._run_pre_hooks(messages)

        try:
            response = await self._achat(messages)
        except Exception as e:
            self._run_exception_hooks(messages, e)
            raise e

        # NOTE(review): `_achat` is declared to return an AsyncGenerator but this
        # checks for a (sync) Generator — confirm which streaming type async
        # providers actually return.
        if isinstance(response, Generator):
            return self.generator_wrapper(response, messages)

        response = self._run_post_hooks(messages, response)

        return response

    @overload
    def structured(
        self: ModelBase[Literal[False]],
        messages: MessageHistory,
        schema: Type[BaseModel],
    ) -> Response:
        pass

    @overload
    def structured(
        self: ModelBase[Literal[True]],
        messages: MessageHistory,
        schema: Type[BaseModel],
    ) -> Generator[str | Response, None, Response]:
        pass

    def structured(self, messages: MessageHistory, schema: Type[BaseModel]):
        """Structured interaction with the model using the provided messages and output_schema."""
        messages = self._run_pre_hooks(messages)

        try:
            response = self._structured(messages, schema)
        except Exception as e:
            self._run_exception_hooks(messages, e)
            raise e

        if isinstance(response, Generator):
            return self.generator_wrapper(response, messages)

        response = self._run_post_hooks(messages, response)

        return response

    @overload
    async def astructured(
        self: ModelBase[Literal[False]],
        messages: MessageHistory,
        schema: Type[BaseModel],
    ) -> Response:
        pass

    @overload
    async def astructured(
        self: ModelBase[Literal[True]],
        messages: MessageHistory,
        schema: Type[BaseModel],
    ) -> Generator[str | Response, None, Response]:
        pass

    async def astructured(self, messages: MessageHistory, schema: Type[BaseModel]):
        """Asynchronous structured interaction with the model using the provided messages and output_schema."""
        messages = self._run_pre_hooks(messages)

        try:
            response = await self._astructured(messages, schema)
        except Exception as e:
            self._run_exception_hooks(messages, e)
            raise e

        if isinstance(response, Generator):
            return self.generator_wrapper(response, messages)

        response = self._run_post_hooks(messages, response)

        return response

    @overload
    def chat_with_tools(
        self: ModelBase[Literal[False]], messages: MessageHistory, tools: List[Tool]
    ) -> Response:
        pass

    @overload
    def chat_with_tools(
        self: ModelBase[Literal[True]], messages: MessageHistory, tools: List[Tool]
    ) -> Generator[str | Response, None, Response]:
        pass

    def chat_with_tools(self, messages: MessageHistory, tools: List[Tool]):
        """Chat with the model using the provided messages and tools."""
        messages = self._run_pre_hooks(messages)

        try:
            response = self._chat_with_tools(messages, tools)
        except Exception as e:
            self._run_exception_hooks(messages, e)
            raise e

        if isinstance(response, Generator):
            return self.generator_wrapper(response, messages)

        response = self._run_post_hooks(messages, response)
        return response

    @overload
    async def achat_with_tools(
        self: ModelBase[Literal[False]], messages: MessageHistory, tools: List[Tool]
    ) -> Response:
        pass

    @overload
    async def achat_with_tools(
        self: ModelBase[Literal[True]], messages: MessageHistory, tools: List[Tool]
    ) -> Generator[str | Response, None, Response]:
        pass

    async def achat_with_tools(self, messages: MessageHistory, tools: List[Tool]):
        """Asynchronous chat with the model using the provided messages and tools."""
        messages = self._run_pre_hooks(messages)

        try:
            response = await self._achat_with_tools(messages, tools)
        except Exception as e:
            self._run_exception_hooks(messages, e)
            raise e

        if isinstance(response, Generator):
            return self.generator_wrapper(response, messages)

        response = self._run_post_hooks(messages, response)

        return response

    # --- Provider-specific implementations. Subclasses supply the actual model
    # --- calls; the public methods above wrap them with hooks and streaming.

    @abstractmethod
    def _chat(
        self, messages: MessageHistory
    ) -> Response | Generator[str | Response, None, Response]:
        pass

    @abstractmethod
    def _structured(
        self, messages: MessageHistory, schema: Type[BaseModel]
    ) -> Response | Generator[str | Response, None, Response]:
        pass

    @abstractmethod
    def _chat_with_tools(
        self, messages: MessageHistory, tools: List[Tool]
    ) -> Response | Generator[str | Response, None, Response]:
        pass

    @abstractmethod
    async def _achat(
        self, messages: MessageHistory
    ) -> Response | AsyncGenerator[str | Response, None]:
        pass

    @abstractmethod
    async def _astructured(
        self,
        messages: MessageHistory,
        schema: Type[BaseModel],
    ) -> Response | AsyncGenerator[str | Response, None]:
        pass

    @abstractmethod
    async def _achat_with_tools(
        self, messages: MessageHistory, tools: List[Tool]
    ) -> Response | AsyncGenerator[str | Response, None]:
        pass
A simple base that represents the behavior of a model that can be used for chat, structured interactions, and streaming.
The base class allows for the insertion of hooks that can modify the messages before they are sent to the model, response after they are received, and map exceptions that may occur during the interaction.
All the hooks are optional and can be added or removed as needed.
70 def add_pre_hook(self, hook: Callable[[MessageHistory], MessageHistory]) -> None: 71 """Adds a pre-hook to modify messages before sending them to the model.""" 72 self._pre_hooks.append(hook)
Adds a pre-hook to modify messages before sending them to the model.
74 def add_post_hook( 75 self, hook: Callable[[MessageHistory, Response], Response] 76 ) -> None: 77 """Adds a post-hook to modify the response after receiving it from the model.""" 78 self._post_hooks.append(hook)
Adds a post-hook to modify the response after receiving it from the model.
80 def add_exception_hook( 81 self, hook: Callable[[MessageHistory, Exception], None] 82 ) -> None: 83 """Adds an exception hook to handle exceptions during model interactions.""" 84 self._exception_hooks.append(hook)
Adds an exception hook to handle exceptions during model interactions.
86 def remove_pre_hooks(self) -> None: 87 """Removes all of the hooks that modify messages before sending them to the model.""" 88 self._pre_hooks = []
Removes all of the hooks that modify messages before sending them to the model.
90 def remove_post_hooks(self) -> None: 91 """Removes all of the hooks that modify the response after receiving it from the model.""" 92 self._post_hooks = []
Removes all of the hooks that modify the response after receiving it from the model.
94 def remove_exception_hooks(self) -> None: 95 """Removes all of the hooks that handle exceptions during model interactions.""" 96 self._exception_hooks = []
Removes all of the hooks that handle exceptions during model interactions.
98 @abstractmethod 99 def model_name(self) -> str: 100 """ 101 Returns the name of the model being used. 102 103 It can be treated as unique identifier for the model when paired with the `model_type`. 104 """ 105 pass
Returns the name of the model being used.
It can be treated as a unique identifier for the model when paired with the model_type.
107 @abstractmethod 108 def model_provider(self) -> ModelProvider: 109 """The name of the provider of this model (The Company that owns the model)""" 110 pass
The name of the provider of this model (The Company that owns the model)
112 @classmethod 113 @abstractmethod 114 def model_gateway(cls) -> ModelProvider: 115 """ 116 Gets the API distrubutor of the model. Note nessecarily the same as the model itself. 117 118 E.g. if you are calling openai LLM through Azure AI foundry 119 """ 120 pass
Gets the API distributor of the model. Not necessarily the same as the model itself.
E.g. if you are calling openai LLM through Azure AI foundry
143 def generator_wrapper( 144 self, 145 generator: Generator[str | Response, None, Response], 146 message_history: MessageHistory, 147 ) -> Generator[str | Response, None, Response]: 148 new_response: Response | None = None 149 for g in generator: 150 if isinstance(g, Response): 151 g.message_info 152 new_response = self._run_post_hooks(message_history, g) 153 yield new_response 154 155 yield g 156 157 assert new_response is not None, ( 158 "The generator did not yield a final Response object so nothing could be done." 159 ) 160 161 return new_response
173 def chat( 174 self, messages: MessageHistory 175 ) -> Response | Generator[str | Response, None, Response]: 176 """Chat with the model using the provided messages.""" 177 178 messages = self._run_pre_hooks(messages) 179 180 try: 181 response = self._chat(messages) 182 except Exception as e: 183 self._run_exception_hooks(messages, e) 184 raise e 185 186 if isinstance(response, Generator): 187 return self.generator_wrapper(response, messages) 188 189 response = self._run_post_hooks(messages, response) 190 return response
Chat with the model using the provided messages.
204 async def achat(self, messages: MessageHistory): 205 """Asynchronous chat with the model using the provided messages.""" 206 messages = self._run_pre_hooks(messages) 207 208 try: 209 response = await self._achat(messages) 210 except Exception as e: 211 self._run_exception_hooks(messages, e) 212 raise e 213 214 if isinstance(response, Generator): 215 return self.generator_wrapper(response, messages) 216 217 response = self._run_post_hooks(messages, response) 218 219 return response
Asynchronous chat with the model using the provided messages.
237 def structured(self, messages: MessageHistory, schema: Type[BaseModel]): 238 """Structured interaction with the model using the provided messages and output_schema.""" 239 messages = self._run_pre_hooks(messages) 240 241 try: 242 response = self._structured(messages, schema) 243 except Exception as e: 244 self._run_exception_hooks(messages, e) 245 raise e 246 247 if isinstance(response, Generator): 248 return self.generator_wrapper(response, messages) 249 250 response = self._run_post_hooks(messages, response) 251 252 return response
Structured interaction with the model using the provided messages and output_schema.
270 async def astructured(self, messages: MessageHistory, schema: Type[BaseModel]): 271 """Asynchronous structured interaction with the model using the provided messages and output_schema.""" 272 messages = self._run_pre_hooks(messages) 273 274 try: 275 response = await self._astructured(messages, schema) 276 except Exception as e: 277 self._run_exception_hooks(messages, e) 278 raise e 279 280 if isinstance(response, Generator): 281 return self.generator_wrapper(response, messages) 282 283 response = self._run_post_hooks(messages, response) 284 285 return response
Asynchronous structured interaction with the model using the provided messages and output_schema.
299 def chat_with_tools(self, messages: MessageHistory, tools: List[Tool]): 300 """Chat with the model using the provided messages and tools.""" 301 messages = self._run_pre_hooks(messages) 302 303 try: 304 response = self._chat_with_tools(messages, tools) 305 except Exception as e: 306 self._run_exception_hooks(messages, e) 307 raise e 308 309 if isinstance(response, Generator): 310 return self.generator_wrapper(response, messages) 311 312 response = self._run_post_hooks(messages, response) 313 return response
Chat with the model using the provided messages and tools.
327 async def achat_with_tools(self, messages: MessageHistory, tools: List[Tool]): 328 """Asynchronous chat with the model using the provided messages and tools.""" 329 messages = self._run_pre_hooks(messages) 330 331 try: 332 response = await self._achat_with_tools(messages, tools) 333 except Exception as e: 334 self._run_exception_hooks(messages, e) 335 raise e 336 337 if isinstance(response, Generator): 338 return self.generator_wrapper(response, messages) 339 340 response = self._run_post_hooks(messages, response) 341 342 return response
Asynchronous chat with the model using the provided messages and tools.
class ToolCall(BaseModel):
    """
    A simple model object that represents a tool call.

    This simple model represents a moment when a tool is called.
    """

    # Fix: description previously read "attatched"; now matches the spelling
    # used by ToolResponse's identifier description.
    identifier: str = Field(description="The identifier attached to this tool call.")
    name: str = Field(description="The name of the tool being called.")
    arguments: Dict[str, Any] = Field(
        description="The arguments provided as input to the tool."
    )

    def __str__(self):
        # Renders like a call expression, e.g. "search({'query': 'x'})".
        return f"{self.name}({self.arguments})"
A simple model object that represents a tool call.
This simple model represents a moment when a tool is called.
class ToolResponse(BaseModel):
    """
    A simple model object that represents a tool response.

    This simple model should be used when adding a response to a tool.
    """

    identifier: str = Field(
        description="The identifier attached to this tool response. This should match the identifier of the tool call."
    )
    name: str = Field(description="The name of the tool that generated this response.")
    # NOTE(review): `AnyStr` is a TypeVar intended for generic function
    # signatures, not a field annotation — presumably `str` (or `str | bytes`)
    # was meant. Confirm how pydantic validates this before changing it.
    result: AnyStr = Field(description="The result of the tool call.")

    def __str__(self):
        # Renders like "tool_name -> result".
        return f"{self.name} -> {self.result}"
A simple model object that represents a tool response.
This simple model should be used when adding a response to a tool.
class UserMessage(_StringOnlyContent[Role.user]):
    """
    A user-authored message. Note that we only support string input.

    Args:
        content: The content of the user message.
        attachment: The file attachment(s) for the user message. Can be a single string or a list of strings,
            containing file paths, URLs, or data URIs. Defaults to None.
        inject_prompt: Whether to inject prompt with context variables. Defaults to True.
    """

    def __init__(
        self,
        content: str | None = None,
        attachment: str | list[str] | None = None,
        inject_prompt: bool = True,
    ):
        if attachment is None:
            self.attachment = None
            # Without an attachment the message must carry actual text.
            if content is None:
                raise ValueError(
                    "UserMessage must have content if no attachment is provided."
                )
        else:
            # Normalize a lone attachment into a list, then wrap each entry.
            raw_items = attachment if isinstance(attachment, list) else [attachment]
            self.attachment = [Attachment(item) for item in raw_items]

            # An attachment-only message is allowed; fall back to empty text.
            if content is None:
                logger.warning(
                    "UserMessage initialized without content, setting to empty string."
                )
                content = ""

        super().__init__(content=content, role=Role.user, inject_prompt=inject_prompt)
Note that we only support string input
Arguments:
- content: The content of the user message.
- attachment: The file attachment(s) for the user message. Can be a single string or a list of strings, containing file paths, URLs, or data URIs. Defaults to None.
- inject_prompt: Whether to inject prompt with context variables. Defaults to True.
189 def __init__( 190 self, 191 content: str | None = None, 192 attachment: str | list[str] | None = None, 193 inject_prompt: bool = True, 194 ): 195 if attachment is not None: 196 if isinstance(attachment, list): 197 self.attachment = [Attachment(att) for att in attachment] 198 else: 199 self.attachment = [Attachment(attachment)] 200 201 if content is None: 202 logger.warning( 203 "UserMessage initialized without content, setting to empty string." 204 ) 205 content = "" 206 else: 207 self.attachment = None 208 209 if content is None: 210 raise ValueError( 211 "UserMessage must have content if no attachment is provided." 212 ) 213 super().__init__(content=content, role=Role.user, inject_prompt=inject_prompt)
A simple class that represents a message that an LLM can read.
Arguments:
- content: The content of the message. It can take on any of the following types:
- str: A simple string message.
- List[ToolCall]: A list of tool calls.
- ToolResponse: A tool response.
- BaseModel: A custom base model object.
- Stream: A stream object with a final_message and a generator.
- role: The role of the message (assistant, user, system, tool, etc.).
- inject_prompt (bool, optional): Whether to inject prompt with context variables. Defaults to True.
class SystemMessage(_StringOnlyContent[Role.system]):
    """
    Represents a system-role message.

    Args:
        content (str): The content of the system message.
        inject_prompt (bool, optional): Whether to inject prompt with context variables. Defaults to True.
    """

    def __init__(self, content: str, inject_prompt: bool = True):
        # Delegate straight to the string-only base with the system role fixed.
        super().__init__(
            content=content,
            role=Role.system,
            inject_prompt=inject_prompt,
        )
A simple class that represents a system message.
Arguments:
- content (str): The content of the system message.
- inject_prompt (bool, optional): Whether to inject prompt with context variables. Defaults to True.
225 def __init__(self, content: str, inject_prompt: bool = True): 226 super().__init__(content=content, role=Role.system, inject_prompt=inject_prompt)
A simple class that represents a message that an LLM can read.
Arguments:
- content: The content of the message. It can take on any of the following types:
- str: A simple string message.
- List[ToolCall]: A list of tool calls.
- ToolResponse: A tool response.
- BaseModel: A custom base model object.
- Stream: A stream object with a final_message and a generator.
- role: The role of the message (assistant, user, system, tool, etc.).
- inject_prompt (bool, optional): Whether to inject prompt with context variables. Defaults to True.
class AssistantMessage(Message[_T, Role.assistant], Generic[_T]):
    """
    Represents a message produced by the assistant.

    Args:
        content (_T): The content of the assistant message.
        inject_prompt (bool, optional): Whether to inject prompt with context variables. Defaults to True.
    """

    def __init__(self, content: _T, inject_prompt: bool = True):
        super().__init__(
            content=content,
            role=Role.assistant,
            inject_prompt=inject_prompt,
        )

        # Optionally stores the raw litellm message object so providers that
        # attach extra metadata (e.g. Gemini thought_signature) can round-trip
        # it back without any manual reconstruction.
        self.raw_litellm_message = None
A simple class that represents a message from the assistant.
Arguments:
- content (_T): The content of the assistant message.
- inject_prompt (bool, optional): Whether to inject prompt with context variables. Defaults to True.
238 def __init__(self, content: _T, inject_prompt: bool = True): 239 super().__init__( 240 content=content, role=Role.assistant, inject_prompt=inject_prompt 241 ) 242 243 # Optionally stores the raw litellm message object so providers that 244 # attach extra metadata (e.g. Gemini thought_signature) can round-trip 245 # it back without any manual reconstruction. 246 self.raw_litellm_message = None
A simple class that represents a message that an LLM can read.
Arguments:
- content: The content of the message. It can take on any of the following types:
- str: A simple string message.
- List[ToolCall]: A list of tool calls.
- ToolResponse: A tool response.
- BaseModel: A custom base model object.
- Stream: A stream object with a final_message and a generator.
- role: The role of the message (assistant, user, system, tool, etc.).
- inject_prompt (bool, optional): Whether to inject prompt with context variables. Defaults to True.
class Message(Generic[_T, _TRole]):
    """
    A base class that represents a message that an LLM can read.

    Note the content may take on a variety of allowable types.
    """

    def __init__(
        self,
        content: _T,
        role: _TRole,
        inject_prompt: bool = True,
    ):
        """
        A simple class that represents a message that an LLM can read.

        Args:
            content: The content of the message. It can take on any of the following types:
                - str: A simple string message.
                - List[ToolCall]: A list of tool calls.
                - ToolResponse: A tool response.
                - BaseModel: A custom base model object.
                - Stream: A stream object with a final_message and a generator.
            role: The role of the message (assistant, user, system, tool, etc.).
            inject_prompt (bool, optional): Whether to inject prompt with context variables. Defaults to True.
        """
        assert isinstance(role, Role)
        # Subclasses can veto unsupported content via validate_content.
        self.validate_content(content)

        self._content = content
        self._role = role
        self._inject_prompt = inject_prompt

    @classmethod
    def validate_content(cls, content: _T):
        # Default: accept anything. Subclasses may override to restrict content.
        pass

    @property
    def content(self) -> _T:
        """Collects the content of the message."""
        return self._content

    @property
    def role(self) -> _TRole:
        """Collects the role of the message."""
        return self._role

    @property
    def inject_prompt(self) -> bool:
        """
        A boolean that indicates whether this message should have context variables injected into it.
        """
        return self._inject_prompt

    @inject_prompt.setter
    def inject_prompt(self, value: bool):
        """
        Sets the inject_prompt property.
        """
        self._inject_prompt = value

    def __str__(self):
        return f"{self.role.value}: {self.content}"

    def __repr__(self):
        return str(self)

    @property
    def tool_calls(self):
        """Gets the tool calls attached to this message, if any; returns an empty list when there are none."""
        # Deep-copy so callers cannot mutate the stored content in place.
        content = self.content
        return list(deepcopy(content)) if isinstance(content, list) else []
A base class that represents a message that an LLM can read.
Note the content may take on a variety of allowable types.
92 def __init__( 93 self, 94 content: _T, 95 role: _TRole, 96 inject_prompt: bool = True, 97 ): 98 """ 99 A simple class that represents a message that an LLM can read. 100 101 Args: 102 content: The content of the message. It can take on any of the following types: 103 - str: A simple string message. 104 - List[ToolCall]: A list of tool calls. 105 - ToolResponse: A tool response. 106 - BaseModel: A custom base model object. 107 - Stream: A stream object with a final_message and a generator. 108 role: The role of the message (assistant, user, system, tool, etc.). 109 inject_prompt (bool, optional): Whether to inject prompt with context variables. Defaults to True. 110 """ 111 assert isinstance(role, Role) 112 self.validate_content(content) 113 self._content = content 114 self._role = role 115 self._inject_prompt = inject_prompt
A simple class that represents a message that an LLM can read.
Arguments:
- content: The content of the message. It can take on any of the following types:
- str: A simple string message.
- List[ToolCall]: A list of tool calls.
- ToolResponse: A tool response.
- BaseModel: A custom base model object.
- Stream: A stream object with a final_message and a generator.
- role: The role of the message (assistant, user, system, tool, etc.).
- inject_prompt (bool, optional): Whether to inject prompt with context variables. Defaults to True.
121 @property 122 def content(self) -> _T: 123 """Collects the content of the message.""" 124 return self._content
Collects the content of the message.
126 @property 127 def role(self) -> _TRole: 128 """Collects the role of the message.""" 129 return self._role
Collects the role of the message.
131 @property 132 def inject_prompt(self) -> bool: 133 """ 134 A boolean that indicates whether this message should be injected into from context. 135 """ 136 return self._inject_prompt
A boolean that indicates whether this message should have context variables injected into it.
151 @property 152 def tool_calls(self): 153 """Gets the tool calls attached to this message, if any. If there are none return and empty list.""" 154 tools: list[ToolCall] = [] 155 if isinstance(self.content, list): 156 tools.extend(deepcopy(self.content)) 157 158 return tools
Gets the tool calls attached to this message, if any. If there are none, an empty list is returned.
class ToolMessage(Message[ToolResponse, Role.tool]):
    """
    A message carrying the answer to a tool call.

    Args:
        content (ToolResponse): The tool response content for the message.
    """

    def __init__(self, content: ToolResponse):
        # Accept only ToolResponse payloads; anything else is a programming error.
        if isinstance(content, ToolResponse):
            super().__init__(content=content, role=Role.tool)
            return

        raise TypeError(
            f"A {self.__class__.__name__} needs a ToolResponse but got {type(content)}. Check the invoke function of the OutputLessToolCallLLM node. That is the only place to return a ToolMessage."
        )
A simple class that represents a message that is a tool call answer.
Arguments:
- content (ToolResponse): The tool response content for the message.
258 def __init__(self, content: ToolResponse): 259 if not isinstance(content, ToolResponse): 260 raise TypeError( 261 f"A {self.__class__.__name__} needs a ToolResponse but got {type(content)}. Check the invoke function of the OutputLessToolCallLLM node. That is the only place to return a ToolMessage." 262 ) 263 super().__init__(content=content, role=Role.tool)
A simple class that represents a message that an LLM can read.
Arguments:
- content: The content of the message. It can take on any of the following types:
- str: A simple string message.
- List[ToolCall]: A list of tool calls.
- ToolResponse: A tool response.
- BaseModel: A custom base model object.
- Stream: A stream object with a final_message and a generator.
- role: The role of the message (assistant, user, system, tool, etc.).
- inject_prompt (bool, optional): Whether to inject prompt with context variables. Defaults to True.
class MessageHistory(List[Message]):
    """
    A history of messages with full list semantics (`.append()`, `.remove()`,
    slicing, ...) plus history-specific helpers.
    """

    def __str__(self):
        """Render the history as one message per line."""
        return "\n".join(str(msg) for msg in self)

    def removed_system_messages(self) -> MessageHistory:
        """Return a new MessageHistory with every system-role message dropped."""
        non_system = [msg for msg in self if msg.role != Role.system]
        return MessageHistory(non_system)
A basic object that represents a history of messages. The object has all the same capability as a list such as
.remove(), .append(), etc.
class ModelProvider(str, Enum):
    """
    Enum of supported LLM model providers for RailTracks.

    Attributes:
        OPENAI: OpenAI models (e.g., GPT-3, GPT-4).
        ANTHROPIC: Anthropic models (e.g., Claude).
        GEMINI: Google Gemini models.
        HUGGINGFACE: HuggingFace-hosted models.
        AZUREAI: Azure OpenAI Service models.
        OLLAMA: Ollama local LLMs.
        COHERE: Cohere models.
        TELUS: Telus models.
        PORTKEY: PortKey gateway models.
        UNKNOWN: Fallback for providers that cannot be identified.
    """

    OPENAI = "OpenAI"
    ANTHROPIC = "Anthropic"
    # Member values are provider strings consumed downstream and do not always
    # match the member name (e.g. GEMINI/Vertex_AI, COHERE/cohere_chat).
    GEMINI = "Vertex_AI"
    HUGGINGFACE = "HuggingFace"
    AZUREAI = "AzureAI"
    OLLAMA = "Ollama"
    COHERE = "cohere_chat"
    TELUS = "Telus"
    PORTKEY = "PortKey"
    UNKNOWN = "Unknown"
Enum of supported LLM model providers for RailTracks.
Attributes:
- OPENAI: OpenAI models (e.g., GPT-3, GPT-4).
- ANTHROPIC: Anthropic models (e.g., Claude).
- GEMINI: Google Gemini models.
- HUGGINGFACE: HuggingFace-hosted models.
- AZUREAI: Azure OpenAI Service models.
- OLLAMA: Ollama local LLMs.
- COHERE: Cohere models.
class Tool:
    """
    A quasi-immutable class designed to represent a single Tool object.
    You pass in key details (name, description, and required parameters).
    """

    def __init__(
        self,
        name: str,
        detail: str,
        parameters: Iterable[Parameter] | Dict[str, Any] | None = None,
    ):
        """
        Creates a new Tool instance.

        Args:
            name: The name of the tool.
            detail: A detailed description of the tool.
            parameters: Parameters attached to this tool; a set of Parameter objects, or a dict.
        """

        if (
            isinstance(parameters, dict) and len(parameters) > 0
        ):  # if parameters is a JSON-schema dict, convert it into Parameter objects (checks should be done in validate_tool_params)
            # Default to {} so a schema without "properties" yields no
            # parameters instead of crashing on None.
            props = parameters.get("properties", {})
            required_fields = list(parameters.get("required", []))
            param_objs: List[Parameter] = []
            # The loop variable must NOT be called `name`: that would shadow
            # the `name` argument and corrupt `self._name` below.
            for prop_name, prop in props.items():
                param_objs.append(
                    parse_json_schema_to_parameter(
                        prop_name, prop, prop_name in required_fields
                    )
                )
            parameters = param_objs

        self._name = name
        self._detail = detail
        self._parameters = parameters

    @property
    def name(self) -> str:
        """Get the name of the tool."""
        return self._name

    @property
    def detail(self) -> str:
        """Returns the detailed description for this tool."""
        return self._detail

    @property
    def parameters(self) -> List[Parameter] | None:
        """Gets the parameters attached to this tool (if any)."""
        return self._parameters

    def __str__(self) -> str:
        """String representation of the tool."""
        # Always return a string: previously the `return` sat inside the `if`,
        # so a parameter-less tool stringified to None.
        if self._parameters:
            params_str = "{" + ", ".join(str(p) for p in self._parameters) + "}"
        else:
            params_str = "None"
        return f"Tool(name={self._name}, detail={self._detail}, parameters={params_str})"

    @classmethod
    def from_function(
        cls,
        func: Callable,
        /,
        *,
        name: str | None = None,
        details: str | None = None,
        params: Type[BaseModel] | Dict[str, Any] | List[Parameter] | None = None,
    ) -> Self:
        """
        Creates a Tool from a Python callable.
        Uses the function's docstring and type annotations to extract details and parameter info.

        KEY NOTE: No checking is done to ensure that the inserted params match the function signature

        Args:
            func: The function to create a tool from.
            name: Optional name for the tool. If not provided, uses the function's name.
            details: Optional detailed description for the tool. If not provided, extracts from the function's docstring.
            params: Optional parameters for the tool. If not provided, infers from the function's signature and docstring.

        Returns:
            A Tool instance representing the function.
        """
        # TODO: add set verification to ensure that the params match the function signature
        # Check if the function is a method in a class
        in_class = bool(func.__qualname__ and "." in func.__qualname__)

        # Parse the docstring to get parameter descriptions
        arg_descriptions = parse_docstring_args(func.__doc__ or "")

        try:
            # Get the function signature
            signature = inspect.signature(func)
        except ValueError:
            raise ToolCreationError(
                message="Cannot convert kwargs for builtin functions.",
                notes=[
                    "Please use a custom made function.",
                    "Eg.- \ndef my_custom_function(a: int, b: str):\n pass",
                ],
            )

        if name is not None:
            # TODO: add some checking here to ensure that the name is valid snake case.
            function_name = name
        else:
            function_name = func.__name__

        docstring = func.__doc__.strip() if func.__doc__ else ""

        if params is not None:
            parameters = params
        else:
            # Warn on ambiguous docstrings that declare more than one Args section.
            if docstring.count("Args:") > 1:
                warnings.warn("Multiple 'Args:' sections found in the docstring.")
            # Handlers are tried in order; the first that can handle the
            # annotation wins.
            handlers: List[ParameterHandler] = [
                PydanticModelHandler(),
                SequenceParameterHandler(),
                UnionParameterHandler(),
                DefaultParameterHandler(),
            ]

            parameters: List[Parameter] = []

            for param in signature.parameters.values():
                # Skip 'self'/'cls' parameters for methods defined in a class
                if in_class and (param.name == "self" or param.name == "cls"):
                    continue

                description = arg_descriptions.get(param.name, "")

                # A parameter without a default value is required
                required = param.default == inspect.Parameter.empty

                handler = next(h for h in handlers if h.can_handle(param.annotation))

                param_obj = handler.create_parameter(
                    param.name, param.annotation, description, required
                )

                parameters.append(param_obj)

        if details is not None:
            main_description = details
        else:
            main_description = extract_main_description(docstring)

        return Tool(
            name=function_name,
            detail=main_description,
            parameters=parameters,
        )

    @classmethod
    def from_mcp(cls, tool) -> Self:
        """
        Creates a Tool from an MCP tool object.

        Args:
            tool: The MCP tool to create a Tool from.

        Returns:
            A Tool instance representing the MCP tool.
        """
        input_schema = getattr(tool, "inputSchema", None)
        if not input_schema or input_schema["type"] != "object":
            raise ToolCreationError(
                message="The inputSchema for an MCP Tool must be 'object'. ",
                notes=[
                    "If an MCP tool has a different output_schema, create a GitHub issue and support will be added."
                ],
            )

        properties = input_schema.get("properties", {})
        required_fields = set(input_schema.get("required", []))
        # Build a list (not a set) so parameter order is deterministic and
        # Parameter objects need not be hashable; mirrors __init__'s dict path.
        param_objs = [
            parse_json_schema_to_parameter(prop_name, prop, prop_name in required_fields)
            for prop_name, prop in properties.items()
        ]

        return cls(name=tool.name, detail=tool.description, parameters=param_objs)
A quasi-immutable class designed to represent a single Tool object. You pass in key details (name, description, and required parameters).
def __init__(
    self,
    name: str,
    detail: str,
    parameters: Iterable[Parameter] | Dict[str, Any] | None = None,
):
    """
    Creates a new Tool instance.

    Args:
        name: The name of the tool.
        detail: A detailed description of the tool.
        parameters: Parameters attached to this tool; a set of Parameter objects, or a dict.
    """

    if (
        isinstance(parameters, dict) and len(parameters) > 0
    ):  # if parameters is a JSON-output_schema, convert into Parameter objects (Checks should be done in validate_tool_params)
        props = parameters.get("properties")
        required_fields = list(parameters.get("required", []))
        param_objs: List[Parameter] = []
        # NOTE(review): this loop variable shadows the `name` parameter, so
        # `self._name` below is set to the LAST property name whenever a dict
        # schema is passed — likely a bug; rename the loop variable to fix.
        for name, prop in props.items():
            param_objs.append(
                parse_json_schema_to_parameter(name, prop, name in required_fields)
            )
        parameters = param_objs

    self._name = name
    self._detail = detail
    self._parameters = parameters
Creates a new Tool instance.
Arguments:
- name: The name of the tool.
- detail: A detailed description of the tool.
- parameters: Parameters attached to this tool; a set of Parameter objects, or a dict.
@property
def detail(self) -> str:
    """The detailed description attached to this tool."""
    return self._detail
Returns the detailed description for this tool.
@property
def parameters(self) -> List[Parameter] | None:
    """The parameters attached to this tool, or None when it has none."""
    return self._parameters
Gets the parameters attached to this tool (if any).
@classmethod
def from_function(
    cls,
    func: Callable,
    /,
    *,
    name: str | None = None,
    details: str | None = None,
    params: Type[BaseModel] | Dict[str, Any] | List[Parameter] | None = None,
) -> Self:
    """
    Creates a Tool from a Python callable.
    Uses the function's docstring and type annotations to extract details and parameter info.

    KEY NOTE: No checking is done to ensure that the inserted params match the function signature

    Args:
        func: The function to create a tool from.
        name: Optional name for the tool. If not provided, uses the function's name.
        details: Optional detailed description for the tool. If not provided, extracts from the function's docstring.
        params: Optional parameters for the tool. If not provided, infers from the function's signature and docstring.

    Returns:
        A Tool instance representing the function.
    """
    # TODO: add set verification to ensure that the params match the function signature
    # Check if the function is a method in a class
    in_class = bool(func.__qualname__ and "." in func.__qualname__)

    # Parse the docstring to get parameter descriptions
    arg_descriptions = parse_docstring_args(func.__doc__ or "")

    try:
        # Get the function signature
        signature = inspect.signature(func)
    except ValueError:
        # NOTE(review): "cutom" in the user-facing note below is a typo for "custom".
        raise ToolCreationError(
            message="Cannot convert kwargs for builtin functions.",
            notes=[
                "Please use a cutom made function.",
                "Eg.- \ndef my_custom_function(a: int, b: str):\n pass",
            ],
        )

    if name is not None:
        # TODO: add some checking here to ensure that the name is valid snake case.
        function_name = name
    else:
        function_name = func.__name__

    docstring = func.__doc__.strip() if func.__doc__ else ""

    if params is not None:
        parameters = params
    else:
        # Check for multiple Args sections (warning); only done on this path.
        if docstring.count("Args:") > 1:
            warnings.warn("Multiple 'Args:' sections found in the docstring.")
        # Handlers are tried in order; the first whose can_handle() accepts
        # the annotation produces the Parameter.
        handlers: List[ParameterHandler] = [
            PydanticModelHandler(),
            SequenceParameterHandler(),
            UnionParameterHandler(),
            DefaultParameterHandler(),
        ]

        parameters: List[Parameter] = []

        for param in signature.parameters.values():
            # Skip 'self'/'cls' parameters for class methods
            if in_class and (param.name == "self" or param.name == "cls"):
                continue

            description = arg_descriptions.get(param.name, "")

            # A parameter with no default value is required
            required = param.default == inspect.Parameter.empty

            handler = next(h for h in handlers if h.can_handle(param.annotation))

            param_obj = handler.create_parameter(
                param.name, param.annotation, description, required
            )

            parameters.append(param_obj)

    if details is not None:
        main_description = details
    else:
        main_description = extract_main_description(docstring)

    tool_info = Tool(
        name=function_name,
        detail=main_description,
        parameters=parameters,
    )
    return tool_info
Creates a Tool from a Python callable. Uses the function's docstring and type annotations to extract details and parameter info.
KEY NOTE: No checking is done to ensure that the inserted params match the function signature
Arguments:
- func: The function to create a tool from.
- name: Optional name for the tool. If not provided, uses the function's name.
- details: Optional detailed description for the tool. If not provided, extracts from the function's docstring.
- params: Optional parameters for the tool. If not provided, infers from the function's signature and docstring.
Returns:
A Tool instance representing the function.
@classmethod
def from_mcp(cls, tool) -> Self:
    """
    Creates a Tool from an MCP tool object.

    Args:
        tool: The MCP tool to create a Tool from.

    Returns:
        A Tool instance representing the MCP tool.
    """
    # Only object-typed input schemas are supported; everything else is rejected.
    input_schema = getattr(tool, "inputSchema", None)
    if not input_schema or input_schema["type"] != "object":
        raise ToolCreationError(
            message="The inputSchema for an MCP Tool must be 'object'. ",
            notes=[
                "If an MCP tool has a different output_schema, create a GitHub issue and support will be added."
            ],
        )

    properties = input_schema.get("properties", {})
    required_fields = set(input_schema.get("required", []))
    # NOTE(review): a set makes parameter ordering nondeterministic and
    # requires Parameter to be hashable — confirm this is intentional.
    param_objs = set()
    for name, prop in properties.items():
        required = name in required_fields
        param_objs.add(parse_json_schema_to_parameter(name, prop, required))

    return cls(name=tool.name, detail=tool.description, parameters=param_objs)
Creates a Tool from an MCP tool object.
Arguments:
- tool: The MCP tool to create a Tool from.
Returns:
A Tool instance representing the MCP tool.
class AnthropicLLM(ProviderLLMWrapper[_TStream], Generic[_TStream]):
    """A wrapper that provides access to the Anthropic API."""

    @classmethod
    def model_gateway(cls) -> ModelProvider:
        """The gateway used to route requests for this wrapper."""
        return ModelProvider.ANTHROPIC
A large base class that wraps around a litellm model.
Note that the model object should be interacted with via the methods provided in the wrapper class:
`chat`, `structured`, `stream_chat`, and `chat_with_tools`.
Each individual API should implement the required abstract_methods in order to allow users to interact with a
model of that type.
class AzureAILLM(LiteLLMWrapper[_TStream]):
    """LLM wrapper for Azure AI models, backed by litellm."""

    @classmethod
    def model_gateway(cls):
        """Gateway identifier: requests are routed through Azure AI."""
        return ModelProvider.AZUREAI

    def model_provider(self) -> ModelProvider:
        # NOTE(review): pass-through override — adds nothing over the base
        # class; consider removing.
        return super().model_provider()

    def __init__(
        self,
        model_name: str,
        *,
        temperature: float | None = None,
        **kwargs,
    ):
        """Initialize an Azure AI LLM instance.

        Args:
            model_name (str): Name of the Azure AI model to use.
            temperature (float | None, optional): Sampling temperature for generation (e.g. 0.0–2.0).
                If None, the provider default is used.
            **kwargs: Additional arguments passed to the parent LiteLLMWrapper.

        Raises:
            AzureAIError: If the specified model is not available or if there are issues with the Azure AI service.
        """
        super().__init__(model_name, temperature=temperature, **kwargs)

        # Names are lowercased on both sides because matching against litellm's
        # Azure model list is case sensitive.
        self._available_models = [model.lower() for model in litellm.azure_ai_models]
        self._is_model_available()

        self.logger = logger

    def chat(self, messages, **kwargs):
        """Chat with the model; wraps litellm server errors in AzureAIError."""
        try:
            return super().chat(messages, **kwargs)
        except litellm.InternalServerError as e:
            raise AzureAIError(
                reason=f"Azure AI LLM error while processing the request: {e}"
            ) from e

    def chat_with_tools(self, messages, tools, **kwargs):
        """Chat with tools; fails fast when the model lacks function calling."""
        if not litellm.supports_function_calling(model=self._model_name.lower()):
            raise FunctionCallingNotSupportedError(self._model_name)

        try:
            return super().chat_with_tools(messages, tools, **kwargs)
        except litellm.InternalServerError as e:
            raise AzureAIError(
                reason=f"Azure AI LLM error while processing the request: {e}"
            ) from e

    def _is_model_available(self) -> None:
        """Raise AzureAIError if the configured model is not in the Azure list."""
        if self._model_name.lower() not in self._available_models:
            raise AzureAIError(
                reason=(
                    f"Model '{self._model_name}' is not available. "
                    f"Available models: {self._available_models}"
                )
            )

    def _tool_calling_supported(self) -> list[tuple[str, bool]]:
        """Per-model function-calling support for every available Azure model.

        NOTE(review): this returns a list of (model, supported) pairs, not a
        single bool — the previous `-> bool` annotation was wrong; the
        annotation has been corrected to match the code.
        """
        tool_calling_supported = [
            (model, litellm.supports_function_calling(model=model))
            for model in self._available_models
        ]
        return tool_calling_supported
A large base class that wraps around a litellm model.
Note that the model object should be interacted with via the methods provided in the wrapper class:
`chat`, `structured`, `stream_chat`, and `chat_with_tools`.
Each individual API should implement the required abstract_methods in order to allow users to interact with a
model of that type.
def __init__(
    self,
    model_name: str,
    *,
    temperature: float | None = None,
    **kwargs,
):
    """Initialize an Azure AI LLM instance.

    Args:
        model_name (str): Name of the Azure AI model to use.
        temperature (float | None, optional): Sampling temperature for generation (e.g. 0.0–2.0).
            If None, the provider default is used.
        **kwargs: Additional arguments passed to the parent LiteLLMWrapper.

    Raises:
        AzureAIError: If the specified model is not available or if there are issues with the Azure AI service.
    """
    super().__init__(model_name, temperature=temperature, **kwargs)

    # Lowercased because matching names to Azure models is case sensitive.
    self._available_models = [model.lower() for model in litellm.azure_ai_models]
    self._is_model_available()

    self.logger = logger
Initialize an Azure AI LLM instance.
Arguments:
- model_name (str): Name of the Azure AI model to use.
- temperature (float | None, optional): Sampling temperature for generation (e.g. 0.0–2.0). If None, the provider default is used.
- **kwargs: Additional arguments passed to the parent LiteLLMWrapper.
Raises:
- AzureAIError: If the specified model is not available or if there are issues with the Azure AI service.
Gets the API distributor of the model. Not necessarily the same as the model itself.
E.g. if you are calling an OpenAI LLM through Azure AI Foundry.
def chat(self, messages, **kwargs):
    """Chat with the model, translating litellm server errors into AzureAIError."""
    try:
        return super().chat(messages, **kwargs)
    except litellm.InternalServerError as e:
        raise AzureAIError(
            reason=f"Azure AI LLM error while processing the request: {e}"
        ) from e
Chat with the model using the provided messages.
def chat_with_tools(self, messages, tools, **kwargs):
    """Chat with the model and tools; raises if function calling is unsupported."""
    # Fail fast before issuing the request when litellm says the model
    # cannot do function calling.
    if not litellm.supports_function_calling(model=self._model_name.lower()):
        raise FunctionCallingNotSupportedError(self._model_name)

    try:
        return super().chat_with_tools(messages, tools, **kwargs)
    except litellm.InternalServerError as e:
        raise AzureAIError(
            reason=f"Azure AI LLM error while processing the request: {e}"
        ) from e
Chat with the model using the provided messages and tools.
class CohereLLM(ProviderLLMWrapper):
    """A wrapper that provides access to the Cohere API."""

    @classmethod
    def model_gateway(cls):
        """The gateway used to route requests for this wrapper."""
        return ModelProvider.COHERE
A wrapper that provides access to the Cohere API.
class HuggingFaceLLM(ProviderLLMWrapper[_TStream]):
    """Wrapper for HuggingFace Serverless Inference Provider models."""

    def _pre_init_provider_check(self, model_name):
        """Called by __init__ before the super call in ProviderLLMWrapper.

        Normalizes the model name by stripping a leading provider prefix, then
        validates its shape. Returns the normalized name.
        """
        # For huggingface models there is no good way of using `get_llm_provider`
        # to check that the model is valid, so we only normalize the prefix here.
        # If the name is invalid anyway, the error surfaces at runtime during
        # `litellm.completion`. See `_litellm_wrapper.py`.
        if model_name.startswith(self.model_provider().lower()):
            model_name = "/".join(model_name.split("/")[1:])
        # Validate explicitly instead of with `assert`, which is silently
        # stripped when Python runs with -O.
        if len(model_name.split("/")) != 3:
            raise ModelNotFoundError(
                reason="Invalid model name",
                notes=[
                    "Model name must be of the format `huggingface/<provider>/<hf_org_or_user>/<hf_model>` or `<provider>/<hf_org_or_user>/<hf_model>`",
                    "We only support the huggingface Serverless Inference Provider Models.",
                    "Provider List: https://docs.litellm.ai/docs/providers",
                ],
            )
        return model_name

    def model_provider(self) -> ModelProvider:
        # TODO implement logic for all the possible providers attached to hugging face.
        return ModelProvider.HUGGINGFACE

    def _validate_tool_calling_support(self):
        # Special exception case for huggingface: across the wide range of HF
        # models, `litellm.supports_function_calling` isn't always accurate,
        # so skip the check; any error surfaces at runtime during
        # `litellm.completion`.
        pass

    @classmethod
    def model_gateway(cls):
        return ModelProvider.HUGGINGFACE
A large base class that wraps around a litellm model.
Note that the model object should be interacted with via the methods provided in the wrapper class:
`chat`, `structured`, `stream_chat`, and `chat_with_tools`.
Each individual API should implement the required abstract_methods in order to allow users to interact with a
model of that type.
class OpenAILLM(ProviderLLMWrapper[_TStream], Generic[_TStream]):
    """A wrapper that provides access to the OPENAI API."""

    @classmethod
    def model_gateway(cls):
        """The gateway used to route requests for this wrapper."""
        return ModelProvider.OPENAI
A wrapper that provides access to the OPENAI API.
class GeminiLLM(ProviderLLMWrapper[_TStream]):
    """Wrapper exposing Google Gemini models via litellm."""

    def full_model_name(self, model_name: str) -> str:
        # litellm requires the 'gemini/{model_name}' form for Gemini; this is
        # applied only after the checks in ProviderLLMWrapper's init have run.
        return f"gemini/{model_name}"

    @classmethod
    def model_gateway(cls):
        # litellm uses this value as the Gemini provider; consumed by the
        # checks in _provider_wrapper.py.
        return ModelProvider.GEMINI
A large base class that wraps around a litellm model.
Note that the model object should be interacted with via the methods provided in the wrapper class:
`chat`, `structured`, `stream_chat`, and `chat_with_tools`.
Each individual API should implement the required abstract_methods in order to allow users to interact with a
model of that type.
def full_model_name(self, model_name: str) -> str:
    """Return the litellm-qualified model name for Gemini."""
    # litellm needs the 'gemini/{model_name}' format; this runs after the
    # checks in ProviderLLMWrapper's init.
    return f"gemini/{model_name}"
After the provider is checked, this method is called to get the full model name
@classmethod
def model_gateway(cls):
    """Provider value litellm uses for Gemini; checked in _provider_wrapper.py."""
    return ModelProvider.GEMINI
Gets the API distributor of the model. Not necessarily the same as the model itself.
E.g. if you are calling an OpenAI LLM through Azure AI Foundry.
25class OllamaLLM(LiteLLMWrapper[_TStream]): 26 def __init__( 27 self, 28 model_name: str, 29 stream: _TStream = False, 30 domain: Literal["default", "auto", "custom"] = "default", 31 custom_domain: str | None = None, 32 temperature: float | None = None, 33 **kwargs, 34 ): 35 """Initialize an Ollama LLM instance. 36 37 Args: 38 model_name (str): Name of the Ollama model to use. 39 stream (bool): Whether to stream the response. 40 domain (Literal["default", "auto", "custom"], optional): The domain configuration mode. 41 - "default": Uses the default localhost domain (http://localhost:11434) 42 - "auto": Uses the OLLAMA_HOST environment variable, raises OllamaError if not set 43 - "custom": Uses the provided custom_domain parameter, raises OllamaError if not provided 44 Defaults to "default". 45 custom_domain (str | None, optional): Custom domain URL to use when domain is set to "custom". 46 Must be provided if domain="custom". Defaults to None. 47 temperature (float | None, optional): Sampling temperature for generation (e.g. 0.0–2.0). 48 If None, the provider default is used. 49 **kwargs: Additional arguments passed to the parent LiteLLMWrapper. 
50 51 Raises: 52 OllamaError: If: 53 - domain is "auto" and OLLAMA_HOST environment variable is not set 54 - domain is "custom" and custom_domain is not provided 55 - specified model is not available on the server 56 RequestException: If connection to Ollama server fails 57 """ 58 if not model_name.startswith("ollama/"): 59 logger.warning( 60 f"Prepending 'ollama/' to model name '{model_name}' for Ollama" 61 ) 62 model_name = f"ollama/{model_name}" 63 super().__init__( 64 model_name=model_name, 65 stream=stream, 66 temperature=temperature, 67 **kwargs, 68 ) 69 70 match domain: 71 case "default": 72 self.domain = DEFAULT_DOMAIN 73 case "auto": 74 domain_from_env = os.getenv("OLLAMA_HOST") 75 if domain_from_env is None: 76 raise OllamaError("OLLAMA_HOST environment variable not set") 77 self.domain = domain_from_env 78 case "custom": 79 if custom_domain is None: 80 raise OllamaError( 81 "Custom domain must be provided when domain is set to 'custom'" 82 ) 83 self.domain = custom_domain 84 85 self._run_check( 86 "api/tags" 87 ) # This will crash the workflow if Ollama is not setup properly 88 89 def _run_check(self, endpoint: str): 90 url = f"{self.domain}/{endpoint.lstrip('/')}" 91 try: 92 response = requests.get(url) 93 response.raise_for_status() 94 95 models = response.json() 96 97 model_names = {model["name"] for model in models["models"]} 98 99 model_name = self.model_name().rsplit("/", 1)[ 100 -1 101 ] # extract the model name if the provider is also included 102 103 if model_name not in model_names: 104 error_msg = f"{self.model_name()} not available on server {self.domain}. 
Avaiable models are: {model_names}" 105 logger.error(error_msg) 106 raise OllamaError(error_msg) 107 108 except OllamaError as e: 109 logger.error(e) 110 raise 111 112 except requests.exceptions.RequestException as e: 113 logger.error(e) 114 raise 115 116 def chat_with_tools(self, messages, tools, **kwargs): 117 if not supports_function_calling(model=self._model_name): 118 raise FunctionCallingNotSupportedError(self._model_name) 119 120 return super().chat_with_tools(messages, tools, **kwargs) 121 122 @classmethod 123 def model_gateway(cls): 124 return ModelProvider.OLLAMA 125 126 def model_provider(self) -> ModelProvider: 127 """Returns the name of the provider""" 128 return self.model_gateway()
A large base class that wraps around a litellm model.
Note that the model object should be interacted with via the methods provided in the wrapper class:
`chat`, `structured`, `stream_chat`, and `chat_with_tools`.
Each individual API should implement the required abstract_methods in order to allow users to interact with a
model of that type.
def __init__(
    self,
    model_name: str,
    stream: _TStream = False,
    domain: Literal["default", "auto", "custom"] = "default",
    custom_domain: str | None = None,
    temperature: float | None = None,
    **kwargs,
):
    """Initialize an Ollama LLM instance.

    Args:
        model_name (str): Name of the Ollama model to use.
        stream (bool): Whether to stream the response.
        domain (Literal["default", "auto", "custom"], optional): The domain configuration mode.
            - "default": Uses the default localhost domain (http://localhost:11434)
            - "auto": Uses the OLLAMA_HOST environment variable, raises OllamaError if not set
            - "custom": Uses the provided custom_domain parameter, raises OllamaError if not provided.
            Defaults to "default".
        custom_domain (str | None, optional): Custom domain URL to use when domain is set to "custom".
            Must be provided if domain="custom". Defaults to None.
        temperature (float | None, optional): Sampling temperature for generation (e.g. 0.0–2.0).
            If None, the provider default is used.
        **kwargs: Additional arguments passed to the parent LiteLLMWrapper.

    Raises:
        OllamaError: If:
            - domain is "auto" and OLLAMA_HOST environment variable is not set
            - domain is "custom" and custom_domain is not provided
            - specified model is not available on the server
        RequestException: If connection to Ollama server fails
    """
    # litellm addresses Ollama models through an 'ollama/' prefix.
    if not model_name.startswith("ollama/"):
        logger.warning(
            f"Prepending 'ollama/' to model name '{model_name}' for Ollama"
        )
        model_name = f"ollama/{model_name}"
    super().__init__(
        model_name=model_name,
        stream=stream,
        temperature=temperature,
        **kwargs,
    )

    # NOTE(review): the match has no wildcard case — an unexpected `domain`
    # value leaves self.domain unset and fails later with AttributeError.
    match domain:
        case "default":
            self.domain = DEFAULT_DOMAIN
        case "auto":
            domain_from_env = os.getenv("OLLAMA_HOST")
            if domain_from_env is None:
                raise OllamaError("OLLAMA_HOST environment variable not set")
            self.domain = domain_from_env
        case "custom":
            if custom_domain is None:
                raise OllamaError(
                    "Custom domain must be provided when domain is set to 'custom'"
                )
            self.domain = custom_domain

    self._run_check(
        "api/tags"
    )  # This will crash the workflow if Ollama is not setup properly
Initialize an Ollama LLM instance.
Arguments:
- model_name (str): Name of the Ollama model to use.
- stream (bool): Whether to stream the response.
- domain (Literal["default", "auto", "custom"], optional): The domain configuration mode.
- "default": Uses the default localhost domain (http://localhost:11434)
- "auto": Uses the OLLAMA_HOST environment variable, raises OllamaError if not set
- "custom": Uses the provided custom_domain parameter, raises OllamaError if not provided Defaults to "default".
- custom_domain (str | None, optional): Custom domain URL to use when domain is set to "custom". Must be provided if domain="custom". Defaults to None.
- temperature (float | None, optional): Sampling temperature for generation (e.g. 0.0–2.0). If None, the provider default is used.
- **kwargs: Additional arguments passed to the parent LiteLLMWrapper.
Raises:
- OllamaError: If:
- domain is "auto" and OLLAMA_HOST environment variable is not set
- domain is "custom" and custom_domain is not provided
- specified model is not available on the server
- RequestException: If connection to Ollama server fails
def chat_with_tools(self, messages, tools, **kwargs):
    """Chat with the model and tools; raises if function calling is unsupported."""
    # Fail fast before issuing the request.
    if not supports_function_calling(model=self._model_name):
        raise FunctionCallingNotSupportedError(self._model_name)

    return super().chat_with_tools(messages, tools, **kwargs)
Chat with the model using the provided messages and tools.
Gets the API distributor of the model. Not necessarily the same as the model itself.
E.g. if you are calling an OpenAI LLM through Azure AI Foundry.
class PortKeyLLM(OpenAICompatibleProvider[_TStream]):
    """OpenAI-compatible wrapper that routes requests through the PortKey gateway."""

    def __init__(
        self,
        model_name: str,
        *,
        stream: _TStream = False,
        api_key: str | None = None,
        temperature: float | None = None,
    ):
        # The portkey_ai dependency is optional; give an actionable error.
        try:
            from portkey_ai import Portkey
        except ImportError:
            raise ImportError(
                "Could not import portkey_ai package. Use railtracks[portkey]"
            )

        # Fall back to the environment when no key is passed explicitly.
        if api_key is None:
            try:
                api_key = os.environ["PORTKEY_API_KEY"]
            except KeyError:
                raise KeyError("Please set your PORTKEY_API_KEY in your .env file.")

        client = Portkey(api_key=api_key)

        # Route through PortKey's OpenAI-compatible endpoint.
        super().__init__(
            model_name,
            stream=stream,
            api_base=client.base_url,
            api_key=client.api_key,
            temperature=temperature,
        )

    @classmethod
    def model_gateway(cls):
        return ModelProvider.PORTKEY

    def model_provider(self):
        # TODO: Implement specialized logic to determine the model provider
        return ModelProvider.PORTKEY
A large base class that wraps around a litellm model.
Note that the model object should be interacted with via the methods provided in the wrapper class:
`chat`, `structured`, `stream_chat`, `chat_with_tools`
Each individual API should implement the required abstract_methods in order to allow users to interact with a
model of that type.
def __init__(
    self,
    model_name: str,
    *,
    stream: _TStream = False,
    api_key: str | None = None,
    temperature: float | None = None,
):
    """Build a PortKey-backed LLM wrapper.

    Args:
        model_name: Name of the model to route through PortKey.
        stream: Whether responses should be streamed.
        api_key: PortKey API key; falls back to the PORTKEY_API_KEY env var.
        temperature: Sampling temperature; provider default when None.

    Raises:
        ImportError: If the optional portkey_ai package is not installed.
        KeyError: If no api_key is given and PORTKEY_API_KEY is unset.
    """
    # portkey_ai is an optional extra (railtracks[portkey]); import lazily.
    try:
        from portkey_ai import Portkey
    except ImportError:
        raise ImportError(
            "Could not import portkey_ai package. Use railtracks[portkey]"
        )

    if api_key is None:
        try:
            api_key = os.environ["PORTKEY_API_KEY"]
        except KeyError:
            raise KeyError("Please set your PORTKEY_API_KEY in your .env file.")

    # The Portkey client resolves the gateway base URL for us.
    portkey = Portkey(api_key=api_key)

    super().__init__(
        model_name,
        stream=stream,
        api_base=portkey.base_url,
        api_key=portkey.api_key,
        temperature=temperature,
    )
class OpenAICompatibleProvider(ProviderLLMWrapper[_TStream], ABC):
    """Base wrapper for gateways that expose an OpenAI-compatible endpoint.

    A generic OpenAI-compatible server cannot be introspected, so the
    provider and tool-calling checks inherited from ProviderLLMWrapper are
    deliberately turned into no-ops here.
    """

    def __init__(
        self,
        model_name: str,
        *,
        stream: _TStream = False,
        api_base: str,
        api_key: str,
        temperature: float | None = None,
    ):
        """Forward endpoint credentials and settings to the parent wrapper."""
        super().__init__(
            model_name,
            stream=stream,
            api_base=api_base,
            api_key=api_key,
            temperature=temperature,
        )

    @classmethod
    def model_gateway(cls) -> ModelProvider:
        """A generic OpenAI-compatible endpoint has no known gateway."""
        return ModelProvider.UNKNOWN

    def full_model_name(self, model_name: str) -> str:
        """Prefix the name so the request is addressed via the OpenAI protocol."""
        return "openai/" + model_name

    def _pre_init_provider_check(self, model_name: str):
        # There is no way to validate a model against an arbitrary
        # OpenAI-compatible server, so this check is a pass-through.
        return model_name

    def _validate_tool_calling_support(self):
        # Tool-calling support cannot be probed on a generic endpoint; skip.
        return
A large base class that wraps around a litellm model.
Note that the model object should be interacted with via the methods provided in the wrapper class:
`chat`, `structured`, `stream_chat`, `chat_with_tools`
Each individual API should implement the required abstract_methods in order to allow users to interact with a
model of that type.
After the provider is checked, this method is called to get the full model name
class Parameter(ABC):
    """
    Abstract Base Parameter class with default simple parameter behavior.

    Subclasses override ``param_type`` (and usually ``to_json_schema``) to
    model richer JSON-schema types such as arrays, objects, and unions.
    """

    param_type: Optional[Union[str, List[str]]] = None  # class var for default type

    def __init__(
        self,
        name: str,
        description: Optional[str] = None,
        required: bool = True,
        default: Any = None,
        enum: Optional[List[Any]] = None,
        default_present: bool = False,
        param_type: Optional[Union[str, List[str]]] = None,
    ):
        """
        Initialize a Parameter instance.

        Args:
            name (str): Name of the parameter.
            description (Optional[str]): Description of the parameter.
            required (bool): Whether the parameter is required.
            default (Any): Default value for the parameter.
            enum (Optional[List[Any]]): Allowed values for the parameter.
            default_present (bool): Whether a default value is explicitly set.
            param_type (Optional[Union[str, List[str]]]): The type or types of the parameter.
        """
        self.name = name
        # None collapses to "" so schema code can test plain truthiness.
        self.description = description or ""
        self.required = required
        self.default = default
        self.enum = enum
        self.default_present = default_present
        if param_type is not None:
            # Accept list[str], str, ParameterType, or a list mixing str and
            # ParameterType; normalize everything to str or List[str].
            if isinstance(param_type, list):
                self.param_type = [
                    pt.value if isinstance(pt, ParameterType) else pt
                    for pt in param_type
                ]
            else:
                self.param_type = (
                    param_type.value
                    if isinstance(param_type, ParameterType)
                    else param_type
                )
        # NOTE: a former `elif` branch re-assigned self.param_type = None when
        # it was already None — a no-op (the class attribute provides None),
        # so it was removed.

    def to_json_schema(self) -> Dict[str, Any]:
        """Serialize this parameter to a JSON-schema property dict."""
        # Subclasses may leave param_type as a ParameterType class attribute,
        # so unwrap the enum value here as well.
        schema_dict: Dict[str, Any] = {
            "type": self.param_type.value
            if isinstance(self.param_type, ParameterType)
            else self.param_type
        }
        if self.description:
            schema_dict["description"] = self.description

        # Handle enum
        if self.enum:
            schema_dict["enum"] = self.enum

        # Handle default.
        # default can be None, 0, False; default_present distinguishes an
        # explicit default from the implicit None.
        if self.default_present:
            schema_dict["default"] = self.default
        elif isinstance(self.param_type, list) and "none" in self.param_type:
            # Nullable union type: make the implicit None default explicit.
            schema_dict["default"] = None

        return schema_dict

    def __repr__(self) -> str:
        """Return a debug representation of the parameter."""
        return (
            f"Parameter(name={self.name!r}, param_type={self.param_type!r}, "
            f"description={self.description!r}, required={self.required!r}, "
            f"default={self.default!r}, enum={self.enum!r})"
        )
Abstract Base Parameter class with default simple parameter behavior.
def __init__(
    self,
    name: str,
    description: Optional[str] = None,
    required: bool = True,
    default: Any = None,
    enum: Optional[List[Any]] = None,
    default_present: bool = False,
    param_type: Optional[Union[str, List[str]]] = None,
):
    """
    Initialize a Parameter instance.

    Args:
        name (str): Name of the parameter.
        description (Optional[str]): Description of the parameter.
        required (bool): Whether the parameter is required.
        default (Any): Default value for the parameter.
        enum (Optional[List[Any]]): Allowed values for the parameter.
        default_present (bool): Whether a default value is explicitly set.
        param_type (Optional[Union[str, List[str]]]): The type or types of the parameter.
    """
    self.name = name
    # None collapses to "" so schema code can test plain truthiness.
    self.description = description or ""
    self.required = required
    self.default = default
    self.enum = enum
    self.default_present = default_present
    if param_type is not None:
        # Accept either list[str], str, or ParameterType enum or list of them
        # Normalize to str or List[str]
        if isinstance(param_type, list):
            self.param_type = [
                pt.value if isinstance(pt, ParameterType) else pt
                for pt in param_type
            ]
        else:
            self.param_type = (
                param_type.value
                if isinstance(param_type, ParameterType)
                else param_type
            )
    elif hasattr(self, "param_type") and self.param_type is None:
        # NOTE(review): no-op — self.param_type is already None here via the
        # class attribute; consider removing this branch.
        self.param_type = None
Initialize a Parameter instance.
Arguments:
- name (str): Name of the parameter.
- description (Optional[str]): Description of the parameter.
- required (bool): Whether the parameter is required.
- default (Any): Default value for the parameter.
- enum (Optional[List[Any]]): Allowed values for the parameter.
- default_present (bool): Whether a default value is explicitly set.
- param_type (Optional[Union[str, List[str]]]): The type or types of the parameter.
def to_json_schema(self) -> Dict[str, Any]:
    """Serialize this parameter to a JSON-schema property dict."""
    # Base dictionary with type and optional description.
    # param_type may still be a ParameterType class attribute on subclasses,
    # so unwrap the enum value here too.
    schema_dict: Dict[str, Any] = {
        "type": self.param_type.value
        if isinstance(self.param_type, ParameterType)
        else self.param_type
    }
    if self.description:
        schema_dict["description"] = self.description

    # Handle enum
    if self.enum:
        schema_dict["enum"] = self.enum

    # Handle default
    # default can be None, 0, False; None means optional parameter
    if self.default_present:
        schema_dict["default"] = self.default
    elif isinstance(self.param_type, list) and "none" in self.param_type:
        # Nullable union type: make the implicit None default explicit.
        schema_dict["default"] = None

    return schema_dict
class UnionParameter(Parameter):
    """Parameter representing a union (JSON Schema ``anyOf``) type."""

    param_type: List[str]

    def __init__(
        self,
        name: str,
        options: List[Parameter],
        description: Optional[str] = None,
        required: bool = True,
        default: Any = None,
        enum: Optional[list] = None,
        default_present: bool = False,
    ):
        """Initialize a UnionParameter instance.

        Args:
            name (str): Name of the parameter.
            options (List[Parameter]): List of Parameter instances representing the union types.
            description (Optional[str]): Description of the parameter.
            required (bool): Whether the parameter is required.
            default (Any): Default value for the parameter.
            enum (Optional[list]): Allowed values for the parameter.
            default_present (bool): Whether a default value is explicitly set.

        Raises:
            TypeError: If any option is itself a UnionParameter (nesting is
                not supported).
        """
        super().__init__(name, description, required, default, enum, default_present)
        self.options = options
        for opt in options:
            if isinstance(opt, UnionParameter):
                raise TypeError(
                    "UnionParameter cannot contain another UnionParameter in its options"
                )

        # param_type here is the list of inner types as strings, e.g. ["string", "null"]
        flattened_types = []
        for opt in options:
            pt = opt.param_type
            # Unwrap ParameterType-style enum members to their string value.
            if hasattr(pt, "value"):
                pt = pt.value
            if isinstance(pt, list):
                flattened_types.extend(p for p in pt if p is not None)
            elif pt is not None:
                flattened_types.append(pt)

        # Deduplicate while preserving first-seen order. dict.fromkeys keeps
        # insertion order; the previous list(set(...)) did not, despite its
        # comment claiming otherwise.
        self.param_type = list(dict.fromkeys(flattened_types))

    def to_json_schema(self) -> Dict[str, Any]:
        """Convert the union parameter to a JSON schema representation."""
        # Union members are expressed with JSON Schema's anyOf combinator.
        schema = {
            "anyOf": [opt.to_json_schema() for opt in self.options],
        }

        if self.description:
            schema["description"] = self.description  # type: ignore

        # default_present distinguishes "default explicitly set" from None.
        if self.default_present:
            schema["default"] = self.default

        return schema

    def __repr__(self) -> str:
        """Return a string representation of the UnionParameter."""
        return (
            f"UnionParameter(name={self.name!r}, options={self.options!r}, "
            f"description={self.description!r}, required={self.required!r}, default={self.default!r})"
        )
Parameter representing a union type.
def __init__(
    self,
    name: str,
    options: List[Parameter],
    description: Optional[str] = None,
    required: bool = True,
    default: Any = None,
    enum: Optional[list] = None,
    default_present: bool = False,
):
    """Initialize a UnionParameter instance.

    Args:
        name (str): Name of the parameter.
        options (List[Parameter]): List of Parameter instances representing the union types.
        description (Optional[str]): Description of the parameter.
        required (bool): Whether the parameter is required.
        default (Any): Default value for the parameter.
        enum (Optional[list]): Allowed values for the parameter.
        default_present (bool): Whether a default value is explicitly set.

    Raises:
        TypeError: If any option is itself a UnionParameter (nesting is not
            supported).
    """
    super().__init__(name, description, required, default, enum, default_present)
    self.options = options
    for opt in options:
        if isinstance(opt, UnionParameter):
            raise TypeError(
                "UnionParameter cannot contain another UnionParameter in its options"
            )

    # param_type here is the list of inner types as strings, e.g. ["string", "null"]
    # flatten and deduplicate types (order does not matter for schema)
    flattened_types = []
    for opt in options:
        pt = opt.param_type
        # Unwrap ParameterType-style enum members to their string value.
        if hasattr(pt, "value"):
            pt = pt.__getattribute__("value")
        if isinstance(pt, list):
            flattened_types.extend(p for p in pt if p is not None)
        elif pt is not None:
            flattened_types.append(pt)

    # NOTE(review): set() does NOT preserve order — the resulting type order
    # is arbitrary (schema meaning is unaffected, but it is not "preserved").
    self.param_type = list(set(flattened_types))
Initialize a UnionParameter instance.
Arguments:
- name (str): Name of the parameter.
- options (List[Parameter]): List of Parameter instances representing the union types.
- description (Optional[str]): Description of the parameter.
- required (bool): Whether the parameter is required.
- default (Any): Default value for the parameter.
- enum (Optional[list]): Allowed values for the parameter.
- default_present (bool): Whether a default value is explicitly set.
def to_json_schema(self) -> Dict[str, Any]:
    """Convert the union parameter to a JSON schema representation."""
    # Union members are expressed with JSON Schema's anyOf combinator.
    schema = {
        "anyOf": [opt.to_json_schema() for opt in self.options],
    }

    if self.description:
        schema["description"] = self.description  # type: ignore

    # default_present distinguishes "default explicitly set" from default=None.
    if self.default_present:
        schema["default"] = self.default

    return schema
Convert the union parameter to a JSON schema representation.
class ArrayParameter(Parameter):
    """Parameter representing a JSON-schema array type."""

    param_type: ParameterType = ParameterType.ARRAY

    def __init__(
        self,
        name: str,
        items: Parameter,
        description: Optional[str] = None,
        required: bool = True,
        default: Any = None,
        max_items: Optional[int] = None,
        additional_properties: bool = False,
    ):
        """Initialize an ArrayParameter instance.

        Args:
            name (str): Name of the parameter.
            items (Parameter): Parameter instance describing the type of array elements.
            description (Optional[str]): Description of the parameter.
            required (bool): Whether the parameter is required.
            default (Any): Default value for the parameter.
            max_items (Optional[int]): Maximum number of items allowed in the array.
            additional_properties (bool): Whether additional properties are allowed (relevant if items are objects).
        """
        super().__init__(name, description, required, default)
        self.items = items
        self.max_items = max_items
        # Only meaningful when the element type is an object.
        self.additional_properties = additional_properties

    def to_json_schema(self) -> Dict[str, Any]:
        """Convert the array parameter to a JSON schema representation."""
        # The element schema comes from the nested Parameter instance.
        schema = {
            "type": "array",
            "items": self.items.to_json_schema(),
        }

        if self.description:
            schema["description"] = self.description
        if self.max_items is not None:
            schema["maxItems"] = self.max_items
        # default is emitted only when not None at the array level.
        if self.default is not None:
            schema["default"] = self.default
        # Note: enum on arrays is uncommon but supported if set.
        if self.enum:
            schema["enum"] = self.enum

        return schema

    def __repr__(self) -> str:
        """Return a string representation of the ArrayParameter."""
        return (
            f"ArrayParameter(name={self.name!r}, items={self.items!r}, "
            f"description={self.description!r}, required={self.required!r}, "
            f"default={self.default!r}, max_items={self.max_items!r}, "
            f"additional_properties={self.additional_properties!r})"
        )
Parameter representing an array type.
def __init__(
    self,
    name: str,
    items: Parameter,
    description: Optional[str] = None,
    required: bool = True,
    default: Any = None,
    max_items: Optional[int] = None,
    additional_properties: bool = False,
):
    """Initialize an ArrayParameter instance.

    Args:
        name (str): Name of the parameter.
        items (Parameter): Parameter instance describing the type of array elements.
        description (Optional[str]): Description of the parameter.
        required (bool): Whether the parameter is required.
        default (Any): Default value for the parameter.
        max_items (Optional[int]): Maximum number of items allowed in the array.
        additional_properties (bool): Whether additional properties are allowed (relevant if items are objects).
    """
    # NOTE: enum and default_present are not forwarded to the base class.
    super().__init__(name, description, required, default)
    self.items = items
    self.max_items = max_items
    self.additional_properties = (
        additional_properties  # might be relevant if items is object type
    )
Initialize an ArrayParameter instance.
Arguments:
- name (str): Name of the parameter.
- items (Parameter): Parameter instance describing the type of array elements.
- description (Optional[str]): Description of the parameter.
- required (bool): Whether the parameter is required.
- default (Any): Default value for the parameter.
- max_items (Optional[int]): Maximum number of items allowed in the array.
- additional_properties (bool): Whether additional properties are allowed (relevant if items are objects).
def to_json_schema(self) -> Dict[str, Any]:
    """Convert the array parameter to a JSON schema representation."""
    # Base property for items inside the array
    items_schema = self.items.to_json_schema()

    schema = {
        "type": "array",
        "items": items_schema,
    }
    if self.description:
        schema["description"] = self.description

    if self.max_items is not None:
        schema["maxItems"] = self.max_items

    # Set defaults and enum if present at the array level
    # (unlike the base class, default is emitted only when not None).
    if self.default is not None:
        schema["default"] = self.default

    # Note: enum on arrays is uncommon but if you want to support:
    if self.enum:
        schema["enum"] = self.enum

    return schema
Convert the array parameter to a JSON schema representation.
class ObjectParameter(Parameter):
    """Parameter representing a JSON-schema object type."""

    param_type: ParameterType = ParameterType.OBJECT

    def __init__(
        self,
        name: str,
        properties: list[Parameter],
        description: Optional[str] = None,
        required: bool = True,
        additional_properties: bool = False,
        default: Any = None,
    ):
        """Initialize an ObjectParameter instance.

        Args:
            name (str): Name of the parameter.
            properties (list[Parameter]): List of Parameter instances describing object properties.
            description (Optional[str]): Description of the parameter.
            required (bool): Whether the parameter is required.
            additional_properties (bool): Whether additional properties are allowed.
            default (Any): Default value for the parameter.
        """
        super().__init__(name, description, required, default)
        self.properties = properties
        self.additional_properties = additional_properties

    def to_json_schema(self) -> Dict[str, Any]:
        """Convert the object parameter to a JSON schema representation."""
        # Each nested property serializes itself; required ones are listed
        # separately per the JSON Schema object contract.
        schema = {
            "type": "object",
            "properties": {p.name: p.to_json_schema() for p in self.properties},
            "additionalProperties": self.additional_properties,
        }

        if self.description:
            schema["description"] = self.description

        mandatory = [p.name for p in self.properties if p.required]
        if mandatory:
            schema["required"] = mandatory

        if self.default is not None:
            schema["default"] = self.default
        if self.enum:
            schema["enum"] = self.enum

        return schema

    def __repr__(self) -> str:
        """Return a string representation of the ObjectParameter."""
        return (
            f"ObjectParameter(name={self.name!r}, properties={self.properties!r}, "
            f"description={self.description!r}, required={self.required!r}, "
            f"additional_properties={self.additional_properties!r}, default={self.default!r})"
        )
Parameter representing an object type.
def __init__(
    self,
    name: str,
    properties: list[Parameter],
    description: Optional[str] = None,
    required: bool = True,
    additional_properties: bool = False,
    default: Any = None,
):
    """Initialize an ObjectParameter instance.

    Args:
        name (str): Name of the parameter.
        properties (list[Parameter]): List of Parameter instances describing object properties.
        description (Optional[str]): Description of the parameter.
        required (bool): Whether the parameter is required.
        additional_properties (bool): Whether additional properties are allowed.
        default (Any): Default value for the parameter.
    """
    # NOTE: enum and default_present are not forwarded to the base class.
    super().__init__(name, description, required, default)
    self.properties = properties
    self.additional_properties = additional_properties
Initialize an ObjectParameter instance.
Arguments:
- name (str): Name of the parameter.
- properties (list[Parameter]): List of Parameter instances describing object properties.
- description (Optional[str]): Description of the parameter.
- required (bool): Whether the parameter is required.
- additional_properties (bool): Whether additional properties are allowed.
- default (Any): Default value for the parameter.
def to_json_schema(self) -> Dict[str, Any]:
    """Convert the object parameter to a JSON schema representation."""
    schema = {
        "type": "object",
        "properties": {},
        "additionalProperties": self.additional_properties,
    }

    if self.description:
        schema["description"] = self.description

    # Each nested property serializes itself; required ones are collected
    # into the top-level "required" list per the JSON Schema object contract.
    required_props = []
    for prop in self.properties:
        schema["properties"][prop.name] = prop.to_json_schema()
        if prop.required:
            required_props.append(prop.name)

    if required_props:
        schema["required"] = required_props

    # Unlike the base class, default is emitted only when not None.
    if self.default is not None:
        schema["default"] = self.default

    if self.enum:
        schema["enum"] = self.enum

    return schema
Convert the object parameter to a JSON schema representation.
class RefParameter(Parameter):
    """Parameter that points at a shared schema definition via ``$ref``."""

    param_type: str = "object"  # referenced schemas are always 'object' type

    def __init__(
        self,
        name: str,
        ref_path: str,
        description: Optional[str] = None,
        required: bool = True,
        default: Any = None,
    ):
        """Initialize a RefParameter instance.

        Args:
            name (str): Name of the parameter.
            ref_path (str): Reference path to the schema definition.
            description (Optional[str]): Description of the parameter.
            required (bool): Whether the parameter is required.
            default (Any): Default value for the parameter.
        """
        super().__init__(name, description, required, default)
        self.ref_path = ref_path

    def to_json_schema(self) -> Dict[str, Any]:
        """Convert the reference parameter to a JSON schema representation."""
        # The reference itself is the core of the schema; everything else is
        # optional decoration.
        schema: Dict[str, Any] = {"$ref": self.ref_path}

        if self.description:
            schema["description"] = self.description
        if self.default is not None:
            schema["default"] = self.default
        if self.enum:
            schema["enum"] = self.enum

        return schema

    def __repr__(self) -> str:
        """Return a string representation of the RefParameter."""
        return (
            f"RefParameter(name={self.name!r}, ref_path={self.ref_path!r}, "
            f"description={self.description!r}, required={self.required!r}, default={self.default!r})"
        )
Parameter representing a reference type.
def __init__(
    self,
    name: str,
    ref_path: str,
    description: Optional[str] = None,
    required: bool = True,
    default: Any = None,
):
    """Initialize a RefParameter instance.

    Args:
        name (str): Name of the parameter.
        ref_path (str): Reference path to the schema definition.
        description (Optional[str]): Description of the parameter.
        required (bool): Whether the parameter is required.
        default (Any): Default value for the parameter.
    """
    # NOTE: enum and default_present are not forwarded to the base class.
    super().__init__(name, description, required, default)
    self.ref_path = ref_path
Initialize a RefParameter instance.
Arguments:
- name (str): Name of the parameter.
- ref_path (str): Reference path to the schema definition.
- description (Optional[str]): Description of the parameter.
- required (bool): Whether the parameter is required.
- default (Any): Default value for the parameter.
def to_json_schema(self) -> Dict[str, Any]:
    """Convert the reference parameter to a JSON schema representation."""
    # The $ref pointer is the core of the schema; the rest is decoration.
    schema = {"$ref": self.ref_path}
    if self.description:
        schema["description"] = self.description

    # Unlike the base class, default is emitted only when not None.
    if self.default is not None:
        schema["default"] = self.default

    if self.enum:
        schema["enum"] = self.enum

    return schema
Convert the reference parameter to a JSON schema representation.