Coverage for pydantic_ai_slim/pydantic_ai/models/mistral.py: 95.72%

299 statements  

coverage.py v7.6.12, created at 2025-03-28 17:27 +0000

from __future__ import annotations as _annotations

import base64
from collections.abc import AsyncIterable, AsyncIterator, Iterable
from contextlib import asynccontextmanager
from dataclasses import dataclass, field
from datetime import datetime, timezone
from itertools import chain
from typing import Any, Literal, Union, cast

import pydantic_core
from httpx import Timeout
from typing_extensions import assert_never

from .. import ModelHTTPError, UnexpectedModelBehavior, _utils
from .._utils import generate_tool_call_id as _generate_tool_call_id, now_utc as _now_utc
from ..messages import (
    BinaryContent,
    DocumentUrl,
    ImageUrl,
    ModelMessage,
    ModelRequest,
    ModelResponse,
    ModelResponsePart,
    ModelResponseStreamEvent,
    RetryPromptPart,
    SystemPromptPart,
    TextPart,
    ToolCallPart,
    ToolReturnPart,
    UserPromptPart,
)
from ..providers import Provider, infer_provider
from ..result import Usage
from ..settings import ModelSettings
from ..tools import ToolDefinition
from . import (
    Model,
    ModelRequestParameters,
    StreamedResponse,
    check_allow_model_requests,
)

try:
    from mistralai import (
        UNSET,
        CompletionChunk as MistralCompletionChunk,
        Content as MistralContent,
        ContentChunk as MistralContentChunk,
        FunctionCall as MistralFunctionCall,
        ImageURL as MistralImageURL,
        ImageURLChunk as MistralImageURLChunk,
        Mistral,
        OptionalNullable as MistralOptionalNullable,
        TextChunk as MistralTextChunk,
        ToolChoiceEnum as MistralToolChoiceEnum,
    )
    from mistralai.models import (
        ChatCompletionResponse as MistralChatCompletionResponse,
        CompletionEvent as MistralCompletionEvent,
        Messages as MistralMessages,
        SDKError,
        Tool as MistralTool,
        ToolCall as MistralToolCall,
    )
    from mistralai.models.assistantmessage import AssistantMessage as MistralAssistantMessage
    from mistralai.models.function import Function as MistralFunction
    from mistralai.models.systemmessage import SystemMessage as MistralSystemMessage
    from mistralai.models.toolmessage import ToolMessage as MistralToolMessage
    from mistralai.models.usermessage import UserMessage as MistralUserMessage
    from mistralai.types.basemodel import Unset as MistralUnset
    from mistralai.utils.eventstreaming import EventStreamAsync as MistralEventStreamAsync
except ImportError as e:
    raise ImportError(
        'Please install `mistral` to use the Mistral model, '
        'you can use the `mistral` optional group — `pip install "pydantic-ai-slim[mistral]"`'
    ) from e

LatestMistralModelNames = Literal[
    'mistral-large-latest', 'mistral-small-latest', 'codestral-latest', 'mistral-moderation-latest'
]
"""Latest Mistral models."""

MistralModelName = Union[str, LatestMistralModelNames]
"""Possible Mistral model names.

Since Mistral supports a variety of date-stamped models, we explicitly list the most popular models but
allow any name in the type hints.
See [the Mistral docs](https://docs.mistral.ai/getting-started/models/models_overview/) for a full list.
"""


class MistralModelSettings(ModelSettings):
    """Settings used for a Mistral model request.

    ALL FIELDS MUST BE `mistral_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
    """

    # This class is a placeholder for any future mistral-specific settings

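# Example (an illustrative sketch, not an official recipe): per-run settings use
# the plain `ModelSettings` keys that `_completions_create` below reads, e.g.
#
#     settings: MistralModelSettings = {'temperature': 0.2, 'max_tokens': 512}
#
# A future mistral-specific option would be added here with a `mistral_` prefix.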

@dataclass(init=False)
class MistralModel(Model):
    """A model that uses Mistral.

    Internally, this uses the [Mistral Python client](https://github.com/mistralai/client-python) to interact with the API.

    [API Documentation](https://docs.mistral.ai/)
    """

    client: Mistral = field(repr=False)
    json_mode_schema_prompt: str = """Answer in JSON Object, respect the format:\n```\n{schema}\n```\n"""

    _model_name: MistralModelName = field(repr=False)
    _system: str = field(default='mistral_ai', repr=False)

    def __init__(
        self,
        model_name: MistralModelName,
        *,
        provider: Literal['mistral'] | Provider[Mistral] = 'mistral',
        json_mode_schema_prompt: str = """Answer in JSON Object, respect the format:\n```\n{schema}\n```\n""",
    ):
        """Initialize a Mistral model.

        Args:
            model_name: The name of the model to use.
            provider: The provider to use for authentication and API access. Can be either the string
                'mistral' or an instance of `Provider[Mistral]`. When a string is passed, the provider
                is created via `infer_provider`.
            json_mode_schema_prompt: The prompt to show when the model expects a JSON object as input.
        """
        self._model_name = model_name
        self.json_mode_schema_prompt = json_mode_schema_prompt

        if isinstance(provider, str):
            provider = infer_provider(provider)
        self.client = provider.client

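    # Example usage (an illustrative sketch; assumes `MISTRAL_API_KEY` is set so
    # the default 'mistral' provider can authenticate):
    #
    #     from pydantic_ai import Agent
    #     from pydantic_ai.models.mistral import MistralModel
    #
    #     model = MistralModel('mistral-small-latest')
    #     agent = Agent(model)
    #     result = agent.run_sync('What is the capital of France?')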

    @property
    def base_url(self) -> str:
        return self.client.sdk_configuration.get_server_details()[0]

    async def request(
        self,
        messages: list[ModelMessage],
        model_settings: ModelSettings | None,
        model_request_parameters: ModelRequestParameters,
    ) -> tuple[ModelResponse, Usage]:
        """Make a non-streaming request to the model from a Pydantic AI call."""
        check_allow_model_requests()
        response = await self._completions_create(
            messages, cast(MistralModelSettings, model_settings or {}), model_request_parameters
        )
        return self._process_response(response), _map_usage(response)

    @asynccontextmanager
    async def request_stream(
        self,
        messages: list[ModelMessage],
        model_settings: ModelSettings | None,
        model_request_parameters: ModelRequestParameters,
    ) -> AsyncIterator[StreamedResponse]:
        """Make a streaming request to the model from a Pydantic AI call."""
        check_allow_model_requests()
        response = await self._stream_completions_create(
            messages, cast(MistralModelSettings, model_settings or {}), model_request_parameters
        )
        async with response:
            yield await self._process_streamed_response(model_request_parameters.result_tools, response)

    @property
    def model_name(self) -> MistralModelName:
        """The model name."""
        return self._model_name

    @property
    def system(self) -> str:
        """The system / model provider."""
        return self._system

    async def _completions_create(
        self,
        messages: list[ModelMessage],
        model_settings: MistralModelSettings,
        model_request_parameters: ModelRequestParameters,
    ) -> MistralChatCompletionResponse:
        """Make a non-streaming request to the model."""
        try:
            response = await self.client.chat.complete_async(
                model=str(self._model_name),
                messages=list(chain(*(self._map_message(m) for m in messages))),
                n=1,
                tools=self._map_function_and_result_tools_definition(model_request_parameters) or UNSET,
                tool_choice=self._get_tool_choice(model_request_parameters),
                stream=False,
                max_tokens=model_settings.get('max_tokens', UNSET),
                temperature=model_settings.get('temperature', UNSET),
                top_p=model_settings.get('top_p', 1),
                timeout_ms=self._get_timeout_ms(model_settings.get('timeout')),
                random_seed=model_settings.get('seed', UNSET),
            )
        except SDKError as e:
            if (status_code := e.status_code) >= 400:
                raise ModelHTTPError(status_code=status_code, model_name=self.model_name, body=e.body) from e
            raise

        assert response, 'An unexpected empty response from Mistral.'
        return response

    async def _stream_completions_create(
        self,
        messages: list[ModelMessage],
        model_settings: MistralModelSettings,
        model_request_parameters: ModelRequestParameters,
    ) -> MistralEventStreamAsync[MistralCompletionEvent]:
        """Create a streaming completion request to the Mistral model."""
        response: MistralEventStreamAsync[MistralCompletionEvent] | None
        mistral_messages = list(chain(*(self._map_message(m) for m in messages)))

        if model_request_parameters.function_tools:
            # Function calling
            response = await self.client.chat.stream_async(
                model=str(self._model_name),
                messages=mistral_messages,
                n=1,
                tools=self._map_function_and_result_tools_definition(model_request_parameters) or UNSET,
                tool_choice=self._get_tool_choice(model_request_parameters),
                temperature=model_settings.get('temperature', UNSET),
                top_p=model_settings.get('top_p', 1),
                max_tokens=model_settings.get('max_tokens', UNSET),
                timeout_ms=self._get_timeout_ms(model_settings.get('timeout')),
                presence_penalty=model_settings.get('presence_penalty'),
                frequency_penalty=model_settings.get('frequency_penalty'),
            )

        elif model_request_parameters.result_tools:
            # JSON mode
            parameters_json_schemas = [tool.parameters_json_schema for tool in model_request_parameters.result_tools]
            user_output_format_message = self._generate_user_output_format(parameters_json_schemas)
            mistral_messages.append(user_output_format_message)

            response = await self.client.chat.stream_async(
                model=str(self._model_name),
                messages=mistral_messages,
                response_format={'type': 'json_object'},
                stream=True,
            )

        else:
            # Plain text streaming
            response = await self.client.chat.stream_async(
                model=str(self._model_name),
                messages=mistral_messages,
                stream=True,
            )
        assert response, 'An unexpected empty response from Mistral.'
        return response

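    # In brief: function tools present -> native Mistral tool calling; only
    # result tools -> JSON mode with a schema-example prompt appended; neither
    # -> plain text streaming.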

    def _get_tool_choice(self, model_request_parameters: ModelRequestParameters) -> MistralToolChoiceEnum | None:
        """Get the tool choice for the model.

        - "auto": Default mode; the model decides whether to use a tool.
        - "any": Select any tool.
        - "none": Prevents tool use.
        - "required": Forces tool use.
        """
        if not model_request_parameters.function_tools and not model_request_parameters.result_tools:
            return None
        elif not model_request_parameters.allow_text_result:
            return 'required'
        else:
            return 'auto'

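    # Example (illustrative): a request with only result tools and
    # `allow_text_result=False` yields 'required'; with no tools at all it
    # yields None, so no tool choice is forced.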

    def _map_function_and_result_tools_definition(
        self, model_request_parameters: ModelRequestParameters
    ) -> list[MistralTool] | None:
        """Map function and result tools to MistralTool format.

        Returns None if both function_tools and result_tools are empty.
        """
        all_tools: list[ToolDefinition] = (
            model_request_parameters.function_tools + model_request_parameters.result_tools
        )
        tools = [
            MistralTool(
                function=MistralFunction(name=r.name, parameters=r.parameters_json_schema, description=r.description)
            )
            for r in all_tools
        ]
        return tools if tools else None

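    # Example (illustrative): a `ToolDefinition` named 'get_weather' maps to
    #     MistralTool(function=MistralFunction(name='get_weather',
    #                 parameters=<its JSON schema>, description=<its description>))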

    def _process_response(self, response: MistralChatCompletionResponse) -> ModelResponse:
        """Process a non-streamed response, and prepare a message to return."""
        assert response.choices, 'Unexpected empty response choice.'

        if response.created:
            timestamp = datetime.fromtimestamp(response.created, tz=timezone.utc)
        else:
            timestamp = _now_utc()

        choice = response.choices[0]
        content = choice.message.content
        tool_calls = choice.message.tool_calls

        parts: list[ModelResponsePart] = []
        if text := _map_content(content):
            parts.append(TextPart(content=text))

        if isinstance(tool_calls, list):
            for tool_call in tool_calls:
                tool = self._map_mistral_to_pydantic_tool_call(tool_call=tool_call)
                parts.append(tool)

        return ModelResponse(parts, model_name=response.model, timestamp=timestamp)

    async def _process_streamed_response(
        self,
        result_tools: list[ToolDefinition],
        response: MistralEventStreamAsync[MistralCompletionEvent],
    ) -> StreamedResponse:
        """Process a streamed response, and prepare a streaming response to return."""
        peekable_response = _utils.PeekableAsyncStream(response)
        first_chunk = await peekable_response.peek()
        if isinstance(first_chunk, _utils.Unset):
            raise UnexpectedModelBehavior('Streamed response ended without content or tool calls')

        if first_chunk.data.created:
            timestamp = datetime.fromtimestamp(first_chunk.data.created, tz=timezone.utc)
        else:
            timestamp = datetime.now(tz=timezone.utc)

        return MistralStreamedResponse(
            _response=peekable_response,
            _model_name=self._model_name,
            _timestamp=timestamp,
            _result_tools={c.name: c for c in result_tools},
        )

    @staticmethod
    def _map_mistral_to_pydantic_tool_call(tool_call: MistralToolCall) -> ToolCallPart:
        """Maps a MistralToolCall to a ToolCallPart."""
        tool_call_id = tool_call.id or _generate_tool_call_id()
        func_call = tool_call.function

        return ToolCallPart(func_call.name, func_call.arguments, tool_call_id)

    @staticmethod
    def _map_tool_call(t: ToolCallPart) -> MistralToolCall:
        """Maps a pydantic-ai ToolCallPart to a MistralToolCall."""
        return MistralToolCall(
            id=_utils.guard_tool_call_id(t=t),
            type='function',
            function=MistralFunctionCall(name=t.tool_name, arguments=t.args),
        )

    def _generate_user_output_format(self, schemas: list[dict[str, Any]]) -> MistralUserMessage:
        """Get a message with an example of the expected output format."""
        examples: list[dict[str, Any]] = []
        for schema in schemas:
            typed_dict_definition: dict[str, Any] = {}
            for key, value in schema.get('properties', {}).items():
                typed_dict_definition[key] = self._get_python_type(value)
            examples.append(typed_dict_definition)

        example_schema = examples[0] if len(examples) == 1 else examples
        return MistralUserMessage(content=self.json_mode_schema_prompt.format(schema=example_schema))

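    # Example (illustrative): for a single result tool whose schema declares
    # `{'properties': {'city': {'type': 'string'}}}`, the generated user message is:
    #
    #     Answer in JSON Object, respect the format:
    #     ```
    #     {'city': 'str'}
    #     ```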

    @classmethod
    def _get_python_type(cls, value: dict[str, Any]) -> str:
        """Return a string representation of the Python type for a single JSON schema property.

        This function handles recursion for nested arrays/objects and `anyOf`.
        """
        # 1) Handle anyOf first, because it's a different schema structure
        if any_of := value.get('anyOf'):
            # Simplistic approach: pick the first option in anyOf
            # (In reality, you'd possibly want to merge or union types)
            return f'Optional[{cls._get_python_type(any_of[0])}]'

        # 2) If we have a top-level "type" field
        value_type = value.get('type')
        if not value_type:
            # No explicit type; fallback
            return 'Any'

        # 3) Direct simple type mapping (string, integer, float, bool, None)
        if value_type in SIMPLE_JSON_TYPE_MAPPING and value_type != 'array' and value_type != 'object':
            return SIMPLE_JSON_TYPE_MAPPING[value_type]

        # 4) Array: Recursively get the item type
        if value_type == 'array':
            items = value.get('items', {})
            return f'list[{cls._get_python_type(items)}]'

        # 5) Object: Check for additionalProperties
        if value_type == 'object':
            additional_properties = value.get('additionalProperties', {})
            if isinstance(additional_properties, bool):
                return 'bool'  # pragma: no cover
            additional_properties_type = additional_properties.get('type')
            if (
                additional_properties_type in SIMPLE_JSON_TYPE_MAPPING
                and additional_properties_type != 'array'
                and additional_properties_type != 'object'
            ):
                # dict[str, bool/int/float/etc...]
                return f'dict[str, {SIMPLE_JSON_TYPE_MAPPING[additional_properties_type]}]'
            elif additional_properties_type == 'array':
                array_items = additional_properties.get('items', {})
                return f'dict[str, list[{cls._get_python_type(array_items)}]]'
            elif additional_properties_type == 'object':
                # nested dictionary of unknown shape
                return 'dict[str, dict[str, Any]]'
            else:
                # If no additionalProperties type or something else, default to a generic dict
                return 'dict[str, Any]'

        # 6) Fallback
        return 'Any'

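    # Worked examples (illustrative):
    #     _get_python_type({'type': 'array', 'items': {'type': 'integer'}})   -> 'list[int]'
    #     _get_python_type({'anyOf': [{'type': 'string'}, {'type': 'null'}]}) -> 'Optional[str]'
    #     _get_python_type({'type': 'object'})                                -> 'dict[str, Any]'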

    @staticmethod
    def _get_timeout_ms(timeout: Timeout | float | None) -> int | None:
        """Convert a timeout to milliseconds."""
        if timeout is None:
            return None
        if isinstance(timeout, float):
            return int(1000 * timeout)
        raise NotImplementedError('Timeout object is not yet supported for MistralModel.')

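    # Example (illustrative): `_get_timeout_ms(2.5)` returns 2500 ms; a full
    # `httpx.Timeout` object currently raises NotImplementedError.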

    @classmethod
    def _map_user_message(cls, message: ModelRequest) -> Iterable[MistralMessages]:
        for part in message.parts:
            if isinstance(part, SystemPromptPart):
                yield MistralSystemMessage(content=part.content)
            elif isinstance(part, UserPromptPart):
                yield cls._map_user_prompt(part)
            elif isinstance(part, ToolReturnPart):
                yield MistralToolMessage(
                    tool_call_id=part.tool_call_id,
                    content=part.model_response_str(),
                )
            elif isinstance(part, RetryPromptPart):
                if part.tool_name is None:
                    yield MistralUserMessage(content=part.model_response())
                else:
                    yield MistralToolMessage(
                        tool_call_id=part.tool_call_id,
                        content=part.model_response(),
                    )
            else:
                assert_never(part)

    @classmethod
    def _map_message(cls, message: ModelMessage) -> Iterable[MistralMessages]:
        """Just maps a `pydantic_ai.Message` to a `MistralMessage`."""
        if isinstance(message, ModelRequest):
            yield from cls._map_user_message(message)
        elif isinstance(message, ModelResponse):
            content_chunks: list[MistralContentChunk] = []
            tool_calls: list[MistralToolCall] = []

            for part in message.parts:
                if isinstance(part, TextPart):
                    content_chunks.append(MistralTextChunk(text=part.content))
                elif isinstance(part, ToolCallPart):
                    tool_calls.append(cls._map_tool_call(part))
                else:
                    assert_never(part)
            yield MistralAssistantMessage(content=content_chunks, tool_calls=tool_calls)
        else:
            assert_never(message)

    @staticmethod
    def _map_user_prompt(part: UserPromptPart) -> MistralUserMessage:
        content: str | list[MistralContentChunk]
        if isinstance(part.content, str):
            content = part.content
        else:
            content = []
            for item in part.content:
                if isinstance(item, str):
                    content.append(MistralTextChunk(text=item))
                elif isinstance(item, ImageUrl):
                    content.append(MistralImageURLChunk(image_url=MistralImageURL(url=item.url)))
                elif isinstance(item, BinaryContent):
                    base64_encoded = base64.b64encode(item.data).decode('utf-8')
                    if item.is_image:
                        image_url = MistralImageURL(url=f'data:{item.media_type};base64,{base64_encoded}')
                        content.append(MistralImageURLChunk(image_url=image_url, type='image_url'))
                    else:
                        raise RuntimeError('Only image binary content is supported for Mistral.')
                elif isinstance(item, DocumentUrl):
                    raise RuntimeError('DocumentUrl is not supported in Mistral.')
                else:  # pragma: no cover
                    raise RuntimeError(f'Unsupported content type: {type(item)}')
        return MistralUserMessage(content=content)

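    # Example (illustrative): an image `BinaryContent(data=png_bytes,
    # media_type='image/png')` becomes an image-URL chunk whose URL is
    # 'data:image/png;base64,<encoded bytes>'.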

MistralToolCallId = Union[str, None]


@dataclass
class MistralStreamedResponse(StreamedResponse):
    """Implementation of `StreamedResponse` for Mistral models."""

    _model_name: MistralModelName
    _response: AsyncIterable[MistralCompletionEvent]
    _timestamp: datetime
    _result_tools: dict[str, ToolDefinition]

    _delta_content: str = field(default='', init=False)

    async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
        chunk: MistralCompletionEvent
        async for chunk in self._response:
            self._usage += _map_usage(chunk.data)

            try:
                choice = chunk.data.choices[0]
            except IndexError:
                continue

            # Handle the text part of the response
            content = choice.delta.content
            text = _map_content(content)
            if text:
                # Attempt to produce a result tool call from the received text
                if self._result_tools:
                    self._delta_content += text
                    maybe_tool_call_part = self._try_get_result_tool_from_text(self._delta_content, self._result_tools)
                    if maybe_tool_call_part:
                        yield self._parts_manager.handle_tool_call_part(
                            vendor_part_id='result',
                            tool_name=maybe_tool_call_part.tool_name,
                            args=maybe_tool_call_part.args_as_dict(),
                            tool_call_id=maybe_tool_call_part.tool_call_id,
                        )
                else:
                    yield self._parts_manager.handle_text_delta(vendor_part_id='content', content=text)

            # Handle the explicit tool calls
            for index, dtc in enumerate(choice.delta.tool_calls or []):
                # It seems that Mistral just sends full tool calls, so they are used
                # directly rather than built up incrementally from deltas
                yield self._parts_manager.handle_tool_call_part(
                    vendor_part_id=index, tool_name=dtc.function.name, args=dtc.function.arguments, tool_call_id=dtc.id
                )

    @property
    def model_name(self) -> MistralModelName:
        """Get the model name of the response."""
        return self._model_name

    @property
    def timestamp(self) -> datetime:
        """Get the timestamp of the response."""
        return self._timestamp

    @staticmethod
    def _try_get_result_tool_from_text(text: str, result_tools: dict[str, ToolDefinition]) -> ToolCallPart | None:
        output_json: dict[str, Any] | None = pydantic_core.from_json(text, allow_partial='trailing-strings')
        if output_json:
            for result_tool in result_tools.values():
                # NOTE: Additional verification to prevent JSON validation from crashing in `_result.py`.
                # Ensures required parameters in the JSON schema are respected, especially for stream-based return types.
                # Example with BaseModel and required fields.
                if not MistralStreamedResponse._validate_required_json_schema(
                    output_json, result_tool.parameters_json_schema
                ):
                    continue

                # The following part_id will be thrown away
                return ToolCallPart(tool_name=result_tool.name, args=output_json)

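    # Example (illustrative): `pydantic_core.from_json` with
    # `allow_partial='trailing-strings'` tolerates truncated streaming JSON:
    #
    #     from_json('{"city": "Par', allow_partial='trailing-strings')
    #     #> {'city': 'Par'}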

    @staticmethod
    def _validate_required_json_schema(json_dict: dict[str, Any], json_schema: dict[str, Any]) -> bool:
        """Validate that all required parameters in the JSON schema are present in the JSON dictionary."""
        required_params = json_schema.get('required', [])
        properties = json_schema.get('properties', {})

        for param in required_params:
            if param not in json_dict:
                return False

            param_schema = properties.get(param, {})
            param_type = param_schema.get('type')
            param_items_type = param_schema.get('items', {}).get('type')

            if param_type == 'array' and param_items_type:
                if not isinstance(json_dict[param], list):
                    return False
                for item in json_dict[param]:
                    if not isinstance(item, VALID_JSON_TYPE_MAPPING[param_items_type]):
                        return False
            elif param_type and not isinstance(json_dict[param], VALID_JSON_TYPE_MAPPING[param_type]):
                return False

            if isinstance(json_dict[param], dict) and 'properties' in param_schema:
                nested_schema = param_schema
                if not MistralStreamedResponse._validate_required_json_schema(json_dict[param], nested_schema):
                    return False

        return True

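    # Example (illustrative): for a schema with `required=['city']` and
    # `{'city': {'type': 'string'}}`, `{'city': 'Paris'}` passes while `{}` and
    # `{'city': 3}` fail, so incomplete partial JSON is skipped until later
    # chunks complete it.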


VALID_JSON_TYPE_MAPPING: dict[str, Any] = {
    'string': str,
    'integer': int,
    'number': float,
    'boolean': bool,
    'array': list,
    'object': dict,
    'null': type(None),
}

SIMPLE_JSON_TYPE_MAPPING = {
    'string': 'str',
    'integer': 'int',
    'number': 'float',
    'boolean': 'bool',
    'array': 'list',
    'null': 'None',
}


def _map_usage(response: MistralChatCompletionResponse | MistralCompletionChunk) -> Usage:
    """Maps a Mistral Completion Chunk or Chat Completion Response to a Usage."""
    if response.usage:
        return Usage(
            request_tokens=response.usage.prompt_tokens,
            response_tokens=response.usage.completion_tokens,
            total_tokens=response.usage.total_tokens,
            details=None,
        )
    else:
        return Usage()


def _map_content(content: MistralOptionalNullable[MistralContent]) -> str | None:
    """Maps the delta content from a Mistral Completion Chunk to a string or None."""
    result: str | None = None

    if isinstance(content, MistralUnset) or not content:
        result = None
    elif isinstance(content, list):
        for chunk in content:
            if isinstance(chunk, MistralTextChunk):
                result = (result or '') + chunk.text
            else:
                assert False, f'Other data types like (Image, Reference) are not yet supported, got {type(chunk)}'
    elif isinstance(content, str):
        result = content

    # Note: Convert an empty string to None to handle a potential mismatch between function calls
    # and responses from the API. (`msg: not the same number of function calls and responses`)
    if not result:
        result = None

    return result
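
# Examples (illustrative):
#     _map_content('hello')                       -> 'hello'
#     _map_content([MistralTextChunk(text='hi')]) -> 'hi'
#     _map_content(UNSET)                         -> None   (as is _map_content(''))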