"Sure, I can certainly help you with that. The sum of 5 plus 3 is equal to 8. Let me know if you have any other medical questions that I can assist you with!\n"
"content":"You are a medical expert but not good at math",
},
{
"role":"user",
"content":"What is 5 plus 3",
}
],
model="mixtral-8x7b-32768",
)
print(chat_completion.choices[0].message.content)
```
%% Output
Sure, I can certainly help you with that. The sum of 5 plus 3 is equal to 8. Let me know if you have any other medical questions that I can assist you with!
File ~/.local/lib/python3.10/site-packages/openai/_base_client.py:921, in SyncAPIClient.request(self, cast_to, options, remaining_retries, stream, stream_cls)
    912 def request(
    913 self,
    914 cast_to: Type[ResponseT],
    (...)
    919 stream_cls: type[_StreamT] | None = None,
    920 ) -> ResponseT | _StreamT:
--> 921 return self._request(
    922 cast_to=cast_to,
    923 options=options,
    924 stream=stream,
    925 stream_cls=stream_cls,
    926 remaining_retries=remaining_retries,
    927 )

File ~/.local/lib/python3.10/site-packages/openai/_base_client.py:1005, in SyncAPIClient._request(self, cast_to, options, remaining_retries, stream, stream_cls)
   1003 if retries > 0 and self._should_retry(err.response):
   1004 err.response.close()
-> 1005 return self._retry_request(
   1006 options,
   1007 cast_to,
   1008 retries,
   1009 err.response.headers,
   1010 stream=stream,
   1011 stream_cls=stream_cls,
   1012 )
   1014 # If the response is streamed then we need to explicitly read the response
   1015 # to completion before attempting to access the response text.
   1016 if not err.response.is_closed:

File ~/.local/lib/python3.10/site-packages/openai/_base_client.py:1053, in SyncAPIClient._retry_request(self, options, cast_to, remaining_retries, response_headers, stream, stream_cls)
   1049 # In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a
   1050 # different thread if necessary.
   1051 time.sleep(timeout)
-> 1053 return self._request(
   1054 options=options,
   1055 cast_to=cast_to,
   1056 remaining_retries=remaining,
   1057 stream=stream,
   1058 stream_cls=stream_cls,
   1059 )

File ~/.local/lib/python3.10/site-packages/openai/_base_client.py:1005, in SyncAPIClient._request(self, cast_to, options, remaining_retries, stream, stream_cls)
   1003 if retries > 0 and self._should_retry(err.response):
   1004 err.response.close()
-> 1005 return self._retry_request(
   1006 options,
   1007 cast_to,
   1008 retries,
   1009 err.response.headers,
   1010 stream=stream,
   1011 stream_cls=stream_cls,
   1012 )
   1014 # If the response is streamed then we need to explicitly read the response
   1015 # to completion before attempting to access the response text.
   1016 if not err.response.is_closed:

File ~/.local/lib/python3.10/site-packages/openai/_base_client.py:1053, in SyncAPIClient._retry_request(self, options, cast_to, remaining_retries, response_headers, stream, stream_cls)
   1049 # In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a
   1050 # different thread if necessary.
   1051 time.sleep(timeout)
-> 1053 return self._request(
   1054 options=options,
   1055 cast_to=cast_to,
   1056 remaining_retries=remaining,
   1057 stream=stream,
   1058 stream_cls=stream_cls,
   1059 )

File ~/.local/lib/python3.10/site-packages/openai/_base_client.py:1020, in SyncAPIClient._request(self, cast_to, options, remaining_retries, stream, stream_cls)
   1017 err.response.read()
   1019 log.debug("Re-raising status error")
-> 1020 raise self._make_status_error_from_response(err.response) from None
   1022 return self._process_response(
   1023 cast_to=cast_to,
   1024 options=options,
   (...)
   1027 stream_cls=stream_cls,
   1028 )

RateLimitError: Error code: 429 - {'error': {'message': 'You exceeded your current quota, please check your plan and billing details. For more information on this error, read the docs: https://platform.openai.com/docs/guides/error-codes/api-errors.', 'type': 'insufficient_quota', 'param': None, 'code': 'insufficient_quota'}}
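
The traceback above ends in `openai.RateLimitError` with `'code': 'insufficient_quota'`: the OpenAI account has no quota left, so the client's built-in retries (the repeated `_request` / `_retry_request` frames) can never succeed. If you would rather handle this in your own code than let the traceback bubble up, here is a minimal sketch, assuming the `openai` v1 Python client and an `OPENAI_API_KEY` in the environment; the `ask_with_backoff` helper and the `gpt-3.5-turbo` model choice are illustrative, not part of the original code:

```python
import time

import openai
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment


def ask_with_backoff(prompt: str, retries: int = 3, delay: float = 2.0) -> str:
    """Retry a chat completion on transient 429s; fail fast on exhausted quota."""
    for attempt in range(retries):
        try:
            response = client.chat.completions.create(
                model="gpt-3.5-turbo",  # illustrative model choice
                messages=[{"role": "user", "content": prompt}],
            )
            return response.choices[0].message.content
        except openai.RateLimitError as err:
            # 'insufficient_quota' means the quota itself is exhausted; retrying cannot help
            if getattr(err, "code", None) == "insufficient_quota":
                raise
            # otherwise treat it as a transient rate limit: back off and try again
            time.sleep(delay * (attempt + 1))
    raise RuntimeError("Still rate limited after all retries")


print(ask_with_backoff("What is 5 plus 3"))
```

The design point is that a transient per-minute 429 is worth retrying after a pause, while `insufficient_quota` will keep failing until billing is fixed, which is why the sketch re-raises it immediately instead of sleeping.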