Skip to content
Snippets Groups Projects
Commit 7ef0ba0a authored by Adrien Klose's avatar Adrien Klose
Browse files

minimal test groq api

parent 6def1d05
No related branches found
No related tags found
No related merge requests found
%% Cell type:code id:7209584a-2748-4473-acc8-51a062f7b2cd tags:
``` python
# Set up the environment and OpenAI client.
#
# `dotenv.load_dotenv()` reads a local `.env` file into the process
# environment, and `OpenAI()` automatically picks up the API key from the
# OPENAI_API_KEY environment variable — no need to pass it explicitly.
import os

import dotenv
from openai import OpenAI

dotenv.load_dotenv()  # populate os.environ from .env (incl. OPENAI_API_KEY)
client = OpenAI()  # reads OPENAI_API_KEY from the environment
```
%% Cell type:code id:6f05f369-af37-45df-8a33-675f177290b4 tags:
``` python
# First example: request a chat completion that explains recursion as a poem.
# NOTE(review): the traceback below shows this run failed with a 429
# insufficient_quota error — the code itself is correct, but the account
# needs available quota/billing for the call to succeed.
completion = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "system", "content": "You are a poetic assistant, skilled in explaining complex programming concepts with creative flair."},
        {"role": "user", "content": "Compose a poem that explains the concept of recursion in programming."},
    ],
)
# Print the assistant's text; `.message` alone would print the whole
# ChatCompletionMessage object repr rather than just the generated poem.
print(completion.choices[0].message.content)
```
%% Output
---------------------------------------------------------------------------
RateLimitError Traceback (most recent call last)
Cell In[5], line 2
1 # first example
----> 2 completion = client.chat.completions.create(
3 model="gpt-3.5-turbo",
4 messages=[
5 {"role": "system", "content": "You are a poetic assistant, skilled in explaining complex programming concepts with creative flair."},
6 {"role": "user", "content": "Compose a poem that explains the concept of recursion in programming."}
7 ]
8 )
10 print(completion.choices[0].message)
File ~/.local/lib/python3.10/site-packages/openai/_utils/_utils.py:277, in required_args.<locals>.inner.<locals>.wrapper(*args, **kwargs)
275 msg = f"Missing required argument: {quote(missing[0])}"
276 raise TypeError(msg)
--> 277 return func(*args, **kwargs)
File ~/.local/lib/python3.10/site-packages/openai/resources/chat/completions.py:590, in Completions.create(self, messages, model, frequency_penalty, function_call, functions, logit_bias, logprobs, max_tokens, n, presence_penalty, response_format, seed, stop, stream, stream_options, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout)
558 @required_args(["messages", "model"], ["messages", "model", "stream"])
559 def create(
560 self,
(...)
588 timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
589 ) -> ChatCompletion | Stream[ChatCompletionChunk]:
--> 590 return self._post(
591 "/chat/completions",
592 body=maybe_transform(
593 {
594 "messages": messages,
595 "model": model,
596 "frequency_penalty": frequency_penalty,
597 "function_call": function_call,
598 "functions": functions,
599 "logit_bias": logit_bias,
600 "logprobs": logprobs,
601 "max_tokens": max_tokens,
602 "n": n,
603 "presence_penalty": presence_penalty,
604 "response_format": response_format,
605 "seed": seed,
606 "stop": stop,
607 "stream": stream,
608 "stream_options": stream_options,
609 "temperature": temperature,
610 "tool_choice": tool_choice,
611 "tools": tools,
612 "top_logprobs": top_logprobs,
613 "top_p": top_p,
614 "user": user,
615 },
616 completion_create_params.CompletionCreateParams,
617 ),
618 options=make_request_options(
619 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
620 ),
621 cast_to=ChatCompletion,
622 stream=stream or False,
623 stream_cls=Stream[ChatCompletionChunk],
624 )
File ~/.local/lib/python3.10/site-packages/openai/_base_client.py:1240, in SyncAPIClient.post(self, path, cast_to, body, options, files, stream, stream_cls)
1226 def post(
1227 self,
1228 path: str,
(...)
1235 stream_cls: type[_StreamT] | None = None,
1236 ) -> ResponseT | _StreamT:
1237 opts = FinalRequestOptions.construct(
1238 method="post", url=path, json_data=body, files=to_httpx_files(files), **options
1239 )
-> 1240 return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
File ~/.local/lib/python3.10/site-packages/openai/_base_client.py:921, in SyncAPIClient.request(self, cast_to, options, remaining_retries, stream, stream_cls)
912 def request(
913 self,
914 cast_to: Type[ResponseT],
(...)
919 stream_cls: type[_StreamT] | None = None,
920 ) -> ResponseT | _StreamT:
--> 921 return self._request(
922 cast_to=cast_to,
923 options=options,
924 stream=stream,
925 stream_cls=stream_cls,
926 remaining_retries=remaining_retries,
927 )
File ~/.local/lib/python3.10/site-packages/openai/_base_client.py:1005, in SyncAPIClient._request(self, cast_to, options, remaining_retries, stream, stream_cls)
1003 if retries > 0 and self._should_retry(err.response):
1004 err.response.close()
-> 1005 return self._retry_request(
1006 options,
1007 cast_to,
1008 retries,
1009 err.response.headers,
1010 stream=stream,
1011 stream_cls=stream_cls,
1012 )
1014 # If the response is streamed then we need to explicitly read the response
1015 # to completion before attempting to access the response text.
1016 if not err.response.is_closed:
File ~/.local/lib/python3.10/site-packages/openai/_base_client.py:1053, in SyncAPIClient._retry_request(self, options, cast_to, remaining_retries, response_headers, stream, stream_cls)
1049 # In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a
1050 # different thread if necessary.
1051 time.sleep(timeout)
-> 1053 return self._request(
1054 options=options,
1055 cast_to=cast_to,
1056 remaining_retries=remaining,
1057 stream=stream,
1058 stream_cls=stream_cls,
1059 )
File ~/.local/lib/python3.10/site-packages/openai/_base_client.py:1005, in SyncAPIClient._request(self, cast_to, options, remaining_retries, stream, stream_cls)
1003 if retries > 0 and self._should_retry(err.response):
1004 err.response.close()
-> 1005 return self._retry_request(
1006 options,
1007 cast_to,
1008 retries,
1009 err.response.headers,
1010 stream=stream,
1011 stream_cls=stream_cls,
1012 )
1014 # If the response is streamed then we need to explicitly read the response
1015 # to completion before attempting to access the response text.
1016 if not err.response.is_closed:
File ~/.local/lib/python3.10/site-packages/openai/_base_client.py:1053, in SyncAPIClient._retry_request(self, options, cast_to, remaining_retries, response_headers, stream, stream_cls)
1049 # In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a
1050 # different thread if necessary.
1051 time.sleep(timeout)
-> 1053 return self._request(
1054 options=options,
1055 cast_to=cast_to,
1056 remaining_retries=remaining,
1057 stream=stream,
1058 stream_cls=stream_cls,
1059 )
File ~/.local/lib/python3.10/site-packages/openai/_base_client.py:1020, in SyncAPIClient._request(self, cast_to, options, remaining_retries, stream, stream_cls)
1017 err.response.read()
1019 log.debug("Re-raising status error")
-> 1020 raise self._make_status_error_from_response(err.response) from None
1022 return self._process_response(
1023 cast_to=cast_to,
1024 options=options,
(...)
1027 stream_cls=stream_cls,
1028 )
RateLimitError: Error code: 429 - {'error': {'message': 'You exceeded your current quota, please check your plan and billing details. For more information on this error, read the docs: https://platform.openai.com/docs/guides/error-codes/api-errors.', 'type': 'insufficient_quota', 'param': None, 'code': 'insufficient_quota'}}
%% Cell type:code id:6cf7d4e4-173c-497e-a254-c9278a9c6e13 tags:
``` python
```
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment