experiment pipeline for fgm

%% Cell type:code id:7209584a-2748-4473-acc8-51a062f7b2cd tags:

``` python
# set up environment: load API keys from .env and create one client per provider
from openai import OpenAI
import os
import dotenv
from groq import Groq

dotenv.load_dotenv()
clientOAI = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
clientG = Groq(api_key=os.environ.get("GROQ_API_KEY"))
```
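
%% Cell type:markdown tags:

Both SDKs expose the same `chat.completions` interface, so a small provider-agnostic helper keeps the experiment cells below short. This is a minimal sketch added for illustration, not part of the original pipeline; `chat` is a hypothetical name.

%% Cell type:code tags:

``` python
def chat(client, model, user, system=None):
    # build the message list; the system prompt is optional
    messages = []
    if system is not None:
        messages.append({"role": "system", "content": system})
    messages.append({"role": "user", "content": user})
    # both the OpenAI and Groq clients accept the same call signature here
    completion = client.chat.completions.create(messages=messages, model=model)
    return completion.choices[0].message.content
```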
%% Cell type:code id:b8b9daad tags:

``` python
# groq example: basic chat completion with a single user message
chat_completion = clientG.chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": "What is 5 plus 3",
        }
    ],
    model="llama3-8b-8192",
)
print(chat_completion.choices[0].message.content)
```

%% Output

5 + 3 = 8
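
%% Cell type:markdown tags:

The response object carries more than the message text; token usage is handy when comparing providers. A minimal sketch, assuming the Groq response follows the OpenAI-compatible `usage` schema:

%% Cell type:code tags:

``` python
# inspect token accounting on the previous response (assumed fields:
# prompt_tokens, completion_tokens, total_tokens, per the OpenAI-compatible schema)
usage = chat_completion.usage
print(usage.prompt_tokens, usage.completion_tokens, usage.total_tokens)
```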
%% Cell type:code id:d249618b tags:

``` python
# groq example: a system message steers the persona and tone of the reply
chat_completion = clientG.chat.completions.create(
    messages=[
        {
            "role": "system",
            "content": "You are a medical expert but not good at math",
        },
        {
            "role": "user",
            "content": "What is 5 plus 3",
        }
    ],
    model="mixtral-8x7b-32768",
)
print(chat_completion.choices[0].message.content)
```
%% Output
Sure, I can certainly help you with that. The sum of 5 plus 3 is equal to 8. Let me know if you have any other medical questions that I can assist you with!
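
%% Cell type:markdown tags:

The steering effect is easiest to see side by side. A sketch using the hypothetical `chat()` helper defined above, sending the same question with and without the system message:

%% Cell type:code tags:

``` python
# compare the same question with and without the steering system message
question = "What is 5 plus 3"
for system in (None, "You are a medical expert but not good at math"):
    print(f"system={system!r}")
    print(chat(clientG, "mixtral-8x7b-32768", question, system=system))
    print()
```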
%% Cell type:code id:6f05f369-af37-45df-8a33-675f177290b4 tags:

``` python
# openai example: fails because the account has no free-token allowance and the key has no credit added
completion = clientOAI.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "system", "content": "You are a poetic assistant, skilled in explaining complex programming concepts with creative flair."},
        {"role": "user", "content": "Compose a poem that explains the concept of recursion in programming."}
    ]
)
print(completion.choices[0].message)
```
%% Output

---------------------------------------------------------------------------
RateLimitError                            Traceback (most recent call last)
Cell In[5], line 2
      1 # openai example: fails because the account has no free-token allowance and the key has no credit added
----> 2 completion = clientOAI.chat.completions.create(
      3     model="gpt-3.5-turbo",
      4     messages=[
      5         {"role": "system", "content": "You are a poetic assistant, skilled in explaining complex programming concepts with creative flair."},
      6         {"role": "user", "content": "Compose a poem that explains the concept of recursion in programming."}
      7     ]
      8 )
     10 print(completion.choices[0].message)

File ~/.local/lib/python3.10/site-packages/openai/_utils/_utils.py:277, in required_args.<locals>.inner.<locals>.wrapper(*args, **kwargs)
    275 msg = f"Missing required argument: {quote(missing[0])}"
    276 raise TypeError(msg)
--> 277 return func(*args, **kwargs)

File ~/.local/lib/python3.10/site-packages/openai/resources/chat/completions.py:590, in Completions.create(self, messages, model, frequency_penalty, function_call, functions, logit_bias, logprobs, max_tokens, n, presence_penalty, response_format, seed, stop, stream, stream_options, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout)
    558 @required_args(["messages", "model"], ["messages", "model", "stream"])
    559 def create(
    560     self,
   (...)
    588     timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    589 ) -> ChatCompletion | Stream[ChatCompletionChunk]:
--> 590     return self._post(
    591         "/chat/completions",
    592         body=maybe_transform(
    593             {
    594                 "messages": messages,
    595                 "model": model,
    596                 "frequency_penalty": frequency_penalty,
    597                 "function_call": function_call,
    598                 "functions": functions,
    599                 "logit_bias": logit_bias,
    600                 "logprobs": logprobs,
    601                 "max_tokens": max_tokens,
    602                 "n": n,
    603                 "presence_penalty": presence_penalty,
    604                 "response_format": response_format,
    605                 "seed": seed,
    606                 "stop": stop,
    607                 "stream": stream,
    608                 "stream_options": stream_options,
    609                 "temperature": temperature,
    610                 "tool_choice": tool_choice,
    611                 "tools": tools,
    612                 "top_logprobs": top_logprobs,
    613                 "top_p": top_p,
    614                 "user": user,
    615             },
    616             completion_create_params.CompletionCreateParams,
    617         ),
    618         options=make_request_options(
    619             extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
    620         ),
    621         cast_to=ChatCompletion,
    622         stream=stream or False,
    623         stream_cls=Stream[ChatCompletionChunk],
    624     )

File ~/.local/lib/python3.10/site-packages/openai/_base_client.py:1240, in SyncAPIClient.post(self, path, cast_to, body, options, files, stream, stream_cls)
   1226 def post(
   1227     self,
   1228     path: str,
   (...)
   1235     stream_cls: type[_StreamT] | None = None,
   1236 ) -> ResponseT | _StreamT:
   1237     opts = FinalRequestOptions.construct(
   1238         method="post", url=path, json_data=body, files=to_httpx_files(files), **options
   1239     )
-> 1240     return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))

File ~/.local/lib/python3.10/site-packages/openai/_base_client.py:921, in SyncAPIClient.request(self, cast_to, options, remaining_retries, stream, stream_cls)
    912 def request(
    913     self,
    914     cast_to: Type[ResponseT],
   (...)
    919     stream_cls: type[_StreamT] | None = None,
    920 ) -> ResponseT | _StreamT:
--> 921     return self._request(
    922         cast_to=cast_to,
    923         options=options,
    924         stream=stream,
    925         stream_cls=stream_cls,
    926         remaining_retries=remaining_retries,
    927     )

File ~/.local/lib/python3.10/site-packages/openai/_base_client.py:1005, in SyncAPIClient._request(self, cast_to, options, remaining_retries, stream, stream_cls)
   1003 if retries > 0 and self._should_retry(err.response):
   1004     err.response.close()
-> 1005     return self._retry_request(
   1006         options,
   1007         cast_to,
   1008         retries,
   1009         err.response.headers,
   1010         stream=stream,
   1011         stream_cls=stream_cls,
   1012     )
   1014 # If the response is streamed then we need to explicitly read the response
   1015 # to completion before attempting to access the response text.
   1016 if not err.response.is_closed:

File ~/.local/lib/python3.10/site-packages/openai/_base_client.py:1053, in SyncAPIClient._retry_request(self, options, cast_to, remaining_retries, response_headers, stream, stream_cls)
   1049 # In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a
   1050 # different thread if necessary.
   1051 time.sleep(timeout)
-> 1053 return self._request(
   1054     options=options,
   1055     cast_to=cast_to,
   1056     remaining_retries=remaining,
   1057     stream=stream,
   1058     stream_cls=stream_cls,
   1059 )

File ~/.local/lib/python3.10/site-packages/openai/_base_client.py:1005, in SyncAPIClient._request(self, cast_to, options, remaining_retries, stream, stream_cls)
   1003 if retries > 0 and self._should_retry(err.response):
   1004     err.response.close()
-> 1005     return self._retry_request(
   1006         options,
   1007         cast_to,
   1008         retries,
   1009         err.response.headers,
   1010         stream=stream,
   1011         stream_cls=stream_cls,
   1012     )
   1014 # If the response is streamed then we need to explicitly read the response
   1015 # to completion before attempting to access the response text.
   1016 if not err.response.is_closed:

File ~/.local/lib/python3.10/site-packages/openai/_base_client.py:1053, in SyncAPIClient._retry_request(self, options, cast_to, remaining_retries, response_headers, stream, stream_cls)
   1049 # In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a
   1050 # different thread if necessary.
   1051 time.sleep(timeout)
-> 1053 return self._request(
   1054     options=options,
   1055     cast_to=cast_to,
   1056     remaining_retries=remaining,
   1057     stream=stream,
   1058     stream_cls=stream_cls,
   1059 )

File ~/.local/lib/python3.10/site-packages/openai/_base_client.py:1020, in SyncAPIClient._request(self, cast_to, options, remaining_retries, stream, stream_cls)
   1017     err.response.read()
   1019 log.debug("Re-raising status error")
-> 1020 raise self._make_status_error_from_response(err.response) from None
   1022 return self._process_response(
   1023     cast_to=cast_to,
   1024     options=options,
   (...)
   1027     stream_cls=stream_cls,
   1028 )

RateLimitError: Error code: 429 - {'error': {'message': 'You exceeded your current quota, please check your plan and billing details. For more information on this error, read the docs: https://platform.openai.com/docs/guides/error-codes/api-errors.', 'type': 'insufficient_quota', 'param': None, 'code': 'insufficient_quota'}}
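
%% Cell type:markdown tags:

The 429 here is an `insufficient_quota` error, so the SDK's built-in retries cannot help; switching providers can. A minimal sketch of a cross-provider fallback, using the hypothetical `chat()` helper from the setup section (`chat_with_fallback` is likewise a hypothetical name):

%% Cell type:code tags:

``` python
import openai

def chat_with_fallback(user, system=None):
    # try OpenAI first; on quota/rate-limit errors, fall back to Groq
    try:
        return chat(clientOAI, "gpt-3.5-turbo", user, system=system)
    except openai.RateLimitError as err:
        print(f"OpenAI request failed ({err.__class__.__name__}); falling back to Groq")
        return chat(clientG, "llama3-8b-8192", user, system=system)
```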
%% Cell type:code id:6cf7d4e4-173c-497e-a254-c9278a9c6e13 tags:

``` python
```