Draft: Resolve "Llama3.2 Konfigurationstest"

Open Erik Jonas Hartnick requested to merge 2-llama3-2-konfigurationstest into main
14 files changed, +249 −5
- added a .env config for testing Llama3.2 on the pool PC (TODO: rename to .env.llama3.2.example)
- added the langchain-ollama Python package and changed the Chat and Embeddings calls accordingly (mainly the constructors; see the sketch after this list)
- switched preprocessing to Llama3.2 via Ollama; the model has to be configured in CHESS/src/database_utils/db_catalog/preprocess.py
- added an engine config (the models and their configuration per agent); preprocessing (embedding API, token generation) does not use this
- changed retrieve_entity.py to also use OllamaEmbeddings, because the embeddings are configured separately
- adapted the run scripts to work with the configured env
- adapted the tool/agent configs to work with the new Ollama config
- added a place to put Ollama and the models locally, mainly to prepare for different storage locations (data buckets on the server)
- added some entries to .gitignore
- TODO: documentation of the changes and configs above
- TODO: documentation on how and where to get the datasets and where to put them
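
For context, a minimal sketch of what the switched constructors look like with langchain-ollama; the model tag, base URL, and temperature below are illustrative placeholders, not the values checked in with this MR:

# Illustrative only: the actual model name and host come from the .env / engine config in this MR.
from langchain_ollama import ChatOllama, OllamaEmbeddings

# Chat model used by the agents (constructor changed to the langchain-ollama class)
chat = ChatOllama(
    model="llama3.2",                   # assumed local Ollama model tag
    base_url="http://localhost:11434",  # default Ollama endpoint, adjust for the pool PC
    temperature=0.2,
)

# Embeddings used by preprocessing and retrieve_entity.py
embeddings = OllamaEmbeddings(
    model="llama3.2",
    base_url="http://localhost:11434",
)

print(chat.invoke("SELECT 1; -- smoke test").content)
print(len(embeddings.embed_query("smoke test")))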
setting_name: CHESS_IR_CG_UT

team_agents:
  information_retriever:
    #engine: 'gpt-4o-mini'
    engine: 'meta-llama/llama3-2'
    tools:
      extract_keywords:
        template_name: 'extract_keywords'
        engine_config:
          #engine_name: 'gpt-4o-mini'
          engine_name: 'meta-llama/llama3-2'
          temperature: 0.2
        parser_name: 'python_list_output_parser'
      retrieve_entity: {}
      retrieve_context:
        top_k: 5

  candidate_generator:
    #engine: 'gpt-4o-mini'
    engine: 'meta-llama/llama3-2'
    tools:
      generate_candidate:
        generator_configs:
          - template_name: 'generate_candidate_one'
            engine_config:
              #engine_name: 'gpt-4o-mini'
              engine_name: 'meta-llama/llama3-2'
              temperature: 0.5
            parser_name: 'generate_candidate_gemini_markdown_cot'
            sampling_count: 10
          - template_name: 'generate_candidate_two'
            engine_config:
              #engine_name: 'gpt-4o-mini'
              engine_name: 'meta-llama/llama3-2'
              temperature: 0.5
            parser_name: 'generate_candidate_gemini_markdown_cot'
            sampling_count: 10
      revise:
        template_name: 'revise_one'
        engine_config:
          #engine_name: 'gpt-4o-mini'
          engine_name: 'meta-llama/llama3-2'
          temperature: 0.0
        parser_name: 'revise_new'

  unit_tester:
    #engine: 'gpt-4o-mini'
    engine: 'meta-llama/llama3-2'
    tools:
      generate_unit_test:
        template_name: 'generate_unit_tests'
        engine_config:
          #engine_name: 'gpt-4o-mini'
          engine_name: 'meta-llama/llama3-2'
          temperature: 0.8
        parser_name: 'generate_unit_tests'
        unit_test_count: 20
        sampling_count: 1
      evaluate:
        template_name: 'evaluate'
        engine_config:
          #engine_name: 'gpt-4o-mini'
          engine_name: 'meta-llama/llama3-2'
          temperature: 0.0
        parser_name: 'evaluate'
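
To make the mapping between this config and the code concrete, here is a minimal sketch of how one engine_config block could be fed into a ChatOllama constructor. chat_model_from_engine_config, the name mapping, and the file path are hypothetical and not part of CHESS, which uses its own model factory.

# Hypothetical helper: illustrates the field -> constructor mapping only.
import yaml
from langchain_ollama import ChatOllama

def chat_model_from_engine_config(engine_config: dict) -> ChatOllama:
    # 'meta-llama/llama3-2' is the engine name used in the config above; which
    # Ollama model tag it resolves to (e.g. 'llama3.2') is an assumption here.
    name_map = {"meta-llama/llama3-2": "llama3.2"}
    return ChatOllama(
        model=name_map.get(engine_config["engine_name"], engine_config["engine_name"]),
        temperature=engine_config.get("temperature", 0.0),
    )

with open("CHESS_IR_CG_UT.yaml") as f:  # path is illustrative
    cfg = yaml.safe_load(f)

keyword_cfg = cfg["team_agents"]["information_retriever"]["tools"]["extract_keywords"]
llm = chat_model_from_engine_config(keyword_cfg["engine_config"])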