try out some more models

This commit is contained in:
2025-01-13 20:47:48 +01:00
parent 677eb6d0ea
commit 19870cdea8
5 changed files with 288 additions and 47 deletions

37
download_model.py Normal file
View File

@@ -0,0 +1,37 @@
from inference import Inference
from modelconfig import Modelconfig
def main():
    """Download/cache each candidate model by instantiating it once.

    Each entry is constructed purely for its side effect: building an
    ``Inference`` around a ``Modelconfig`` pulls the model weights into the
    local cache. The models are kept in a data table instead of repeated
    constructor calls so adding or removing a candidate is a one-line edit.
    """
    # (HuggingFace repo id, quantization kwargs) — approx. param counts noted.
    models = [
        ("NousResearch/Hermes-3-Llama-3.2-3B", {"load_in_8bit": True}),        # 3.21B params
        ("unsloth/Llama-3.2-1B", {"load_in_8bit": True}),                      # 1.24B params
        ("unsloth/Llama-3.2-3B-Instruct", {"load_in_8bit": True}),             # 3.21B params
        ("unsloth/llama-3-8b-bnb-4bit", {"load_in_4bit": True}),               # 4.65B params
        ("unsloth/Llama-3.2-3B-Instruct-GGUF", {"load_in_4bit": True}),        # 3.21B params
        ("unsloth/gemma-2-9b-it-bnb-4bit", {"load_in_4bit": True}),            # 5.21B params
        ("unsloth/Qwen2.5-7B-Instruct-bnb-4bit", {"load_in_4bit": True}),      # 4.46B params
        ("unsloth/Qwen2.5-3B-Instruct", {"load_in_4bit": True}),               # 3.09B params
        ("unsloth/mistral-7b-instruct-v0.3-bnb-4bit", {"load_in_4bit": True}), # 3.87B params
    ]
    for repo_id, quant_kwargs in models:
        # Result is intentionally discarded — instantiation alone triggers
        # the download. NOTE(review): assumes Modelconfig accepts
        # load_in_8bit/load_in_4bit as keyword args, as the original calls show.
        Inference(Modelconfig(repo_id, **quant_kwargs))
if __name__ == "__main__":
    main()