try out some more models
This commit is contained in:
37
download_model.py
Normal file
37
download_model.py
Normal file
@@ -0,0 +1,37 @@
|
||||
|
||||
|
||||
from inference import Inference
|
||||
from modelconfig import Modelconfig
|
||||
|
||||
|
||||
def main():
    """Instantiate a series of candidate LLMs (triggering their download).

    Each entry pairs a Hugging Face repo id with the quantization keyword
    passed to Modelconfig; the approximate parameter count is noted per entry.
    Models are constructed in order, one at a time.
    """
    # (repo id, quantization kwargs) — comment gives approx. param count
    candidates = [
        ("NousResearch/Hermes-3-Llama-3.2-3B", {"load_in_8bit": True}),        # 3.21B params
        ("unsloth/Llama-3.2-1B", {"load_in_8bit": True}),                      # 1.24B params
        ("unsloth/Llama-3.2-3B-Instruct", {"load_in_8bit": True}),             # 3.21B params
        ("unsloth/llama-3-8b-bnb-4bit", {"load_in_4bit": True}),               # 4.65B params
        ("unsloth/Llama-3.2-3B-Instruct-GGUF", {"load_in_4bit": True}),        # 3.21B params
        ("unsloth/gemma-2-9b-it-bnb-4bit", {"load_in_4bit": True}),            # 5.21B params
        ("unsloth/Qwen2.5-7B-Instruct-bnb-4bit", {"load_in_4bit": True}),      # 4.46B params
        ("unsloth/Qwen2.5-3B-Instruct", {"load_in_4bit": True}),               # 3.09B params
        ("unsloth/mistral-7b-instruct-v0.3-bnb-4bit", {"load_in_4bit": True}), # 3.87B params
    ]

    for repo_id, quant_kwargs in candidates:
        # Same call shape as before: positional repo id plus one quant flag.
        Inference(Modelconfig(repo_id, **quant_kwargs))
# Run the download sequence only when executed as a script, not on import.
if __name__ == "__main__":
    main()
Reference in New Issue
Block a user