Python package restructuring
.gitignore (vendored): 3 changes
@@ -1,4 +1,5 @@
 /model/*
 *.prof
 __pycache__
-*.venv
+*.venv
+*.egg-info
.vscode/launch.json (vendored): 2 changes
@@ -15,7 +15,7 @@
             "name": "PyDebug: __main__.py",
             "type": "debugpy",
             "request": "launch",
-            "program": "__main__.py",
+            "program": "chatbug/__main__.py",
             "console": "integratedTerminal"
         }
     ]
chatbug/__init__.py (new file): 0 lines
@@ -1,6 +1,7 @@
 print("running __main__.-py")
 
-from llama import main
+from chatbug.llama import main
 
+
 if __name__ == "__main__":
     main()
@@ -1,7 +1,7 @@
 
 
-from inference import Inference
-from modelconfig import Modelconfig
+from chatbug.inference import Inference
+from chatbug.modelconfig import Modelconfig
 
 
 def main():
@@ -1,9 +1,9 @@
 import time
 import json
 import random
-from tool_helper import tool_list, parse_and_execute_tool_call
-from inference import Inference, torch_reseed
-from file_append import check_append_file
+from chatbug.tool_helper import tool_list, parse_and_execute_tool_call
+from chatbug.inference import Inference, torch_reseed
+from chatbug.file_append import check_append_file
 
 
 
@@ -14,10 +14,10 @@ from transformers.cache_utils import (
 )
 import torch
 import time
-import utils
 import re
 import os
-from modelconfig import Modelconfig
+import chatbug.utils as utils
+from chatbug.modelconfig import Modelconfig
 
 torch.set_num_threads(os.cpu_count()) # Adjust this to the number of threads/cores you have
 
@@ -1,9 +1,9 @@
-from inference import Inference
-from modelconfig import Modelconfig
 import time
 import nvidia_smi
 import torch
 import gc
+from chatbug.inference import Inference
+from chatbug.modelconfig import Modelconfig
 
 
 def empty_cuda():
@@ -1,10 +1,11 @@
 
-from tool_helper import tool_list
-from tool_functions import register_dummy
-from inference import Inference
 
 import datetime
-import model_selection
-from generation_loop import Terminal, msg
+from chatbug.tool_helper import tool_list
+from chatbug.tool_functions import register_dummy
+from chatbug.inference import Inference
+from chatbug.generation_loop import Terminal, msg
+from chatbug import model_selection
 
+
 register_dummy()
@@ -35,9 +36,11 @@ def initialize_config(inference: Inference) -> Terminal:
     return terminal
 
 
 
-if __name__ == "__main__":
+def main():
     inference = Inference(model_selection.get_model())
     terminal = initialize_config(inference)
-    terminal.join()
+    terminal.join()
 
+
+if __name__ == "__main__":
+    main()
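The main() extracted above lines up with the commented-out console_scripts entry added in setup.py further down. A minimal sketch of how such an entry point would resolve, assuming this hunk edits the chatbug.app module named there (the file name is not shown in this view):

# Hypothetical wiring of the commented-out entry point 'chatbug=chatbug.app:main':
from chatbug.app import main   # assumption: this hunk belongs to chatbug/app.py

main()   # builds Inference(model_selection.get_model()) and joins the Terminal, as in the hunk above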
chatbug/matheval/__init__.py (new file): 3 lines
@@ -0,0 +1,3 @@
+from chatbug.matheval import ast
+from chatbug.matheval import interpreter
+from chatbug.matheval import lexer
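A brief usage sketch (not part of the commit) of how these package-level re-exports are consumed; the calls mirror the ones used in tool_functions.py later in this diff:

import chatbug.matheval as matheval

tokens = matheval.lexer.tokenize("2 + 3 * 4")   # lexer re-exported by chatbug/matheval/__init__.py
parser = matheval.ast.Parser()                  # ast re-exported the same way
tree = parser.parse(tokens)
result = matheval.interpreter.interpret(tree)   # returns a string such as "2 + 3 * 4 = 14" (illustrative)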
@@ -1,6 +1,5 @@
 
-import math_lexer as lexer
-from math_lexer import Token
+from chatbug.matheval import lexer
+from chatbug.matheval.lexer import Token
 
-
 class Statement:
@@ -1,10 +1,11 @@
-import math_ast as ast
 
 
+
 from sympy.parsing.sympy_parser import parse_expr
 from sympy.core.numbers import Integer, One, Zero
 from sympy import symbols, Eq, solveset, linsolve, nonlinsolve
 from sympy.core.symbol import Symbol
+from chatbug.matheval import ast
 
 
 def interpret(statement: ast.Statement) -> str:
@@ -1,10 +1,10 @@
 
-from modelconfig import Modelconfig
+from chatbug.modelconfig import Modelconfig
 
 
 def get_model() -> Modelconfig:
 
 
     # model: NousResearch/Hermes-3-Llama-3.2-3B
     # tokens: 315 tk
     # time: 94.360 s
@@ -1,10 +1,10 @@
 import random
 import datetime
-from tool_helper import tool
-import math_lexer
-import math_ast
-import math_interpreter
-import utils
+from chatbug.tool_helper import tool
+import chatbug.matheval as matheval
+# from chatbug.matheval import interpreter, lexer
+# from chatbug.matheval.ast import Parser
+import chatbug.utils as utils
 
 
 # @tool
@@ -39,10 +39,10 @@ def math_evaluate(expression: str):
     Args:
         expression: A valid arithmetic expression (e.g., '2 + 3 * 4'). The expression must not contain '='."""
     try:
-        tokens = math_lexer.tokenize(expression)
-        parser = math_ast.Parser()
+        tokens = matheval.lexer.tokenize(expression)
+        parser = matheval.ast.Parser()
         ast = parser.parse(tokens)
-        return math_interpreter.interpret(ast)
+        return matheval.interpreter.interpret(ast)
     except Exception as e:
         utils.print_error("Tool call evaluation failed. - " + str(e))
         return "Tool call evaluation failed."
@@ -58,10 +58,10 @@ Args:
         expression = "solve " + " and ".join(equations) + " for " + " and ".join(variables)
         print(expression)
 
-        tokens = math_lexer.tokenize(expression)
-        parser = math_ast.Parser()
+        tokens = lexer.tokenize(expression)
+        parser = ast.Parser()
         ast = parser.parse(tokens)
-        return math_interpreter.interpret(ast)
+        return interpreter.interpret(ast)
     except Exception as e:
         utils.print_error("Tool call evaluation failed. - " + str(e))
         return "Tool call evaluation failed."
@@ -2,7 +2,7 @@
 from typing import Callable, List, Optional
 import json
 import re
-import utils
+import chatbug.utils as utils
 
 tool_list = []
 
setup.py (new file): 22 lines
@@ -0,0 +1,22 @@
+from setuptools import setup, find_packages
+
+setup(
+    name='chatbug',
+    version='0.1.0',
+    description='A conversational AI chatbot',
+    author='Florin Tobler',
+    author_email='florin.tobler@hotmail.com',
+    packages=find_packages(exclude=["tests"]),
+    install_requires=[
+        'transformers',
+        'accelerate',
+        'bitsandbytes',
+        'pytest',
+        'pywebview',
+    ],
+    # entry_points={
+    #     'console_scripts': [
+    #         'chatbug=chatbug.app:main',
+    #     ],
+    # },
+)
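With setup.py in place the project can be installed as a package, so the chatbug.* imports used throughout this commit resolve outside the source tree as well. A minimal sketch, assuming an editable install such as pip install -e . (not shown in the commit):

# Quick smoke test after installing the package (assumption: editable install).
from chatbug.inference import Inference        # previously a top-level module
from chatbug import model_selection

inference = Inference(model_selection.get_model())   # same wiring as the new main() above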
@@ -1 +0,0 @@
-# empty
@@ -1,32 +1,20 @@
 import pytest
-import tests.helper as helper
+from tests import helper
 
 
 inference = None
-InferenceClass = None
 Tensor = None
 
 
 def prepare():
-    if InferenceClass == None:
-        test_import_inference_module_librarys()
-    if inference == None:
-        test_instantiate_inference_instance()
-
-
-def test_import_inference_module_librarys():
-    import inference
-    import torch
-    global InferenceClass
-    global Tensor
-    InferenceClass = inference.Inference
-    Tensor = torch.Tensor
-
-
-def test_instantiate_inference_instance():
-    if InferenceClass == None:
-        test_import_inference_module_librarys()
     global inference
-    inference = InferenceClass()
+    global Tensor
+    if inference == None:
+        from torch import Tensor as _Tensor
+        from chatbug.inference import Inference
+        from chatbug.model_selection import get_model
+        inference = Inference(get_model())
+        Tensor = _Tensor
 
 
 def test_tool_header_generation():
@@ -1,6 +1,6 @@
 import pytest
-import tool_helper
-import tests.helper as helper
+import chatbug.tool_helper as tool_helper
+from tests import helper
 
 
 
@@ -1,6 +1,6 @@
 import pytest
-import tool_functions
-
+import chatbug.tool_functions as tool_functions
+from tests import helper
 
 
 def test_math_evaluate_1():
@@ -28,6 +28,13 @@ def test_math_evaluate_5():
     result = tool_functions.math_evaluate("sin(pi/2) + cos(0)")
     assert result == "sin(pi/2) + cos(0) = 2"
 
+def test_math_evaluate_solve_a():
+    result = tool_functions.math_evaluate("solve 240=x*r+x*r^2+x*r^3+s and r=1.618 and s=5 for x, r, s")
+    assert result == "Solved equation system 240 = r**3*x + r**2*x + r*x + s, r = 1.61800000000000 and s = 5 for x=27.7393327937747=~27.739, r=1.61800000000000=~1.618 and s=5.00000000000000=~5.000."
+
+def test_math_evaluate_solve_b():
+    result = tool_functions.math_evaluate("solve 250=x+x*r+s and r=1.618 and s=0 for x, r, s")
+    assert result == "Solved equation system 250 = r*x + s + x, r = 1.61800000000000 and s = 0 for x=95.4927425515661=~95.493, r=1.61800000000000=~1.618 and s=0."
 
 
 
@@ -54,4 +61,3 @@ def test_math_solver_3b():
 def test_math_solver_4():
     result = tool_functions.math_evaluate("solve 2*x**3 + 3*y = 7 and x - y = 1 for x, y")
     assert result == "Solved equation system 2*x**3 + 3*y = 7 and x - y = 1 for x=~1.421 and y=~0.421."
-
@@ -1,7 +1,8 @@
 import pytest
-import tool_helper
+from chatbug import tool_helper
 from unittest import mock
-import tests.helper as helper
+from tests import helper
 import re
 
 
+
@@ -40,34 +41,34 @@ def test_match_and_extract_matching3_with_newline():
 
 
 def test_string_malformed_faulty():
-    with mock.patch("utils.print_error") as print_error_mock:
+    with mock.patch("chatbug.utils.print_error") as print_error_mock:
         result = tool_helper._execute_tool_call_str("{json_content}", [])
         assert result == None
         print_error_mock.assert_called_once() # this will check if the mocked function on the context was called.
 
 
 def test_tool_call_json_1():
-    with mock.patch("utils.print_error") as print_error_mock:
+    with mock.patch("chatbug.utils.print_error") as print_error_mock:
         result = tool_helper._execute_tool_call_json({"name": "tool_dummy", "arguments": {"a": 1, "b": "zwei"}}, [helper.tool_dummy, helper.tool_dummy2])
         assert result == "result_1_zwei"
         assert print_error_mock.call_count == 0
 
 
 def test_tool_call_json_2():
-    with mock.patch("utils.print_error") as print_error_mock:
+    with mock.patch("chatbug.utils.print_error") as print_error_mock:
         result = tool_helper._execute_tool_call_json({"name": "tool_dummy2", "arguments": {"text": "some_text"}}, [helper.tool_dummy, helper.tool_dummy2])
         assert result == "SOME_TEXT"
         assert print_error_mock.call_count == 0
 
 
 def test_tool_call_json_non_existing_call_check():
-    with mock.patch("utils.print_error") as print_error_mock:
+    with mock.patch("chatbug.utils.print_error") as print_error_mock:
         result = tool_helper._execute_tool_call_json({"name": "tool_dummy_which_is_not_existing", "arguments": {"text": "some_text"}}, [helper.tool_dummy, helper.tool_dummy2])
         assert result == None
         assert print_error_mock.call_count == 1 # this will check if the mocked function on the context was called.
 
 def test_tool_call_json_wrong_arguments_check():
-    with mock.patch("utils.print_error") as print_error_mock:
+    with mock.patch("chatbug.utils.print_error") as print_error_mock:
         result = tool_helper._execute_tool_call_json({"name": "tool_dummy", "arguments": {"a": "must_be_an_int_but_is_string", "b": "zwei"}}, [helper.tool_dummy, helper.tool_dummy2])
         assert result == None
         assert print_error_mock.call_count == 1 # this will check if the mocked function on the context was called.
@@ -75,7 +76,6 @@ def test_tool_call_json_wrong_arguments_check():
 
 
 def test_regex_multiline():
-    import re
     pattern = r"<start>(.*)</end>"
 
     # The text to search (spanning multiple lines)