Compare commits
5 commits: 7f0cb49156 ... 7224111a0b
Author | SHA1 | Date
---|---|---
| 7224111a0b |
| 0c022d4731 |
| a697f49698 |
| 3218e7eb63 |
| ef789375c8 |
.gitignore (vendored, 3 changes)

@@ -1,4 +1,5 @@
 /model/*
 *.prof
 __pycache__
 *.venv
+*.egg-info
.vscode/launch.json (vendored, 2 changes)

@@ -15,7 +15,7 @@
             "name": "PyDebug: __main__.py",
             "type": "debugpy",
             "request": "launch",
-            "program": "__main__.py",
+            "program": "chatbug/__main__.py",
             "console": "integratedTerminal"
         }
     ]
chatbug/__init__.py (new file, empty)

@@ -1,6 +1,7 @@
 print("running __main__.-py")
 
-from llama import main
+from chatbug.llama import main
 
+
 if __name__ == "__main__":
     main()
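Note: with the sources moved into a chatbug package, absolute imports such as `from chatbug.llama import main` only resolve when the repository root is on sys.path, e.g. when the program is started as `python -m chatbug` from the repo root; this is presumably also why the launch configuration above now points at chatbug/__main__.py instead of a bare __main__.py.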
@@ -1,7 +1,7 @@
 
 
-from inference import Inference
-from modelconfig import Modelconfig
+from chatbug.inference import Inference
+from chatbug.modelconfig import Modelconfig
 
 
 def main():
chatbug/file_append.py (new file, +44 lines)

@@ -0,0 +1,44 @@
+import os
+
+
+def check_append_file(prompt: str) -> str:
+    if "@" in prompt:
+        parts = prompt.split(" ")
+        content = []
+        for part in parts:
+            if part.startswith("@"):
+                filename = part[1:]
+                try:
+                    if os.path.exists(filename):
+                        with open(filename, "r") as f:
+                            content.append("%s:'''\n%s'''" % (filename, f.read()))
+                except FileNotFoundError:
+                    print(f"File '{filename}' not found.")
+        content.append(prompt)
+        return "\n".join(content)
+    return prompt
+
+
+
+if __name__ == "__main__":
+    exit() # not accidentally trigger it
+
+    # Create some sample files
+    with open("fmain.py", "w") as f:
+        f.write("# This is main.py\n")
+    with open("finference.py", "w") as f:
+        f.write("# This is inference.py\n")
+
+    # Test cases
+    test_prompts = [
+        "@fmain.py",
+        "@fmain.py @finference.py",
+        "@fnonexistent.py",
+        "@fmain.py @fnonexistent.py"
+    ]
+
+    for prompt in test_prompts:
+        print(f"Testing prompt: {prompt}")
+        result = check_append_file(prompt)
+        print(f"Result: {result}")
+        print("-" * 20)
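Note: check_append_file() scans the prompt for @filename tokens and inlines each existing file's contents ahead of the original prompt. A minimal usage sketch (the file name is an illustrative assumption):

    from chatbug.file_append import check_append_file

    # yields "notes.txt:'''\n<file contents>'''" followed by the original prompt
    expanded = check_append_file("summarize @notes.txt please")

Since os.path.exists() guards the open(), the FileNotFoundError branch is mostly defensive; names that do not exist are skipped silently outside the demo block.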
@@ -1,22 +1,11 @@
 import time
 import json
 import random
-from tool_helper import tool_list, parse_and_execute_tool_call
-from inference import Inference, torch_reseed
+from chatbug.tool_helper import tool_list, parse_and_execute_tool_call
+from chatbug.inference import Inference, torch_reseed
+from chatbug.file_append import check_append_file
 
 
-def check_append_file(prompt: str) -> str:
-    if prompt.startswith("@"):
-        prompt = prompt[1:] # Remove the '@'
-        filename = prompt.split(" ")[0]
-        try:
-            with open(filename, "r") as f:
-                content = f.read()
-            return "'''%s'''\n\n%s" % (content, prompt)
-        except:
-            print(f"File '{filename}' not found.")
-    return prompt
-
-
 def msg(role: str, content: str) -> dict:
     return {"role": role, "content": content}
@@ -84,22 +73,22 @@ class Terminal:
             print("")
 
         elif input_text.startswith("/history"):
-            history = self.inference.tokenize(self.message, tokenize=False)
+            history = self.inference.tokenize(self.messages, tokenize=False)
             # history = tokenizer.apply_chat_template(self.message, return_tensors="pt", tokenize=False, add_generation_prompt=False)
             print(history)
 
         elif input_text.startswith("/undo"):
-            if len(self.message) > 2:
+            if len(self.messages) > 2:
                 print("undo latest prompt")
-                self.message = self.message[:-2]
+                self.message = self.messages[:-2]
             else:
                 print("cannot undo because there are not enough self.message on history.")
             print("")
 
         elif input_text.startswith("/regen"):
-            if len(self.message) >= 2:
+            if len(self.messages) >= 2:
                 print("regenerating message (not working)")
-                self.message = self.message[:-1]
+                self.messages = self.messages[:-1]
                 seed = random.randint(0, 2**32 - 1) # Generate a random seed
                 torch_reseed(seed)
                 self.append_generate_chat(None)
@@ -119,8 +108,8 @@ class Terminal:
             self.append_generate_chat(content)
 
         elif input_text.startswith("/auto"):
-            message_backup = self.message
-            self.message = [self.roleflip]
+            message_backup = self.messages
+            self.messages = [self.roleflip]
             for m in self.message_backup:
                 role = m["role"]
                 content = m["content"]
@@ -153,11 +142,11 @@ class Terminal:
         elif input_text.startswith("/save"):
             with open("messages.json", "w") as f:
                 json.dump(self.messages, f, indent=4)
 
         elif input_text.startswith("/load"):
             with open("messages.json", "r") as f:
                 new_messages = json.load(f)
-                messages = [self.messages[0]] + new_messages[1:]
+                self.messages = [self.messages[0]] + new_messages[1:]
 
         elif input_text.startswith("/help"):
             print("!<prompt> answer as 'tool' in <tool_response> tags")
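Note: the /load change above is a real fix, not just part of the message-to-messages rename: the old line bound the rebuilt history to a local name `messages`, so the data loaded from messages.json was discarded as soon as the handler returned. Assigning to self.messages actually replaces the chat history while keeping the original system message at index 0. The /undo hunk, by contrast, still assigns to self.message (singular) after this change, which looks like a leftover of the same rename.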
@@ -14,10 +14,10 @@ from transformers.cache_utils import (
 )
 import torch
 import time
-import utils
 import re
 import os
-from modelconfig import Modelconfig
+import chatbug.utils as utils
+from chatbug.modelconfig import Modelconfig
 
 torch.set_num_threads(os.cpu_count()) # Adjust this to the number of threads/cores you have
 
@@ -101,7 +101,7 @@ class Inference:
         if print_stdout:
             print(out_text)
         return outputs, out_text
 
 
     def generate_incremental_2(self, input_ids: torch.Tensor, print_stdout:bool=True) -> tuple[torch.Tensor, str]:
         generated_tokens = input_ids
@@ -180,7 +180,7 @@ class Inference:
         while True:
             # Call the model with the current tokens
             outputs = self.model(
                 input_ids=generated_tokens,
                 use_cache=True,
                 num_beams = 1
                 # past_key_values=past_key_values
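Note: in the loop above the full generated_tokens sequence is fed back each step while the past_key_values argument stays commented out, so the whole prefix is recomputed on every iteration. A minimal sketch of what cached incremental decoding typically looks like with transformers (greedy decoding; the model name is a placeholder assumption):

    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tok = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")

    ids = tok("Hello", return_tensors="pt").input_ids
    past = None
    for _ in range(20):
        out = model(input_ids=ids, use_cache=True, past_key_values=past)
        past = out.past_key_values                        # reuse cached keys/values
        next_id = out.logits[:, -1, :].argmax(-1, keepdim=True)
        print(tok.decode(next_id[0]), end="", flush=True)
        ids = next_id                                     # feed only the new token next step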
@@ -1,16 +1,16 @@
-from inference import Inference
-from modelconfig import Modelconfig
 import time
 import nvidia_smi
 import torch
 import gc
+from chatbug.inference import Inference
+from chatbug.modelconfig import Modelconfig
 
 
 def empty_cuda():
     while True:
         gc.collect()
         torch.cuda.empty_cache()
         time.sleep(0.5)
         vram = nvidia_smi.get_gpu_stats()["memory_used"]
         print("vram: %d MB" % vram)
         if vram < 200:
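Note: nvidia_smi here appears to be a project-local helper module rather than a PyPI package. If only the current process's allocations matter, torch can report them directly; a rough substitute sketch (process-local figures, unlike whole-GPU nvidia-smi readings):

    import torch

    used_mb = torch.cuda.memory_allocated() // 2**20      # tensors currently allocated
    reserved_mb = torch.cuda.memory_reserved() // 2**20   # cached by the allocator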
@@ -1,10 +1,11 @@
 
-from tool_helper import tool_list
-from tool_functions import register_dummy
-from inference import Inference
 import datetime
-import model_selection
-from generation_loop import Terminal, msg
+from chatbug.tool_helper import tool_list
+from chatbug.tool_functions import register_dummy
+from chatbug.inference import Inference
+from chatbug.generation_loop import Terminal, msg
+from chatbug import model_selection
 
 
 register_dummy()
@@ -13,7 +14,7 @@ register_dummy()
 def initialize_config(inference: Inference) -> Terminal:
 
     # systemmessage at the very begin of the chat. Will be concatenated with the automatic tool usage descriptions
-    system_prompt = "Hold a casual conversation with the user. Keep responses short at max 5 sentences and on point. Answer using markdown to the user. When providing code examples, avoid comments which provide no additional information."
+    system_prompt = "Hold a casual conversation with the user. Keep responses short at max 5 sentences and on point. Answer using markdown to the user. When providing code examples, avoid comments which provide no additional information. Do not summarize."
     current_date_and_time = datetime.datetime.now().strftime("Current date is %Y-%m-%d and its %H:%M %p right now.")
     append_toolcalls = False
     if append_toolcalls:
@@ -25,19 +26,21 @@ def initialize_config(inference: Inference) -> Terminal:
 
     # system message for role flip so the model automatically answers for the user
     terminal.roleflip = msg("system", "Keep the conversation going, ask for more information on the subject. Keep messages short at max 1-2 sentences. Do not thank and say goodbye.")
 
     # system messages and user message to bring the model to summarize the entire conversation
     terminal.summarize = msg("system", "Summarize the conversation as a single, cohesive paragraph. Avoid using any bullet points, numbers, or list formatting. Write in plain text with natural sentences that flow together seamlessly.")
     terminal.summarize_user = msg("system", "Can you summarize the conversation?")
 
     # system message to create a conversation title
     terminal.title_prompt = msg("system", "Please create a very short and descriptive title or label for this conversation. Maximum 2-5 words. Use only plain text, avoid numbering, special characters, or unnecessary formatting-focus on clarity and brevity.")
     return terminal
 
 
-if __name__ == "__main__":
+def main():
     inference = Inference(model_selection.get_model())
     terminal = initialize_config(inference)
     terminal.join()
 
 
+if __name__ == "__main__":
+    main()
chatbug/matheval/__init__.py (new file, +3 lines)

@@ -0,0 +1,3 @@
+from chatbug.matheval import ast
+from chatbug.matheval import interpreter
+from chatbug.matheval import lexer
@@ -1,6 +1,5 @@
-import math_lexer as lexer
-from math_lexer import Token
-
+from chatbug.matheval import lexer
+from chatbug.matheval.lexer import Token
 
 
 class Statement:
@@ -1,10 +1,11 @@
-import math_ast as ast
 
 
 from sympy.parsing.sympy_parser import parse_expr
 from sympy.core.numbers import Integer, One, Zero
 from sympy import symbols, Eq, solveset, linsolve, nonlinsolve
 from sympy.core.symbol import Symbol
+from chatbug.matheval import ast
 
 
 def interpret(statement: ast.Statement) -> str:
@@ -1,10 +1,10 @@
 
-from modelconfig import Modelconfig
+from chatbug.modelconfig import Modelconfig
 
 
 
 def get_model() -> Modelconfig:
 
     # model: NousResearch/Hermes-3-Llama-3.2-3B
     # tokens: 315 tk
     # time: 94.360 s
@@ -1,10 +1,10 @@
 import random
 import datetime
-from tool_helper import tool
-import math_lexer
-import math_ast
-import math_interpreter
-import utils
+from chatbug.tool_helper import tool
+import chatbug.matheval as matheval
+# from chatbug.matheval import interpreter, lexer
+# from chatbug.matheval.ast import Parser
+import chatbug.utils as utils
 
 
 # @tool
@@ -39,10 +39,10 @@ def math_evaluate(expression: str):
     Args:
         expression: A valid arithmetic expression (e.g., '2 + 3 * 4'). The expression must not contain '='."""
     try:
-        tokens = math_lexer.tokenize(expression)
-        parser = math_ast.Parser()
+        tokens = matheval.lexer.tokenize(expression)
+        parser = matheval.ast.Parser()
         ast = parser.parse(tokens)
-        return math_interpreter.interpret(ast)
+        return matheval.interpreter.interpret(ast)
     except Exception as e:
         utils.print_error("Tool call evaluation failed. - " + str(e))
         return "Tool call evaluation failed."
@@ -58,10 +58,10 @@ Args:
     expression = "solve " + " and ".join(equations) + " for " + " and ".join(variables)
     print(expression)
 
-        tokens = math_lexer.tokenize(expression)
-        parser = math_ast.Parser()
+        tokens = lexer.tokenize(expression)
+        parser = ast.Parser()
         ast = parser.parse(tokens)
-        return math_interpreter.interpret(ast)
+        return interpreter.interpret(ast)
     except Exception as e:
         utils.print_error("Tool call evaluation failed. - " + str(e))
         return "Tool call evaluation failed."
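Note: this second hunk leaves the solver path referencing bare lexer, ast and interpreter, but after the import rewrite above only `import chatbug.matheval as matheval` is active (the direct `from chatbug.matheval import interpreter, lexer` line is commented out), so these names would raise NameError at call time. The qualified form used in math_evaluate would be consistent; a sketch:

    tokens = matheval.lexer.tokenize(expression)
    parser = matheval.ast.Parser()
    tree = parser.parse(tokens)   # a distinct name also avoids rebinding "ast"
    return matheval.interpreter.interpret(tree)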
@@ -2,7 +2,7 @@
 from typing import Callable, List, Optional
 import json
 import re
-import utils
+import chatbug.utils as utils
 
 tool_list = []
 
setup.py (new file, +22 lines)

@@ -0,0 +1,22 @@
+from setuptools import setup, find_packages
+
+setup(
+    name='chatbug',
+    version='0.1.0',
+    description='A conversational AI chatbot',
+    author='Florin Tobler',
+    author_email='florin.tobler@hotmail.com',
+    packages=find_packages(exclude=["tests"]),
+    install_requires=[
+        'transformers',
+        'accelerate',
+        'bitsandbytes',
+        'pytest',
+        'pywebview',
+    ],
+    # entry_points={
+    #     'console_scripts': [
+    #         'chatbug=chatbug.app:main',
+    #     ],
+    # },
+)
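Note: with this setup.py in place, an editable install (pip install -e . from the repo root) is what makes the `chatbug.*` absolute imports used throughout this changeset resolve without sys.path tweaks. The commented-out console_scripts entry expects a main callable at chatbug.app:main, the same zero-argument `def main()` wrapper pattern introduced above; presumably it stays disabled until that entry point settles.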
@@ -1 +0,0 @@
-# empty
@@ -1,32 +1,20 @@
 import pytest
-import tests.helper as helper
+from tests import helper
 
 
 inference = None
-InferenceClass = None
 Tensor = None
 
 
 def prepare():
-    if InferenceClass == None:
-        test_import_inference_module_librarys()
-    if inference == None:
-        test_instantiate_inference_instance()
-
-
-def test_import_inference_module_librarys():
-    import inference
-    import torch
-    global InferenceClass
-    global Tensor
-    InferenceClass = inference.Inference
-    Tensor = torch.Tensor
-
-
-def test_instantiate_inference_instance():
-    if InferenceClass == None:
-        test_import_inference_module_librarys()
     global inference
-    inference = InferenceClass()
+    global Tensor
+    if inference == None:
+        from torch import Tensor as _Tensor
+        from chatbug.inference import Inference
+        from chatbug.model_selection import get_model
+        inference = Inference(get_model())
+        Tensor = _Tensor
 
 
 def test_tool_header_generation():
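Note: the rewritten prepare() lazily builds one shared Inference instance behind module-level globals. The idiomatic pytest equivalent would be a session-scoped fixture; a sketch under that assumption:

    import pytest

    @pytest.fixture(scope="session")
    def inference():
        from chatbug.inference import Inference
        from chatbug.model_selection import get_model
        return Inference(get_model())  # constructed once, shared by every test that requests it

Tests would then accept `inference` as a parameter instead of calling prepare().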
@@ -1,6 +1,6 @@
 import pytest
-import tool_helper
-import tests.helper as helper
+import chatbug.tool_helper as tool_helper
+from tests import helper
 
 
 
@@ -1,6 +1,6 @@
 import pytest
-import tool_functions
-
+import chatbug.tool_functions as tool_functions
+from tests import helper
 
 
 def test_math_evaluate_1():
@@ -28,6 +28,13 @@ def test_math_evaluate_5():
     result = tool_functions.math_evaluate("sin(pi/2) + cos(0)")
     assert result == "sin(pi/2) + cos(0) = 2"
 
+def test_math_evaluate_solve_a():
+    result = tool_functions.math_evaluate("solve 240=x*r+x*r^2+x*r^3+s and r=1.618 and s=5 for x, r, s")
+    assert result == "Solved equation system 240 = r**3*x + r**2*x + r*x + s, r = 1.61800000000000 and s = 5 for x=27.7393327937747=~27.739, r=1.61800000000000=~1.618 and s=5.00000000000000=~5.000."
+
+def test_math_evaluate_solve_b():
+    result = tool_functions.math_evaluate("solve 250=x+x*r+s and r=1.618 and s=0 for x, r, s")
+    assert result == "Solved equation system 250 = r*x + s + x, r = 1.61800000000000 and s = 0 for x=95.4927425515661=~95.493, r=1.61800000000000=~1.618 and s=0."
 
 
 
@@ -54,4 +61,3 @@ def test_math_solver_3b():
 def test_math_solver_4():
     result = tool_functions.math_evaluate("solve 2*x**3 + 3*y = 7 and x - y = 1 for x, y")
     assert result == "Solved equation system 2*x**3 + 3*y = 7 and x - y = 1 for x=~1.421 and y=~0.421."
-
@@ -1,7 +1,8 @@
 import pytest
-import tool_helper
+from chatbug import tool_helper
 from unittest import mock
-import tests.helper as helper
+from tests import helper
+import re
 
 
 
@@ -40,34 +41,34 @@ def test_match_and_extract_matching3_with_newline():
 
 
 def test_string_malformed_faulty():
-    with mock.patch("utils.print_error") as print_error_mock:
+    with mock.patch("chatbug.utils.print_error") as print_error_mock:
         result = tool_helper._execute_tool_call_str("{json_content}", [])
         assert result == None
         print_error_mock.assert_called_once() # this will check if the mocked function on the context was called.
 
 
 def test_tool_call_json_1():
-    with mock.patch("utils.print_error") as print_error_mock:
+    with mock.patch("chatbug.utils.print_error") as print_error_mock:
         result = tool_helper._execute_tool_call_json({"name": "tool_dummy", "arguments": {"a": 1, "b": "zwei"}}, [helper.tool_dummy, helper.tool_dummy2])
         assert result == "result_1_zwei"
         assert print_error_mock.call_count == 0
 
 
 def test_tool_call_json_2():
-    with mock.patch("utils.print_error") as print_error_mock:
+    with mock.patch("chatbug.utils.print_error") as print_error_mock:
         result = tool_helper._execute_tool_call_json({"name": "tool_dummy2", "arguments": {"text": "some_text"}}, [helper.tool_dummy, helper.tool_dummy2])
         assert result == "SOME_TEXT"
         assert print_error_mock.call_count == 0
 
 
 def test_tool_call_json_non_existing_call_check():
-    with mock.patch("utils.print_error") as print_error_mock:
+    with mock.patch("chatbug.utils.print_error") as print_error_mock:
         result = tool_helper._execute_tool_call_json({"name": "tool_dummy_which_is_not_existing", "arguments": {"text": "some_text"}}, [helper.tool_dummy, helper.tool_dummy2])
         assert result == None
         assert print_error_mock.call_count == 1 # this will check if the mocked function on the context was called.
 
 def test_tool_call_json_wrong_arguments_check():
-    with mock.patch("utils.print_error") as print_error_mock:
+    with mock.patch("chatbug.utils.print_error") as print_error_mock:
         result = tool_helper._execute_tool_call_json({"name": "tool_dummy", "arguments": {"a": "must_be_an_int_but_is_string", "b": "zwei"}}, [helper.tool_dummy, helper.tool_dummy2])
         assert result == None
         assert print_error_mock.call_count == 1 # this will check if the mocked function on the context was called.
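Note: these target-string updates follow mock's patch-where-it-is-looked-up rule: the string passed to mock.patch must name the attribute on the module the code under test actually reads at call time. Since tool_helper now does `import chatbug.utils as utils`, utils.print_error resolves against the chatbug.utils module object, so that is the attribute to replace. Condensed illustration:

    from unittest import mock
    import chatbug.tool_helper as tool_helper

    # patch the attribute on the module where the lookup happens
    with mock.patch("chatbug.utils.print_error") as print_error_mock:
        tool_helper._execute_tool_call_str("{json_content}", [])
        print_error_mock.assert_called_once()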
@@ -75,7 +76,6 @@ def test_tool_call_json_wrong_arguments_check():
 
 
 def test_regex_multiline():
-    import re
     pattern = r"<start>(.*)</end>"
 
     # The text to search (spanning multiple lines)