diff --git a/llama.py b/llama.py
index 5749356..3f1780d 100644
--- a/llama.py
+++ b/llama.py
@@ -10,7 +10,7 @@ messages = []
inference = None
# systemmessage at the very beginning of the chat. Will be concatenated with the automatically generated tool usage descriptions
-systemmessage = "Hold a casual conversation with the user. Keep responses short at max 3 sentences."
+systemmessage = "Hold a casual conversation with the user. Keep responses short at max 3 sentences. Answer the user using markdown."
# system message for role flip so the model automatically answers for the user
roleflip = {"role": "system", "content": "Keep the conversation going, ask for more information on the subject. Keep messages short at max 1-2 sentences. Do not thank and say goodbye."}
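For context on how the role flip is meant to work: the history's user/assistant roles are swapped and `roleflip` is appended as the final system turn, so the model's next completion reads as the user's reply. The sketch below illustrates that idea; `build_roleflip_messages` is a hypothetical helper, not part of this patch, and the real llama.py logic may differ.

```python
def build_roleflip_messages(messages: list[dict]) -> list[dict]:
    """Hypothetical sketch: swap roles so the model writes the next *user* turn."""
    swapped = []
    for msg in messages:
        if msg["role"] == "user":
            swapped.append({"role": "assistant", "content": msg["content"]})
        elif msg["role"] == "assistant":
            swapped.append({"role": "user", "content": msg["content"]})
        # system/tool turns are dropped; the roleflip instruction replaces them
    return swapped + [roleflip]  # roleflip is the system message defined above
```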
@@ -50,7 +50,7 @@ def append_generate_chat(input_text: str, role="user"):
tool_result = parse_and_execute_tool_call(out_text, tool_list)
if tool_result is not None:
# tool call happened
- # tool_result = "%s" % tool_result
+ tool_result = "%s" % tool_result  # coerce the tool result to a plain string before feeding it back
# depending on the chat template, the tool response tags may or may not need to be passed. :(
append_generate_chat(tool_result, role="tool")
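For readers without the full file: this hunk relies on `parse_and_execute_tool_call` returning `None` when the model's output contains no tool call, and on the result being stringified before it is fed back through `append_generate_chat(tool_result, role="tool")` (whose `input_text` parameter is typed `str`). A plausible shape for that contract is sketched below; the `<tool_call>` tag format and the body are assumptions, not the actual implementation.

```python
import json
import re

def parse_and_execute_tool_call_sketch(out_text: str, tool_list: list):
    """Return the matched tool's result, or None if out_text has no tool call."""
    match = re.search(r"<tool_call>(.*?)</tool_call>", out_text, re.DOTALL)
    if match is None:
        return None  # plain chat message, no tool involved
    call = json.loads(match.group(1))  # e.g. {"name": "get_time", "arguments": {}}
    tools = {tool.__name__: tool for tool in tool_list}  # assumes plain functions
    return tools[call["name"]](**call.get("arguments", {}))  # may return non-str
```

Because the tool can return any type (int, dict, ...), the uncommented `"%s" % tool_result` line keeps the recursive `append_generate_chat` call type-correct.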