From b8c0979a3ee0b1399f32ad6ca4e60e8e9320c597 Mon Sep 17 00:00:00 2001
From: Florin Tobler
Date: Thu, 2 Jan 2025 04:59:57 +0100
Subject: [PATCH] add tool response tags

---
 llama.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llama.py b/llama.py
index 5749356..3f1780d 100644
--- a/llama.py
+++ b/llama.py
@@ -10,7 +10,7 @@ messages = []
 inference = None
 
 # systemmessage at the very begin of the chat. Will be concatenated with the automatic tool usage descriptions
-systemmessage = "Hold a casual conversation with the user. Keep responses short at max 3 sentences."
+systemmessage = "Hold a casual conversation with the user. Keep responses short at max 3 sentences. Answer using markdown to the user."
 
 # system message for role flip so the model automatically answers for the user
 roleflip = {"role": "system", "content": "Keep the conversation going, ask for more information on the subject. Keep messages short at max 1-2 sentences. Do not thank and say goodbye."}
@@ -50,7 +50,7 @@ def append_generate_chat(input_text: str, role="user"):
 	tool_result = parse_and_execute_tool_call(out_text, tool_list)
 	if tool_result != None:
 		# tool call happened
-		# tool_result = "<tool_response>%s</tool_response>" % tool_result
+		tool_result = "<tool_response>%s</tool_response>" % tool_result # depending on the chat template the tool response tags must or must not be passed. :(
 		append_generate_chat(tool_result, role="tool")
 
 
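
Note (not part of the patch): whether the <tool_response> wrapper is required depends on the model's chat template, which is why the new line carries the inline comment. Below is a minimal sketch of making that decision explicit instead of hard-coded, assuming a hypothetical REQUIRES_TOOL_TAGS flag set per template; parse_and_execute_tool_call and append_generate_chat are the existing helpers from llama.py shown in the hunk above.

    REQUIRES_TOOL_TAGS = True  # assumption: templates in the Hermes/Qwen style expect the tags, others do not

    def handle_tool_result(out_text: str, tool_list) -> None:
        # run the tool call found in the model output, if any
        tool_result = parse_and_execute_tool_call(out_text, tool_list)
        if tool_result is not None:
            # tool call happened; wrap only when the chat template expects the tags
            if REQUIRES_TOOL_TAGS:
                tool_result = "<tool_response>%s</tool_response>" % tool_result
            append_generate_chat(tool_result, role="tool")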