from tool_helper import tool_list
from tool_functions import register_dummy
from inference import Inference
import datetime
import model_selection
from generation_loop import Terminal, msg

# Register the dummy tools at import time so tool_list is populated before use.
register_dummy()


def initialize_config(inference: Inference) -> Terminal:
    """Build a Terminal wired up with all system prompts used by the chat loop.

    Args:
        inference: Generation backend; also supplies the tool-use header text
            when tool-call prompting is enabled.

    Returns:
        A Terminal whose opening system message and auxiliary prompts
        (role flip, summarization, title generation) are populated.
    """
    # systemmessage at the very begin of the chat. Will be concatenated with
    # the automatic tool usage descriptions when append_toolcalls is enabled.
    system_prompt = "Hold a casual conversation with the user. Keep responses short at max 5 sentences and on point. Answer using markdown to the user. When providing code examples, avoid comments which provide no additional information. Do not summarize."
    # %I (12-hour clock) pairs with %p (AM/PM); %H would yield e.g. "13:45 PM".
    current_date_and_time = datetime.datetime.now().strftime("Current date is %Y-%m-%d and its %I:%M %p right now.")

    append_toolcalls = False
    if append_toolcalls:
        systemmessage = msg("system", system_prompt + "\n" + current_date_and_time + "\n" + inference.generate_tool_use_header(tool_list))
    else:
        systemmessage = msg("system", system_prompt + "\n" + current_date_and_time)

    terminal = Terminal(inference, systemmessage)

    # system message for role flip so the model automatically answers for the user
    terminal.roleflip = msg("system", "Keep the conversation going, ask for more information on the subject. Keep messages short at max 1-2 sentences. Do not thank and say goodbye.")

    # system messages and user message to bring the model to summarize the entire conversation
    terminal.summarize = msg("system", "Summarize the conversation as a single, cohesive paragraph. Avoid using any bullet points, numbers, or list formatting. Write in plain text with natural sentences that flow together seamlessly.")
    terminal.summarize_user = msg("system", "Can you summarize the conversation?")

    # system message to create a conversation title
    terminal.title_prompt = msg("system", "Please create a very short and descriptive title or label for this conversation. Maximum 2-5 words. Use only plain text, avoid numbering, special characters, or unnecessary formatting-focus on clarity and brevity.")

    return terminal


if __name__ == "__main__":
    inference = Inference(model_selection.get_model())
    terminal = initialize_config(inference)
    terminal.join()