tuned prompt
llama.py
@@ -13,7 +13,7 @@ register_dummy()
 def initialize_config(inference: Inference) -> Terminal:
 
     # systemmessage at the very begin of the chat. Will be concatenated with the automatic tool usage descriptions
-    system_prompt = "Hold a casual conversation with the user. Keep responses short at max 5 sentences and on point. Answer using markdown to the user. When providing code examples, avoid comments which provide no additional information."
+    system_prompt = "Hold a casual conversation with the user. Keep responses short at max 5 sentences and on point. Answer using markdown to the user. When providing code examples, avoid comments which provide no additional information. Do not summarize."
     current_date_and_time = datetime.datetime.now().strftime("Current date is %Y-%m-%d and its %H:%M %p right now.")
     append_toolcalls = False
     if append_toolcalls:
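For context, a minimal sketch of how the tuned prompt might be assembled downstream, assuming a hypothetical helper build_system_message and a placeholder tool_descriptions string that are not part of this commit; only system_prompt, current_date_and_time, and append_toolcalls appear in the hunk above, and the actual concatenation logic is not shown:

import datetime

def build_system_message(append_toolcalls: bool = False) -> str:
    # Tuned prompt from this commit, including the newly added "Do not summarize." instruction
    system_prompt = (
        "Hold a casual conversation with the user. Keep responses short at max 5 sentences "
        "and on point. Answer using markdown to the user. When providing code examples, "
        "avoid comments which provide no additional information. Do not summarize."
    )
    # Same strftime pattern as in the diff
    current_date_and_time = datetime.datetime.now().strftime(
        "Current date is %Y-%m-%d and its %H:%M %p right now."
    )
    message = f"{system_prompt}\n{current_date_and_time}"
    if append_toolcalls:
        # Hypothetical: the source comment says the system message is concatenated with
        # the automatic tool usage descriptions, but their origin is not visible in this hunk.
        tool_descriptions = "Available tools: (generated tool usage descriptions go here)"
        message = f"{message}\n{tool_descriptions}"
    return message

print(build_system_message(append_toolcalls=False))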