improved append file
file_append.py (new file, +44 lines)
@@ -0,0 +1,44 @@
+import os
+
+
+def check_append_file(prompt: str) -> str:
+    if "@" in prompt:
+        parts = prompt.split(" ")
+        content = []
+        for part in parts:
+            if part.startswith("@"):
+                filename = part[1:]
+                try:
+                    if os.path.exists(filename):
+                        with open(filename, "r") as f:
+                            content.append("%s:'''\n%s'''" % (filename, f.read()))
+                except FileNotFoundError:
+                    print(f"File '{filename}' not found.")
+        content.append(prompt)
+        return "\n".join(content)
+    return prompt
+
+
+if __name__ == "__main__":
+    exit()  # exit early so the test code below is not triggered accidentally
+
+    # Create some sample files
+    with open("fmain.py", "w") as f:
+        f.write("# This is main.py\n")
+    with open("finference.py", "w") as f:
+        f.write("# This is inference.py\n")
+
+    # Test cases
+    test_prompts = [
+        "@fmain.py",
+        "@fmain.py @finference.py",
+        "@fnonexistent.py",
+        "@fmain.py @fnonexistent.py"
+    ]
+
+    for prompt in test_prompts:
+        print(f"Testing prompt: {prompt}")
+        result = check_append_file(prompt)
+        print(f"Result: {result}")
+        print("-" * 20)
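For reference, a sketch of what the new helper produces once the sample files from the test block exist; the output below is reconstructed from the "%s:'''\n%s'''" template above, not captured from a real run:

>>> from file_append import check_append_file
>>> print(check_append_file("@fmain.py what does this file do?"))
fmain.py:'''
# This is main.py
'''
@fmain.py what does this file do?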
@@ -3,20 +3,9 @@ import json
 import random
 from tool_helper import tool_list, parse_and_execute_tool_call
 from inference import Inference, torch_reseed
+from file_append import check_append_file
 
 
-def check_append_file(prompt: str) -> str:
-    if prompt.startswith("@"):
-        prompt = prompt[1:]  # Remove the '@'
-        filename = prompt.split(" ")[0]
-        try:
-            with open(filename, "r") as f:
-                content = f.read()
-            return "'''%s'''\n\n%s" % (content, prompt)
-        except:
-            print(f"File '{filename}' not found.")
-    return prompt
-
-
 def msg(role: str, content: str) -> dict:
     return {"role": role, "content": content}
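The hunk above swaps the old inline helper for the new module: the removed version handled only a single leading "@file" token and stripped the "@" from the echoed prompt, while the imported version expands every "@file" token anywhere in the prompt. A rough before/after sketch, assuming the sample files written by the test block in file_append.py:

# old (removed) helper: first token only, file content prepended
check_append_file("@fmain.py explain")
# -> "'''# This is main.py\n'''\n\nfmain.py explain"

# new helper: every @-token expanded, original prompt appended last
check_append_file("compare @fmain.py and @finference.py")
# -> "fmain.py:'''\n# This is main.py\n'''\n"
#    "finference.py:'''\n# This is inference.py\n'''\n"
#    "compare @fmain.py and @finference.py"   (one string; shown wrapped)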
@@ -84,22 +73,22 @@ class Terminal:
             print("")
 
         elif input_text.startswith("/history"):
-            history = self.inference.tokenize(self.message, tokenize=False)
+            history = self.inference.tokenize(self.messages, tokenize=False)
             # history = tokenizer.apply_chat_template(self.message, return_tensors="pt", tokenize=False, add_generation_prompt=False)
             print(history)
 
         elif input_text.startswith("/undo"):
-            if len(self.message) > 2:
+            if len(self.messages) > 2:
                 print("undo latest prompt")
-                self.message = self.message[:-2]
+                self.messages = self.messages[:-2]
             else:
                 print("cannot undo because there are not enough self.message on history.")
             print("")
 
         elif input_text.startswith("/regen"):
-            if len(self.message) >= 2:
+            if len(self.messages) >= 2:
                 print("regenerating message (not working)")
-                self.message = self.message[:-1]
+                self.messages = self.messages[:-1]
             seed = random.randint(0, 2**32 - 1)  # Generate a random seed
             torch_reseed(seed)
             self.append_generate_chat(None)
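The differing slice sizes in /undo and /regen make sense under the usual alternating-roles chat history; a minimal sketch (illustrative list, not taken from the repository):

# history layout assumed: [system, user, assistant, user, assistant]
messages = ["sys", "usr1", "asst1", "usr2", "asst2"]
messages[:-2]  # /undo: drop the last user+assistant pair -> ["sys", "usr1", "asst1"]
messages[:-1]  # /regen: drop only the last assistant reply, keep "usr2" to regenerate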
@@ -119,8 +108,8 @@ class Terminal:
             self.append_generate_chat(content)
 
         elif input_text.startswith("/auto"):
-            message_backup = self.message
-            self.message = [self.roleflip]
+            message_backup = self.messages
+            self.messages = [self.roleflip]
             for m in self.message_backup:
                 role = m["role"]
                 content = m["content"]
@@ -157,7 +146,7 @@ class Terminal:
         elif input_text.startswith("/load"):
             with open("messages.json", "r") as f:
                 new_messages = json.load(f)
-                messages = [self.messages[0]] + new_messages[1:]
+                self.messages = [self.messages[0]] + new_messages[1:]
 
         elif input_text.startswith("/help"):
             print("!<prompt> answer as 'tool' in <tool_response> tags")
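The /load hunk looks like a rename but is likely a genuine fix: the removed line appears to bind a plain local `messages` that the handler never uses again, so loading silently did nothing, whereas assigning to `self.messages` actually applies the loaded history, and `[self.messages[0]]` keeps the active system prompt rather than the saved one. A hypothetical illustration:

current = [{"role": "system", "content": "A"}, {"role": "user", "content": "old"}]
loaded  = [{"role": "system", "content": "B"}, {"role": "user", "content": "new"}]
current = [current[0]] + loaded[1:]
# -> system prompt "A" is kept, conversation "new" is taken from the file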