From 126f4a3fad0b1537edbb021de04624f361e09460 Mon Sep 17 00:00:00 2001
From: Florin Tobler
Date: Tue, 31 Dec 2024 16:24:26 +0100
Subject: [PATCH] gpt2 tests

---
 gpt2.py | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)
 create mode 100644 gpt2.py

diff --git a/gpt2.py b/gpt2.py
new file mode 100644
index 0000000..ac9f179
--- /dev/null
+++ b/gpt2.py
@@ -0,0 +1,24 @@
+import cProfile
+import pstats
+from transformers import pipeline
+import time
+
+import torch
+torch.set_num_threads(24)  # Adjust this to the number of CPU threads/cores available
+
+# Initialize the GPT-2 text-generation pipeline on CPU
+generator = pipeline('text-generation', model='gpt2', device_map="cpu")
+
+def run_inference():
+    t_start = time.time()
+    # Generate text from a fixed prompt
+    generated_text = generator("below is a simple python function to extract email addresses from a string:", max_length=500, num_return_sequences=1)
+
+    # Print the generated text and the elapsed time
+    print(generated_text[0]['generated_text'])
+    print("took %.3fs" % (time.time() - t_start))
+
+cProfile.run('run_inference()', 'profile_output.prof')
+
+p = pstats.Stats('profile_output.prof')
+p.sort_stats('cumulative').print_stats(30)  # Show the top 30 time-consuming functions
\ No newline at end of file
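
For reference: once profile_output.prof has been written, the saved stats can be re-read and re-sorted later without rerunning inference. A minimal sketch using only the standard library, assuming the profile file sits in the current directory:

    import pstats

    # Reload the saved profile and rank by time spent inside each
    # function itself (tottime), excluding time spent in callees
    p = pstats.Stats('profile_output.prof')
    p.sort_stats('tottime').print_stats(10)

Sorting by 'tottime' complements the script's 'cumulative' view: it surfaces the hot leaf functions rather than the high-level callers that merely contain them.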