from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch
import gradio as gr

# Load model and tokenizer
model_name = "Amitabhdas/code-summarizer-python"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Define summarization function
def summarize_code_with_attention(code_snippet):
    inputs = tokenizer(
        code_snippet,
        max_length=512,
        truncation=True,
        padding="max_length",
        return_tensors="pt"
    )
    with torch.no_grad():
        outputs = model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,  # mask out padding tokens during generation
            max_length=150,
            num_beams=4,
            early_stopping=True,
            output_attentions=True,
            return_dict_in_generate=True
        )
    summary = tokenizer.decode(outputs.sequences[0], skip_special_tokens=True)
    attention = str(outputs.encoder_attentions)  # Convert tensors to string so the output is JSON-serializable
    input_shape = list(inputs.input_ids.shape)
    return {
        "summary": summary,
        "attention": attention,
        "input_shape": input_shape
    }

# Launch the Gradio interface
demo = gr.Interface(
    fn=summarize_code_with_attention,
    inputs=gr.Textbox(lines=10, label="Enter Python code"),
    outputs="json",
    title="Code Summarizer API"
)
demo.launch(share=True)
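
Since the interface is exposed as an API, it can also be queried programmatically. Below is a minimal client-side sketch, assuming the gradio_client package is installed and the app is reachable at a URL of your choosing (the placeholder URL and the example snippet are illustrative; "/predict" is Gradio's default endpoint name for a single gr.Interface).

from gradio_client import Client

# Point the client at the running demo: the local URL, the share URL printed
# by demo.launch(share=True), or the Space name. The URL below is a placeholder.
client = Client("http://127.0.0.1:7860")

result = client.predict(
    "def add(a, b):\n    return a + b",  # example Python code to summarize
    api_name="/predict"                  # default endpoint for a single gr.Interface
)
print(result)  # dict with "summary", "attention", and "input_shape"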