# app.py
"""Gradio demo: continue a user-provided story with TNSA's NGen3 (GPT) model.

Loads a local checkpoint at import time, then serves a Blocks UI where the
user supplies a title and the opening of a story, and the model samples a
continuation.
"""

import torch
import gradio as gr

from model import GPT, GPTConfig, sample

# === Model Initialization ===
variant = "120M"
model_path = "pytorch_model.pt"

config = GPTConfig(variant)
model = GPT(config)
# weights_only=True restricts unpickling to plain tensors/containers, which is
# all a state_dict needs — prevents arbitrary code execution from a tampered
# checkpoint file. (Requires torch >= 1.13; default behavior from torch 2.6.)
model.load_state_dict(
    torch.load(model_path, map_location="cpu", weights_only=True)
)
model.eval()  # inference mode: disables dropout / training-only behavior


# === Story Generation Function ===
def continue_story(title, story_body, max_tokens, temperature, top_k):
    """Sample a continuation of *story_body* under *title*.

    Args:
        title: Story title; kept verbatim in the prompt.
        story_body: Opening text of the story to continue.
        max_tokens: Number of tokens to sample.
        temperature: Softmax temperature for sampling.
        top_k: Top-k cutoff for sampling.

    Returns:
        The sampled continuation, stripped of surrounding whitespace, or a
        short hint string when the story body is empty.
    """
    body = story_body.strip()
    if not body:
        # Guard: sampling from an empty story produces confusing output;
        # give the user actionable feedback instead.
        return "Please write the beginning of your story first."

    prompt = f"Title: {title}\n\nStory:\n{body}\n\nNext part:"
    # no_grad: inference only — skip autograd bookkeeping.
    with torch.no_grad():
        continuation = sample(
            model=model,
            prompt=prompt,
            config=config,
            length=max_tokens,
            temperature=temperature,
            top_k=top_k,
            instruct_mode=config.instruct,
        )
    return continuation.strip()


# === Gradio App Interface ===
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue")) as demo:
    gr.Markdown(
        """
        # ๐Ÿ“š NGen3's Story Weaver
        Continue your creative story with **TNSA's NGen3** โ€” title remains sacred, only the story evolves.
        Provide a **title** and **initial part of the story**, then let the AI continue it!
        """
    )

    with gr.Row():
        with gr.Column():
            title = gr.Textbox(
                label="๐Ÿ“– Story Title",
                placeholder="e.g., The Last Light on Europa",
                max_lines=1,
            )
            story_body = gr.Textbox(
                label="โœ๏ธ Beginning of Story",
                lines=10,
                placeholder="Type the beginning of your story here...",
            )
        with gr.Column():
            max_tokens = gr.Slider(20, 400, value=150, step=10, label="๐Ÿ”ข Max Tokens")
            temperature = gr.Slider(0.5, 1.5, value=1.0, step=0.1, label="๐ŸŒก๏ธ Temperature")
            top_k = gr.Slider(1, 100, value=40, step=1, label="๐ŸŽฏ Top-k Sampling")

    generate_btn = gr.Button("โœจ Continue Story")
    output = gr.Textbox(label="๐Ÿ“œ Story Continuation", lines=12)

    generate_btn.click(
        fn=continue_story,
        inputs=[title, story_body, max_tokens, temperature, top_k],
        outputs=output,
    )

    gr.Markdown("---\n๐Ÿ”— Built with ๐Ÿ’œ by **TNSA AI** ยท NGen3 Series ")

# === Launch App ===
if __name__ == "__main__":
    demo.launch()