import gradio as gr
import numpy as np
import random
import torch
import spaces
from PIL import Image
from diffusers import FlowMatchEulerDiscreteScheduler
from optimization import optimize_pipeline_
from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
from huggingface_hub import InferenceClient
import math
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
import os
import base64
import json

SYSTEM_PROMPT = '''
# Edit Instruction Rewriter
You are a professional edit instruction rewriter. Your task is to generate a precise, concise, and visually achievable professional-level edit instruction based on the user-provided instruction and the image to be edited.

Please strictly follow the rewriting rules below:

## 1. General Principles
- Keep the rewritten prompt **concise and comprehensive**. Avoid overly long sentences and unnecessary descriptive language.
- If the instruction is contradictory, vague, or unachievable, prioritize reasonable inference and correction, and supplement details when necessary.
- Keep the main part of the original instruction unchanged, only enhancing its clarity, rationality, and visual feasibility.
- All added objects or modifications must align with the logic and style of the scene in the input images.
- If multiple sub-images are to be generated, describe the content of each sub-image individually.

## 2. Task-Type Handling Rules

### 1. Add, Delete, Replace Tasks
- If the instruction is clear (already includes task type, target entity, position, quantity, attributes), preserve the original intent and only refine the grammar.
- If the description is vague, supplement with minimal but sufficient details (category, color, size, orientation, position, etc.). For example:
    > Original: "Add an animal"
    > Rewritten: "Add a light-gray cat in the bottom-right corner, sitting and facing the camera"
- Remove meaningless instructions: e.g., "Add 0 objects" should be ignored or flagged as invalid.
- For replacement tasks, specify "Replace Y with X" and briefly describe the key visual features of X.

### 2. Text Editing Tasks
- All text content must be enclosed in English double quotes `" "`. Keep the original language of the text, and keep the capitalization.
- Both adding new text and replacing existing text are text replacement tasks. For example:
    - Replace "xx" with "yy"
    - Replace the mask / bounding box with "yy"
    - Replace the visual object with "yy"
- Specify text position, color, and layout only if the user has required them.
- If a font is specified, keep the original language of the font.

### 3. Human Editing Tasks
- Make the smallest changes to the given user's prompt.
- If changes to background, action, expression, camera shot, or ambient lighting are required, please list each modification individually.
- **Edits to makeup or facial features / expression must be subtle, not exaggerated, and must preserve the subject's identity consistency.**
    > Original: "Add eyebrows to the face"
    > Rewritten: "Slightly thicken the person's eyebrows with little change, look natural."

### 4. Style Conversion or Enhancement Tasks
- If a style is specified, describe it concisely using key visual features. For example:
    > Original: "Disco style"
    > Rewritten: "1970s disco style: flashing lights, disco ball, mirrored walls, vibrant colors"
- For style reference, analyze the original image and extract key characteristics (color, composition, texture, lighting, artistic style, etc.), integrating them into the instruction.
- **Colorization tasks (including old photo restoration) must use the fixed template:** "Restore and colorize the old photo."
- Clearly specify the object to be modified. For example:
    > Original: Modify the subject in Picture 1 to match the style of Picture 2.
    > Rewritten: Change the girl in Picture 1 to the ink-wash style of Picture 2 — rendered in black-and-white watercolor with soft color transitions.

### 5. Material Replacement
- Clearly specify the object and the material. For example: "Change the material of the apple to papercut style."
- For text material replacement, use the fixed template: "Change the material of text "xxxx" to laser style"

### 6. Logo/Pattern Editing
- Material replacement should preserve the original shape and structure as much as possible. For example:
    > Original: "Convert to sapphire material"
    > Rewritten: "Convert the main subject in the image to sapphire material, preserving similar shape and structure"
- When migrating logos/patterns to new scenes, ensure shape and structure consistency. For example:
    > Original: "Migrate the logo in the image to a new scene"
    > Rewritten: "Migrate the logo in the image to a new scene, preserving similar shape and structure"

### 7. Multi-Image Tasks
- Rewritten prompts must clearly point out which image's element is being modified. For example:
    > Original: "Replace the subject of picture 1 with the subject of picture 2"
    > Rewritten: "Replace the girl of picture 1 with the boy of picture 2, keeping picture 2's background unchanged"
- For stylization tasks, describe the reference image's style in the rewritten prompt, while preserving the visual content of the source image.

## 3. Rationale and Logic Check
- Resolve contradictory instructions: e.g., "Remove all trees but keep all trees" requires logical correction.
- Supplement missing critical information: e.g., if position is unspecified, choose a reasonable area based on composition (near subject, blank space, center/edge, etc.).

# Output Format Example
```json
{
    "Rewritten": "..."
}
```
'''


# --- NEW: Next Scene Prompt System Prompt ---
NEXT_SCENE_SYSTEM_PROMPT = '''
# Next Scene Prompt Generator
You are a cinematic AI director assistant. Your task is to analyze the provided image and generate a compelling "Next Scene" prompt that describes the natural cinematic progression from the current frame.

## Core Principles:
- Think like a film director: Consider camera dynamics, visual composition, and narrative continuity
- Create prompts that flow seamlessly from the current frame
- Focus on **visual progression** rather than static modifications
- Maintain compositional coherence while introducing organic transitions

## Prompt Structure:
Always begin with "Next Scene: " followed by your cinematic description.

## Key Elements to Include:
1. **Camera Movement**: Specify one of these or combinations:
    - Dolly shots (camera moves toward/away from subject)
    - Push-ins or pull-backs
    - Tracking moves (camera follows subject)
    - Pan left/right
    - Tilt up/down
    - Zoom in/out
2. **Framing Evolution**: Describe how the shot composition changes:
    - Wide to close-up transitions
    - Angle shifts (high angle to eye level, etc.)
    - Reframing of subjects
    - Revealing new elements in frame
3. **Environmental Reveals** (if applicable):
    - New characters entering frame
    - Expanded scenery
    - Spatial progression
    - Background elements becoming visible
4. **Atmospheric Shifts** (if enhancing the scene):
    - Lighting changes (golden hour, shadows, lens flare)
    - Weather evolution
    - Time-of-day transitions
    - Depth and mood indicators

## Guidelines:
- Keep descriptions concise but vivid (2-3 sentences max)
- Always specify the camera action first
- Focus on what changes between this frame and the next
- Maintain the scene's existing style and mood unless intentionally transitioning
- Prefer natural, organic progressions over abrupt changes

## Example Outputs:
- "Next Scene: The camera pulls back from a tight close-up on the airship to a sweeping aerial view, revealing an entire fleet of vessels soaring through a fantasy landscape."
- "Next Scene: The camera tracks forward and tilts down, bringing the sun and helicopters closer into frame as a strong lens flare intensifies."
- "Next Scene: The camera pans right, removing the dragon and rider from view while revealing more of the floating mountain range in the distance."
- "Next Scene: The camera moves slightly forward as sunlight breaks through the clouds, casting a soft glow around the character's silhouette in the mist. Realistic cinematic style, atmospheric depth."

## Output Format:
Return ONLY the next scene prompt as plain text, starting with "Next Scene: ".
Do NOT include JSON formatting or additional explanations.
'''


# --- NEW: Function to generate Next Scene prompts using VLM ---
def generate_next_scene_prompt(images):
    """
    Uses a VLM to analyze the uploaded image(s) and generate a cinematic
    "Next Scene" prompt following the guidelines of the next-scene LoRA.
    """
    if images is None or len(images) == 0:
        return "Please upload an image first to generate a next scene prompt."

    # Ensure HF_TOKEN is set
    api_key = os.environ.get("HF_TOKEN")
    if not api_key:
        return "Error: HF_TOKEN not set. Cannot generate next scene prompt."

    try:
        # Load input images into PIL Images using the shared helper function
        pil_images = process_gallery_images(images)
        if len(pil_images) == 0:
            return "Error: Could not load images."

        # Initialize the InferenceClient with a vision-capable model
        client = InferenceClient(
            provider="cerebras",
            api_key=api_key,
        )

        # Format the messages for the chat completions API
        messages = [
            {"role": "system", "content": NEXT_SCENE_SYSTEM_PROMPT},
            {"role": "user", "content": []},
        ]

        # Add images to the message
        for img in pil_images:
            messages[1]["content"].append(
                {"image": f"data:image/png;base64,{encode_image(img)}"}
            )

        # Add the text prompt
        messages[1]["content"].append({
            "text": "Analyze this image and generate a compelling 'Next Scene' prompt that describes the natural cinematic progression from this frame. Focus on camera movement, framing changes, and atmospheric evolution."
        })

        # Call the API
        completion = client.chat.completions.create(
            model="Qwen/Qwen3-235B-A22B-Instruct-2507",
            messages=messages,
        )

        # Parse the response
        result = completion.choices[0].message.content.strip()

        # Ensure it starts with "Next Scene:"
        if not result.startswith("Next Scene:"):
            result = "Next Scene: " + result

        print(f"Generated Next Scene Prompt: {result}")
        return result

    except Exception as e:
        print(f"Error generating next scene prompt: {e}")
        return f"Error: Could not generate next scene prompt. {str(e)}"


# --- Prompt Enhancement using Hugging Face InferenceClient ---
def polish_prompt_hf(prompt, img_list):
    """
    Rewrites the prompt using a Hugging Face InferenceClient.

    The full SYSTEM_PROMPT is prepended to the user instruction and sent, together
    with the input images, to the rewriter model; if HF_TOKEN is not set or the
    call fails, the original prompt is returned unchanged.
    """
    # Ensure HF_TOKEN is set
    api_key = os.environ.get("HF_TOKEN")
    if not api_key:
        print("Warning: HF_TOKEN not set. Falling back to original prompt.")
        return prompt

    try:
        # Prepend the rewriter system prompt to the user instruction
        prompt = f"{SYSTEM_PROMPT}\n\nUser Input: {prompt}\n\nRewritten Prompt:"

        # Initialize the client
        client = InferenceClient(
            provider="cerebras",
            api_key=api_key,
        )

        # Format the messages for the chat completions API
        system_prompt = "You are a helpful assistant; you should provide useful answers to users."
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": []},
        ]
        for img in img_list:
            messages[1]["content"].append(
                {"image": f"data:image/png;base64,{encode_image(img)}"}
            )
        messages[1]["content"].append({"text": f"{prompt}"})

        # Call the API
        completion = client.chat.completions.create(
            model="Qwen/Qwen3-235B-A22B-Instruct-2507",
            messages=messages,
        )

        # Parse the response
        result = completion.choices[0].message.content

        # Try to extract JSON if present
        if '{"Rewritten"' in result:
            try:
                # Clean up the response
                result = result.replace('```json', '').replace('```', '')
                result_json = json.loads(result)
                polished_prompt = result_json.get('Rewritten', result)
            except Exception:
                polished_prompt = result
        else:
            polished_prompt = result

        polished_prompt = polished_prompt.strip().replace("\n", " ")
        return polished_prompt

    except Exception as e:
        print(f"Error during API call to Hugging Face: {e}")
        # Fall back to the original prompt if enhancement fails
        return prompt


def encode_image(pil_image):
    """Encodes a PIL image as a base64 PNG string."""
    import io
    buffered = io.BytesIO()
    pil_image.save(buffered, format="PNG")
    return base64.b64encode(buffered.getvalue()).decode("utf-8")


def process_gallery_images(images):
    """
    Helper function to convert Gradio gallery images to PIL Images.
    Handles the various input formats produced by the gallery component.
    """
    pil_images = []
    if images is not None:
        for item in images:
            try:
                if isinstance(item[0], Image.Image):
                    pil_images.append(item[0].convert("RGB"))
                elif isinstance(item[0], str):
                    pil_images.append(Image.open(item[0]).convert("RGB"))
                elif hasattr(item, "name"):
                    pil_images.append(Image.open(item.name).convert("RGB"))
            except Exception:
                continue
    return pil_images


# --- Model Loading ---
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

pipe = QwenImageEditPlusPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit-2509",
    transformer=QwenImageTransformer2DModel.from_pretrained(
        "linoyts/Qwen-Image-Edit-Rapid-AIO",
        subfolder="transformer",
        torch_dtype=dtype,
        device_map="cuda",
    ),
    torch_dtype=dtype,
).to(device)

pipe.load_lora_weights(
    "lovis93/next-scene-qwen-image-lora-2509",
    weight_name="next-scene_lora-v2-3000.safetensors",
    adapter_name="next-scene",
)
pipe.set_adapters(["next-scene"], adapter_weights=[1.])
pipe.fuse_lora(adapter_names=["next-scene"], lora_scale=1.)
pipe.unload_lora_weights()

# Apply the same optimizations from the first version
pipe.transformer.__class__ = QwenImageTransformer2DModel
pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())

# --- Ahead-of-time compilation ---
optimize_pipeline_(
    pipe,
    image=[Image.new("RGB", (1024, 1024)), Image.new("RGB", (1024, 1024))],
    prompt="prompt",
)

# --- UI Constants and Helpers ---
MAX_SEED = np.iinfo(np.int32).max


def use_output_as_input(result_gallery):
    """Takes the generated images from the result gallery and moves them to input_images."""
    if result_gallery:
        # result_gallery is already a list of PIL images
        return result_gallery
    return []


@spaces.GPU
def infer(
    images,
    prompt,
    seed=42,
    randomize_seed=False,
    true_guidance_scale=1.0,
    num_inference_steps=8,
    height=256,
    width=256,
    rewrite_prompt=False,
    num_images_per_prompt=1,
    progress=gr.Progress(track_tqdm=True),
):
    """
    Generates an image using the local Qwen-Image diffusers pipeline.
    """
    # Hardcode the negative prompt as requested
    negative_prompt = " "

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # Set up the generator for reproducibility
    generator = torch.Generator(device=device).manual_seed(seed)

    # Load input images into PIL Images using the shared helper function
    pil_images = process_gallery_images(images)

    # 256x256 is the UI default and acts as a sentinel for "let the pipeline choose the size"
    if height == 256 and width == 256:
        height, width = None, None

    print(f"Calling pipeline with prompt: '{prompt}'")
    print(f"Negative Prompt: '{negative_prompt}'")
    print(f"Seed: {seed}, Steps: {num_inference_steps}, Guidance: {true_guidance_scale}, Size: {width}x{height}")

    if rewrite_prompt and len(pil_images) > 0:
        prompt = polish_prompt_hf(prompt, pil_images)
        print(f"Rewritten Prompt: {prompt}")

    # Generate the image
    image = pipe(
        image=pil_images if len(pil_images) > 0 else None,
        prompt=prompt,
        height=height,
        width=width,
        negative_prompt=negative_prompt,
        num_inference_steps=num_inference_steps,
        generator=generator,
        true_cfg_scale=true_guidance_scale,
        num_images_per_prompt=num_images_per_prompt,
    ).images

    # Return images, seed, and make the "use output as input" button visible
    return image, seed, gr.update(visible=True)


# --- Examples and UI Layout ---
examples = []

css = """
#col-container {
    margin: 0 auto;
    max-width: 1024px;
}
#logo-title {
    text-align: center;
}
#logo-title img {
    width: 400px;
}
#edit_text {
    margin-top: -62px !important;
}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.HTML("""