import gradio as gr
import subprocess
import os
import spaces
import inference_video_w
import torch

# Download the pretrained RIFE model weights
subprocess.run(
    [
        "wget",
        "https://huggingface.co/r3gm/RIFE/resolve/main/RIFEv4.26_0921.zip",
        "-O",
        "RIFEv4.26_0921.zip",
    ],
    check=True,
)

# Unzip the downloaded weights into the working directory
subprocess.run(["unzip", "-o", "RIFEv4.26_0921.zip"], check=True)


# Request a ZeroGPU allocation (up to 120 s per call) on Hugging Face Spaces
@spaces.GPU(duration=120)
def run_rife(
    input_video,
    frame_multiplier,
    time_exponent,
    fixed_fps,
    video_scale,
    remove_duplicate_frames,
    create_montage,
    progress=gr.Progress(track_tqdm=True),
):
    if input_video is None:
        raise gr.Error("Please upload a video first.")

    ext = "mp4"
    model_dir = "train_log"

    # Construct output filename pattern to match what inference_video.py expects/generates
    video_path_wo_ext = os.path.splitext(os.path.basename(input_video))[0]
    # We pass the desired output name, though the function logic tries to stick to this pattern anyway
    output_base_name = "{}_{}X_fps.{}".format(video_path_wo_ext, int(frame_multiplier), ext)

    if fixed_fps > 0:
        gr.Warning("Audio will not be merged because a fixed FPS is set!")

    print(f"Starting inference for: {input_video}")

    try:
        # Call the imported function directly
        result_path = inference_video_w.inference(
            video=input_video,
            output=output_base_name,
            modelDir=model_dir,
            fp16=torch.cuda.is_available(),
            UHD=False,
            scale=video_scale,
            skip=remove_duplicate_frames,
            fps=int(fixed_fps) if fixed_fps > 0 else None,
            ext=ext,
            exp=int(time_exponent),
            multi=int(frame_multiplier),
            montage=create_montage,
        )

        if result_path and os.path.exists(result_path):
            return result_path
        else:
            raise gr.Error(f"Output file not found. Expected: {result_path}")

    except Exception as e:
        raise gr.Error(f"An error occurred: {str(e)}")


# --- Gradio UI Layout ---
with gr.Blocks(title="Frame Rate Enhancer", theme=gr.themes.Soft()) as app:
    gr.Markdown("# ⚡ RIFE: Frame Rate Enhancer")
    gr.Markdown("Creates extra frames between the original ones to make motion in your videos smoother and more fluid.")
    gr.Markdown("⚠️ **Notice:** Keep input videos under 60 seconds for frame interpolation to prevent GPU task aborts.")

    with gr.Row():
        # --- Left Column: Inputs & Settings ---
        with gr.Column(scale=1):
            input_vid = gr.Video(label="🎬 Input Source Video", sources=["upload"])

            with gr.Group():
                multi_param = gr.Dropdown(
                    choices=[2, 3, 4, 5, 6],
                    value=2,
                    label="🗃️ Frame Multiplier",
                    info="2X = Double FPS (e.g. 30 -> 60). Higher multipliers create more intermediate frames."
                )

            with gr.Accordion("🛠️ Advanced Configuration", open=False):
                gr.Markdown("Control rendering parameters.")
                with gr.Row():
                    scale_param = gr.Dropdown(
                        choices=[0.25, 0.5, 1.0, 2.0, 4.0],
                        value=1.0,
                        label="📉 Render Scale",
                        info="1.0 = Original Resolution. Reduce to 0.5 for faster processing on 4K content."
                    )
                    fps_param = gr.Number(
                        value=0,
                        label="🎯 Force Output FPS",
                        info="0 = Auto-calculate. Set to 30 or 60 to lock the framerate. Audio will be removed when forcing FPS."
                    )
                    exp_param = gr.Number(
                        value=1,
                        label="🔢 Exponent Power",
                        info="Alternative multiplier calculation (2^exp)."
                    )

                with gr.Row():
                    skip_chk = gr.Checkbox(
                        label="⏩ Skip Static Frames",
                        value=False,
                        info="Bypass processing for static frames to save time."
                    )
                    montage_chk = gr.Checkbox(
                        label="🆚 Split-Screen Comparison",
                        value=False,
                        info="Output video showing Original vs. Processed."
                    )
            btn_run = gr.Button("GENERATE INTERMEDIATE FRAMES", variant="primary", size="lg")

        # --- Right Column: Output ---
        with gr.Column(scale=1):
            output_vid = gr.Video(label="INTERPOLATED RESULT")
            gr.Markdown("**Status:** Rendering time depends on input resolution and duration.")

    # --- Bind Logic ---
    btn_run.click(
        fn=run_rife,
        inputs=[
            input_vid,
            multi_param,
            exp_param,
            fps_param,
            scale_param,
            skip_chk,
            montage_chk,
        ],
        outputs=output_vid,
    )


if __name__ == "__main__":
    app.launch(mcp_server=True)