NaviDriveVLM: Decoupling High-Level Reasoning and Motion Planning for Autonomous Driving
Paper • 2603.07901 • Published • 5
This model is a fine-tuned version of Qwen3-VL-2B-Instruct, specialized for autonomous driving reasoning.
import re
import torch
from transformers import AutoModelForImageTextToText, AutoProcessor
from datasets import load_dataset
# 1. Load Model & Processor
model_id = "Ximeng0831/NaviDrive-Qwen3-VL-2B-SFT"

# Let transformers pick the checkpoint dtype and shard the model across
# whatever devices are available.
load_kwargs = dict(torch_dtype="auto", device_map="auto")
model = AutoModelForImageTextToText.from_pretrained(model_id, **load_kwargs)
processor = AutoProcessor.from_pretrained(model_id)
# 2. Load Dataset
def filter_to_xy_str(original_str):
    """Extract parenthesized coordinate tuples and normalize each to "(x, y)".

    Every "(...)" group in *original_str* is reduced to its first two
    comma-separated components (dropping z or any extras) with whitespace
    normalized, so "(1.0,2.0)" and "(1.0, 2.0, 0.5)" both yield "(1.0, 2.0)".
    The original code only normalized tuples with more than two components
    and passed two-component tuples through with their original spacing,
    producing inconsistently formatted output.

    Args:
        original_str: String containing zero or more "(...)" groups, each a
            comma-separated coordinate tuple.

    Returns:
        A ", "-joined string of "(x, y)" pairs (empty string when no
        parenthesized group is found). One-component groups are kept as
        "(x)".
    """
    pairs = []
    for group in re.findall(r"\(([^)]+)\)", original_str):
        parts = [component.strip() for component in group.split(",")]
        if len(parts) >= 2:
            pairs.append(f"({parts[0]}, {parts[1]})")
        else:
            # Degenerate single-component tuple: keep it rather than crash.
            pairs.append(f"({parts[0]})")
    return ", ".join(pairs)
ds = load_dataset("Ximeng0831/NaviDrive-Reasoning", "qwen_32b", split="validation")
data = ds[100]  # a single validation sample used as a worked example

# 3. Construct the Prompt
system_prompt = """You are an expert autonomous driving planning module (Driver). Your goal is to output a safe, smooth, and kinematically feasible future trajectory.
Rules:
1. Coordinate System: Current ego position is (0,0). X-axis positive is forward, Y-axis positive is left.
2. Trajectory Timing: Output exactly 12 waypoints (except origin (0,0)) representing the next 6 seconds (sampled at 2Hz, 0.5s intervals).
3. Kinematic Constraints: Ensure the gaps between waypoints are consistent with the current velocity and acceleration. Avoid sudden jumps or unrealistic lateral shifts.
4. Safety Alignment: The trajectory must strictly follow the Navigator's safety analysis.
"""

# Navigator reasoning text attached to this sample.
reason = data['reasons'][0]

# Ego-vehicle dynamics summary, one field per line.
status_lines = [
    "Current Dynamics:",
    f"- Velocity: {data['vel_val']:.2f} m/s",
    f"- Yaw Rate: {data['yr_val']:.2f} rad/s",
    f"- Acceleration (Longitudinal x, Lateral y): {data['acc_val']}",
    f"Past Trajectory (2Hz): {filter_to_xy_str(data['wp_past'])}",
    f"High-level Command: {data['command']}",
]
ego_status_prompt = "\n".join(status_lines) + "\n\n"

driver_user_prompt = """
Predict the next 12 waypoints: (x1, y1), (x2, y2), ..., (x12, y12).
"""

# Reasoning first, then ego status, then the waypoint request.
full_driver_prompt = f"{reason}\n\n" + ego_status_prompt + driver_user_prompt

prompt_messages = [
    {"role": "system", "content": system_prompt},
    {"role": "user", "content": full_driver_prompt},
]
# 4. Inference
prompt_text = processor.apply_chat_template(
    prompt_messages, tokenize=False, add_generation_prompt=True
)
inputs = processor(text=[prompt_text], padding=True, return_tensors="pt").to(model.device)

# Sample 6 candidate trajectories from the same prompt.
gen_kwargs = dict(
    max_new_tokens=1024,
    do_sample=True,
    temperature=0.7,
    top_p=0.8,
    num_return_sequences=6,
)
output_ids = model.generate(**inputs, **gen_kwargs)

# Strip the prompt tokens so only the generated continuation is decoded.
input_len = inputs["input_ids"].shape[1]
for idx, seq in enumerate(output_ids):
    raw_output = processor.decode(seq[input_len:], skip_special_tokens=True)
    print(f"Output {idx}: {raw_output}\n")

print("Ground Truth: ", filter_to_xy_str(data['wp_future']))
Note: Images are not included in the examples or dataset — only relative image paths are provided.
To use image inputs, please download the nuScenes dataset and set the dataset root path so the images can be loaded correctly.
The model is fine-tuned in a fully supervised manner using a single NVIDIA RTX 4090 GPU.
For detailed training hyperparameters and configuration settings, please refer to the configuration file.
This model is based on Qwen3-VL-2B-Instruct.