Skip to content
Open
124 changes: 113 additions & 11 deletions pufferlib/ocean/drive/drive.h
Original file line number Diff line number Diff line change
Expand Up @@ -2824,16 +2824,105 @@ void compute_observations(Drive *env) {
}
}

// Find a random collision-free position on a drivable lane for an existing agent.
// Returns true if a valid position was found and updates the agent's sim_x/y/z/heading.
// Find a random collision-free position on a drivable lane for an existing agent.
// Lanes are sampled with probability proportional to their polyline length, then
// candidate poses are rejection-sampled up to MAX_SPAWN_ATTEMPTS times against
// agent-agent collisions and offroad placement.
// Returns true if a valid position was found and updates the agent's
// sim_x/y/z/heading (plus its stored initial trajectory pose so later
// non-random resets stay consistent); returns false otherwise, leaving the
// agent's pose unchanged.
static bool randomize_agent_position(Drive *env, int agent_idx) {
    Agent *agent = &env->agents[agent_idx];

    // Collect drivable lanes on the heap. env->num_roads can be large and this
    // function runs on every respawn/reset, so VLAs here would risk excessive
    // stack usage (stack overflow on big maps).
    int *drivable_lanes = malloc((size_t)env->num_roads * sizeof *drivable_lanes);
    float *lane_lengths = malloc((size_t)env->num_roads * sizeof *lane_lengths);
    if (drivable_lanes == NULL || lane_lengths == NULL) {
        free(drivable_lanes);
        free(lane_lengths);
        return false;
    }

    int num_drivable = 0;
    float total_lane_length = 0.0f;
    for (int i = 0; i < env->num_roads; i++) {
        if (env->road_elements[i].type == ROAD_LANE && env->road_elements[i].polyline_length > 0.0f) {
            drivable_lanes[num_drivable] = i;
            lane_lengths[num_drivable] = env->road_elements[i].polyline_length;
            total_lane_length += lane_lengths[num_drivable];
            num_drivable++;
        }
    }

    bool found = false;
    // Guard total_lane_length as well: with zero total weight the length-
    // weighted draw below would be meaningless.
    if (num_drivable == 0 || total_lane_length <= 0.0f)
        goto cleanup;

    for (int attempt = 0; attempt < MAX_SPAWN_ATTEMPTS; attempt++) {
        // Length-weighted lane selection: draw r in [0, total_lane_length] and
        // walk the cumulative lengths until it is exceeded.
        float r = ((float)rand() / (float)RAND_MAX) * total_lane_length;
        float cumulative = 0.0f;
        int selected = num_drivable - 1; // fallback covers r == total (rand() == RAND_MAX)
        for (int k = 0; k < num_drivable; k++) {
            cumulative += lane_lengths[k];
            if (r < cumulative) {
                selected = k;
                break;
            }
        }
        RoadMapElement *lane = &env->road_elements[drivable_lanes[selected]];

        float spawn_x, spawn_y, spawn_z, spawn_heading;
        get_random_point_on_lane(lane, &spawn_x, &spawn_y, &spawn_z, &spawn_heading);
        spawn_z += agent->sim_height / 2.0f; // center the bounding box on the lane surface

        // Temporarily invalidate this agent so check_spawn_collision skips it
        // when testing the candidate pose against all other agents.
        float saved_x = agent->sim_x;
        agent->sim_x = INVALID_POSITION;
        bool collision = check_spawn_collision(env, env->active_agent_count, spawn_x, spawn_y, spawn_z, spawn_heading,
                                               agent->sim_length, agent->sim_width, agent->sim_height);
        agent->sim_x = saved_x;
        if (collision)
            continue;

        // Reject candidate poses whose footprint leaves the drivable area.
        if (check_spawn_offroad(env, spawn_x, spawn_y, spawn_z, spawn_heading, agent->sim_length, agent->sim_width,
                                agent->sim_height))
            continue;

        agent->sim_x = spawn_x;
        agent->sim_y = spawn_y;
        agent->sim_z = spawn_z;
        agent->sim_heading = spawn_heading;
        agent->heading_x = cosf(spawn_heading);
        agent->heading_y = sinf(spawn_heading);
        // Update stored initial position so future non-random resets are consistent.
        agent->log_trajectory_x[0] = spawn_x;
        agent->log_trajectory_y[0] = spawn_y;
        agent->log_trajectory_z[0] = spawn_z;
        agent->log_heading[0] = spawn_heading;
        found = true;
        break;
    }

cleanup:
    free(drivable_lanes);
    free(lane_lengths);
    return found;
}

void respawn_agent(Drive *env, int agent_idx) {
Agent *agent = &env->agents[agent_idx];
agent->sim_x = agent->log_trajectory_x[0];
agent->sim_y = agent->log_trajectory_y[0];
agent->sim_z = agent->log_trajectory_z[0];
agent->sim_heading = agent->log_heading[0];
agent->heading_x = cosf(agent->sim_heading);
agent->heading_y = sinf(agent->sim_heading);
agent->sim_vx = agent->log_velocity_x[0];
agent->sim_vy = agent->log_velocity_y[0];

if (env->init_mode == INIT_VARIABLE_AGENT_NUMBER) {
if (!randomize_agent_position(env, agent_idx)) {
// Fallback to original position if no valid spawn found
agent->sim_x = agent->log_trajectory_x[0];
agent->sim_y = agent->log_trajectory_y[0];
agent->sim_z = agent->log_trajectory_z[0];
agent->sim_heading = agent->log_heading[0];
agent->heading_x = cosf(agent->sim_heading);
agent->heading_y = sinf(agent->sim_heading);
}
// Sample a new goal relative to the new position
sample_new_goal(env, agent_idx);
agent->sim_vx = 0.0f;
agent->sim_vy = 0.0f;
agent->sim_speed = 0.0f;
agent->sim_speed_signed = 0.0f;
} else {
agent->sim_x = agent->log_trajectory_x[0];
agent->sim_y = agent->log_trajectory_y[0];
agent->sim_z = agent->log_trajectory_z[0];
agent->sim_heading = agent->log_heading[0];
agent->heading_x = cosf(agent->sim_heading);
agent->heading_y = sinf(agent->sim_heading);
agent->sim_vx = agent->log_velocity_x[0];
agent->sim_vy = agent->log_velocity_y[0];
}
agent->metrics_array[COLLISION_IDX] = 0.0f;
agent->metrics_array[OFFROAD_IDX] = 0.0f;
agent->metrics_array[REACHED_GOAL_IDX] = 0.0f;
Expand Down Expand Up @@ -3097,8 +3186,21 @@ void move_dynamics(Drive *env, int action_idx, int agent_idx) {

void c_reset(Drive *env) {
env->timestep = env->init_steps;
set_start_position(env);
reset_goal_positions(env);
if (env->init_mode == INIT_VARIABLE_AGENT_NUMBER) {
// Randomize all agent positions on reset
for (int x = 0; x < env->active_agent_count; x++) {
int agent_idx = env->active_agent_indices[x];
randomize_agent_position(env, agent_idx);
}
Comment on lines +3298 to +3301
Copy link

Copilot AI Mar 29, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In c_reset() variable-agent mode, the return value of randomize_agent_position() is ignored. If it fails to find a valid spawn, the agent will keep its prior episode position (which could be offroad/colliding), and the subsequent sample_new_goal() will be based on that stale state. Please check the return value and apply a deterministic fallback (e.g., retry with different constraints, respawn to log_trajectory_*[0], or mark/remove the agent) so resets are reliable.

Copilot uses AI. Check for mistakes.
Copy link

Copilot AI Mar 29, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In c_reset() variable-agent mode, positions are randomized but per-agent dynamics state (e.g., sim_vx/sim_vy, speed, accelerations, steering) is never reset. This means agents can start a new episode with leftover velocities from the previous episode even though their pose changed. Reset the dynamics fields after randomize_agent_position() (similar to respawn_agent()’s variable-mode branch) to avoid carry-over behavior and immediate post-reset collisions.

Suggested change
}
}
// Reset dynamics state after positions have been randomized to avoid carry-over
for (int x = 0; x < env->active_agent_count; x++) {
int agent_idx = env->active_agent_indices[x];
Agent *agent = &env->agents[agent_idx];
agent->sim_vx = 0.0f;
agent->sim_vy = 0.0f;
agent->sim_speed = 0.0f;
agent->sim_ax = 0.0f;
agent->sim_ay = 0.0f;
agent->steering = 0.0f;
}

Copilot uses AI. Check for mistakes.
// Sample new goals relative to new positions
for (int x = 0; x < env->active_agent_count; x++) {
int agent_idx = env->active_agent_indices[x];
sample_new_goal(env, agent_idx);
}
Comment on lines +3302 to +3306
Copy link

Copilot AI Mar 29, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

sample_new_goal() increments agent->goals_sampled_this_episode, but c_reset() later unconditionally sets goals_sampled_this_episode = 1.0f for all agents. In variable-agent mode this means the initial goal sampling done here will be double-counted or discarded depending on ordering. Consider moving goal sampling to after the per-agent reset loop (or resetting goals_sampled_this_episode appropriately before sampling) so the counter reflects the actual number of sampled goals.

Copilot uses AI. Check for mistakes.
Copy link

Copilot AI Mar 29, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This PR changes reset/respawn semantics in INIT_VARIABLE_AGENT_NUMBER (new random collision-free spawn on both c_reset() and respawn_agent()), but there doesn’t appear to be an automated regression test covering this behavior (e.g., positions change across resets in the same env instance and respawns remain collision-free). Adding a Drive test would help prevent future regressions, similar to existing spawn-diversity tests in tests/test_rand_seed_bug.py.

Copilot uses AI. Check for mistakes.
} else {
set_start_position(env);
reset_goal_positions(env);
}
for (int x = 0; x < env->active_agent_count; x++) {
env->logs[x] = (Log){0};
int agent_idx = env->active_agent_indices[x];
Expand Down Expand Up @@ -3128,7 +3230,7 @@ void c_reset(Drive *env) {
agent->prev_goal_z = agent->sim_z;
generate_reward_coefs(env, agent);

if (env->goal_behavior == GOAL_GENERATE_NEW) {
if (env->goal_behavior == GOAL_GENERATE_NEW && env->init_mode != INIT_VARIABLE_AGENT_NUMBER) {
agent->goal_position_x = agent->init_goal_x;
agent->goal_position_y = agent->init_goal_y;
agent->goal_position_z = agent->init_goal_z;
Expand Down
Loading