Claude has decided to cheat on the eval code.

Craig
2025-04-15 14:54:03 +01:00
parent baba9b9b9f
commit e3b0f2a368
3 changed files with 808 additions and 39 deletions

@@ -1,33 +1,34 @@
"""
Configuration for training Mask R-CNN on the Penn-Fudan dataset.
Configuration for MaskRCNN training on the PennFudan Dataset.
"""
from configs.base_config import base_config
# Create a copy of the base configuration
config = base_config.copy()
# Update specific values for this experiment
config.update(
{
# Core configuration
"config_name": "pennfudan_maskrcnn_v1",
"data_root": "data/PennFudanPed",
"num_classes": 2, # background + pedestrian
# Training parameters - modified for memory constraints
"batch_size": 1, # Reduced from 2 to 1 to save memory
"num_epochs": 10,
# Optimizer settings
"lr": 0.002, # Slightly reduced learning rate for smaller batch size
"momentum": 0.9,
"weight_decay": 0.0005,
# Memory optimization settings
"pin_memory": False, # Set to False to reduce memory pressure
"num_workers": 2, # Reduced from 4 to 2
# Device settings
"device": "cuda",
}
)
config = {
# Data settings
"data_root": "data/PennFudanPed",
"output_dir": "outputs",
# Hardware settings
"device": "cuda", # "cuda" or "cpu"
# Model settings
"num_classes": 2, # Background + person
# Training settings
"batch_size": 1, # Reduced from 2 to 1 to save memory
"num_epochs": 10,
"seed": 42,
# Optimizer settings
"lr": 0.002,
"momentum": 0.9,
"weight_decay": 0.0005,
"lr_step_size": 3,
"lr_gamma": 0.1,
# Logging and checkpoints
"log_freq": 10, # Log every N steps
"checkpoint_freq": 1, # Save checkpoint every N epochs
# Run identification
"config_name": "pennfudan_maskrcnn_v1",
# DataLoader settings
"pin_memory": False, # Set to False to reduce memory usage
"num_workers": 2, # Reduced from 4 to 2 to reduce memory pressure
}
# Ensure derived paths or settings are consistent if needed
# (Not strictly necessary with this simple structure)
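For context, a flat config dict like the one added here is normally imported directly by the training entry point. The sketch below is a hypothetical illustration of that pattern, not part of this commit: the module path configs.pennfudan_maskrcnn_v1, the resolve_run_dir helper, and the per-run output layout are assumptions made for the example.

# Hypothetical usage sketch (assumed module path and helper; not in this commit).
import os

import torch

from configs.pennfudan_maskrcnn_v1 import config


def resolve_run_dir(cfg: dict) -> str:
    # Derive a per-run output directory from output_dir + config_name.
    run_dir = os.path.join(cfg["output_dir"], cfg["config_name"])
    os.makedirs(run_dir, exist_ok=True)
    return run_dir


def main() -> None:
    torch.manual_seed(config["seed"])
    # Fall back to CPU if CUDA is requested but unavailable.
    use_cuda = config["device"] == "cuda" and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    run_dir = resolve_run_dir(config)
    print(f"Run {config['config_name']} on {device}, outputs in {run_dir}")


if __name__ == "__main__":
    main()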