Compare commits
18 commits: 8c6de7380f...main

| SHA1 |
|---|
| 27082cbf33 |
| e3b0f2a368 |
| baba9b9b9f |
| 046e36678e |
| be70c4e160 |
| 2b38c04a57 |
| 217cfba9ba |
| 0f3a96ca81 |
| e9b97ac2b5 |
| bd6b5170b7 |
| 620c34bf13 |
| 97776a4a82 |
| c3096f0664 |
| ae79a555d3 |
| 392e402c2e |
| 996c071665 |
| c5a8d9170b |
| 36d1a877e5 |
.gitignore (new file, vendored, 180 lines)
@@ -0,0 +1,180 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# UV
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
#uv.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

# Ruff stuff:
.ruff_cache/

# PyPI configuration file
.pypirc

# Custom Ignores
data/
outputs/
logs/
*.pth
.pre-commit-config.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
repos:
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.5.0  # Use a specific version
    hooks:
      - id: ruff-format
      - id: ruff
        args: [--fix, --select, I001, --exit-non-zero-on-fix]
      - id: ruff
        args: [--fix, --exit-non-zero-on-fix]
.python-version (new file, 1 line)
@@ -0,0 +1 @@
3.12
README.md (new file, 233 lines)
@@ -0,0 +1,233 @@
This project was a test run using Cursor and "vibe coding" to create a full object detection project. I wrote almost no lines of code to get to this point, and the result mostly works. The technology is definitely impressive, but it feels better suited to things that can be developed in a more test-driven way. I'll update this later with other things I've learned along the way.

I stopped this project here because it got trapped in a doom loop, unable to fix a bug in the eval code, and I wanted this to be an investigation into how well I could do with very low intervention.


# Torchvision Vibecoding Project

A PyTorch-based object detection project using Mask R-CNN to detect pedestrians in the Penn-Fudan dataset. This project demonstrates model training, evaluation, and visualization with PyTorch and Torchvision.

## Table of Contents

- [Prerequisites](#prerequisites)
- [Project Setup](#project-setup)
- [Project Structure](#project-structure)
- [Data Preparation](#data-preparation)
- [Configuration](#configuration)
- [Training](#training)
- [Evaluation](#evaluation)
- [Visualization](#visualization)
- [Testing](#testing)
- [Debugging](#debugging)
- [Code Quality](#code-quality)

## Prerequisites

- Python 3.12+ (the project pins 3.12 via `.python-version` and `requires-python = ">=3.12"`)
- [uv](https://github.com/astral-sh/uv) for package management
- CUDA-compatible GPU (optional but recommended)

## Project Setup

1. Clone the repository:
```bash
git clone https://github.com/yourusername/torchvision-vibecoding-project.git
cd torchvision-vibecoding-project
```

2. Set up the environment with uv (the repository already contains `pyproject.toml`, so no `uv init` is needed):
```bash
uv sync
```

3. Install development dependencies:
```bash
uv add ruff pytest matplotlib
```

4. Set up pre-commit hooks:
```bash
pre-commit install
```

## Project Structure

```
├── configs/                         # Configuration files
│   ├── base_config.py               # Base configuration with defaults
│   ├── debug_config.py              # Configuration for quick debugging
│   └── pennfudan_maskrcnn_config.py # Configuration for Penn-Fudan dataset
├── data/                            # Dataset directory (not tracked by git)
│   └── PennFudanPed/                # Penn-Fudan pedestrian dataset
├── models/                          # Model definitions
│   └── detection.py                 # Mask R-CNN model definition
├── outputs/                         # Training outputs (not tracked by git)
│   └── <config_name>/               # Named by configuration
│       ├── checkpoints/             # Model checkpoints
│       └── *.log                    # Log files
├── scripts/                         # Utility scripts
│   ├── download_data.sh             # Script to download dataset
│   ├── test_model.py                # Script for quick model testing
│   └── visualize_predictions.py     # Script for prediction visualization
├── tests/                           # Unit tests
│   ├── conftest.py                  # Test fixtures
│   ├── test_data_utils.py           # Tests for data utilities
│   ├── test_model.py                # Tests for model functionality
│   └── test_visualization.py        # Tests for visualization
├── utils/                           # Utility modules
│   ├── common.py                    # Common functionality
│   ├── data_utils.py                # Dataset handling
│   ├── eval_utils.py                # Evaluation functions
│   └── log_utils.py                 # Logging utilities
├── train.py                         # Training script
├── test.py                          # Evaluation script
├── pyproject.toml                   # Project dependencies and configuration
├── .pre-commit-config.yaml          # Pre-commit configuration
└── README.md                        # This file
```

## Data Preparation

Download the Penn-Fudan pedestrian dataset:

```bash
./scripts/download_data.sh
```

This will download and extract the dataset to the `data/PennFudanPed` directory.

## Configuration

The project uses Python dictionaries for configuration:

- `configs/base_config.py`: Default configuration values
- `configs/pennfudan_maskrcnn_config.py`: Configuration for training on Penn-Fudan
- `configs/debug_config.py`: Configuration for quick testing (CPU, minimal training)

Key configuration parameters:

- `data_root`: Path to dataset
- `output_dir`: Directory for outputs
- `device`: Computing device ('cuda' or 'cpu')
- `batch_size`: Batch size for training
- `num_epochs`: Number of training epochs
- `lr`, `momentum`, `weight_decay`: Optimizer parameters

## Training

Run the training script with a configuration file:

```bash
python train.py --config configs/pennfudan_maskrcnn_config.py
```

For quick debugging on CPU:

```bash
python train.py --config configs/debug_config.py
```

To resume training from the latest checkpoint:

```bash
python train.py --config configs/pennfudan_maskrcnn_config.py --resume
```

Training outputs (logs, checkpoints) are saved to `outputs/<config_name>/`.

## Evaluation

Evaluate a trained model:

```bash
python test.py --config configs/pennfudan_maskrcnn_config.py --checkpoint outputs/pennfudan_maskrcnn_v1/checkpoints/checkpoint_epoch_10.pth
```

This runs the model on the test dataset and reports metrics.

## Visualization

Visualize model predictions on images:

```bash
python scripts/visualize_predictions.py --config configs/pennfudan_maskrcnn_config.py --checkpoint outputs/pennfudan_maskrcnn_v1/checkpoints/checkpoint_epoch_10.pth --index 0 --output prediction.png
```

Parameters:

- `--config`: Configuration file path
- `--checkpoint`: Model checkpoint path
- `--index`: Image index in dataset (default: 0)
- `--threshold`: Detection confidence threshold (default: 0.5)
- `--output`: Output image path (optional, displays interactively if not specified)

## Testing

Run all tests:

```bash
python -m pytest
```

Run a specific test file:

```bash
python -m pytest tests/test_data_utils.py
```

Run tests with verbosity:

```bash
python -m pytest -v
```

## Debugging

For quick model testing without full training:

```bash
python scripts/test_model.py
```

This verifies:

- Model creation
- Forward pass
- Backward pass
- Dataset loading

For training with minimal resources:

```bash
python train.py --config configs/debug_config.py
```

This uses:

- CPU computation
- Minimal epochs (1)
- Small batch size (1)
- No multiprocessing

## Code Quality

Format code:

```bash
ruff format .
```

Run the linter:

```bash
ruff check .
```

Fix auto-fixable issues:

```bash
ruff check --fix .
```

Run pre-commit checks:

```bash
pre-commit run --all-files
```
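Neither `train.py` nor `utils/common.py` is included in this compare, so the exact checkpoint format behind `checkpoint_epoch_10.pth` is not on record. Given the `--resume` flag above and the `load_checkpoint(path, model, device)` call sites further down, a plausible minimal sketch (the dictionary keys here are assumptions, not the committed layout):

```python
# Hypothetical checkpoint helpers; inferred from the README and call sites,
# not taken from the committed train.py / utils/common.py.
import torch


def save_checkpoint(path, model, optimizer, epoch):
    # Bundle everything needed to resume: weights, optimizer state, progress.
    torch.save(
        {
            "epoch": epoch,
            "model_state_dict": model.state_dict(),
            "optimizer_state_dict": optimizer.state_dict(),
        },
        path,
    )


def load_checkpoint(path, model, device):
    # Restore weights onto the given device; return the raw dict plus epoch,
    # matching the `checkpoint, _ = load_checkpoint(...)` unpacking seen below.
    checkpoint = torch.load(path, map_location=device)
    model.load_state_dict(checkpoint["model_state_dict"])
    return checkpoint, checkpoint.get("epoch", 0)
```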
configs/__init__.py (new empty file, 0 lines)
configs/base_config.py (new file, 28 lines)
@@ -0,0 +1,28 @@
"""
Base configuration dictionary for the project.
Contains default values for common hyperparameters and settings.
"""

base_config = {
    # --- Data --- #
    "data_root": "data/PennFudanPed",  # Default dataset path
    "output_dir": "outputs",  # Base directory for logs, checkpoints, etc.
    # --- Hardware --- #
    "device": "cuda",  # 'cuda' or 'cpu'
    # --- Model --- #
    "num_classes": 2,  # Number of classes (including background)
    # --- Training --- #
    "batch_size": 2,  # Training batch size
    "num_epochs": 10,  # Total number of training epochs
    "seed": 42,  # Random seed for reproducibility
    # --- Optimizer --- #
    "lr": 0.005,  # Initial learning rate
    "momentum": 0.9,
    "weight_decay": 0.0005,
    # --- LR Scheduler --- #
    "lr_step_size": 3,  # Step size for StepLR scheduler
    "lr_gamma": 0.1,  # Multiplicative factor for StepLR scheduler
    # --- Logging & Checkpointing --- #
    "log_freq": 10,  # Log training progress every N batches
    "checkpoint_freq": 1,  # Save checkpoint every N epochs
}
configs/debug_config.py (new file, 25 lines)
@@ -0,0 +1,25 @@
from configs.base_config import base_config


# Create a debug configuration with minimal settings
config = base_config.copy()

# Update settings for quick debugging
config.update(
    {
        # Core configuration
        "config_name": "debug_run",
        "data_root": "data/PennFudanPed",
        "num_classes": 2,  # background + pedestrian
        # Minimal training parameters
        "batch_size": 1,
        "num_epochs": 1,  # Just one epoch for testing
        "val_split_ratio": 0.2,  # Use more validation samples for better testing coverage
        # Performance optimizations
        "pin_memory": False,
        "num_workers": 0,  # Use 0 workers to avoid multiprocessing complexities during debugging
        # Logging settings
        "log_freq": 1,  # Log every batch for debugging
        # Device setting - use CPU for reliable debugging
        "device": "cpu",  # Using CPU ensures consistent behavior across systems
    }
)
configs/pennfudan_maskrcnn_config.py (new file, 34 lines)
@@ -0,0 +1,34 @@
"""
Configuration for MaskRCNN training on the PennFudan Dataset.
"""

config = {
    # Data settings
    "data_root": "data/PennFudanPed",
    "output_dir": "outputs",
    # Hardware settings
    "device": "cuda",  # "cuda" or "cpu"
    # Model settings
    "num_classes": 2,  # Background + person
    # Training settings
    "batch_size": 1,  # Reduced from 2 to 1 to save memory
    "num_epochs": 10,
    "seed": 42,
    # Optimizer settings
    "lr": 0.002,
    "momentum": 0.9,
    "weight_decay": 0.0005,
    "lr_step_size": 3,
    "lr_gamma": 0.1,
    # Logging and checkpoints
    "log_freq": 10,  # Log every N steps
    "checkpoint_freq": 1,  # Save checkpoint every N epochs
    # Run identification
    "config_name": "pennfudan_maskrcnn_v1",
    # DataLoader settings
    "pin_memory": False,  # Set to False to reduce memory usage
    "num_workers": 2,  # Reduced from 4 to 2 to reduce memory pressure
}

# Ensure derived paths or settings are consistent if needed
# (Not strictly necessary with this simple structure)
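How `train.py --config configs/pennfudan_maskrcnn_config.py` turns this file path into the dictionary above is not shown (`utils/common.py` is absent from the compare). The todo list at the bottom mentions `importlib`, so a minimal sketch along those lines might look like this; the function body is an assumption, only the `load_config` name and its call sites appear in the diff:

```python
# Hypothetical implementation of load_config; the committed version is not shown.
import importlib.util


def load_config(config_path: str) -> dict:
    """Load the `config` dictionary from a Python config file given its path."""
    spec = importlib.util.spec_from_file_location("project_config", config_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module.config  # Each experiment config file is expected to define `config`


# Usage, run from the project root:
config = load_config("configs/pennfudan_maskrcnn_config.py")
print(config["config_name"])  # -> pennfudan_maskrcnn_v1
```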
main.py (new file, 6 lines)
@@ -0,0 +1,6 @@
def main():
    print("Hello from torchvision-vibecoding-project!")


if __name__ == "__main__":
    main()
models/__init__.py (new empty file, 0 lines)
models/detection.py (new file, 55 lines)
@@ -0,0 +1,55 @@
import torchvision
from torchvision.models import ResNet50_Weights

# Import weights enums for clarity
from torchvision.models.detection import MaskRCNN_ResNet50_FPN_V2_Weights
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor


def get_maskrcnn_model(num_classes, pretrained=True, pretrained_backbone=True):
    """Loads a Mask R-CNN model with a ResNet-50-FPN backbone.

    Args:
        num_classes (int): Number of output classes (including background).
        pretrained (bool): If True, loads weights pre-trained on COCO.
        pretrained_backbone (bool): If True (and pretrained=False), loads backbone
            weights pre-trained on ImageNet.

    Returns:
        torchvision.models.detection.MaskRCNN: The modified Mask R-CNN model.
    """

    # Determine weights based on arguments
    if pretrained:
        weights = MaskRCNN_ResNet50_FPN_V2_Weights.DEFAULT
        weights_backbone = None  # Backbone weights are included in MaskRCNN weights
    elif pretrained_backbone:
        weights = None
        weights_backbone = ResNet50_Weights.DEFAULT
    else:
        weights = None
        weights_backbone = None

    # Load the model structure with specified weights
    # Use maskrcnn_resnet50_fpn_v2 for compatibility with V2 weights
    model = torchvision.models.detection.maskrcnn_resnet50_fpn_v2(
        weights=weights, weights_backbone=weights_backbone
    )

    # 1. Replace the box predictor
    # Get number of input features for the classifier
    in_features_box = model.roi_heads.box_predictor.cls_score.in_features
    # Replace the pre-trained head with a new one
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features_box, num_classes)

    # 2. Replace the mask predictor
    # Get number of input features for the mask classifier
    in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
    hidden_layer = 256  # Default value
    # Replace the mask predictor with a new one
    model.roi_heads.mask_predictor = MaskRCNNPredictor(
        in_features_mask, hidden_layer, num_classes
    )

    return model
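As a quick check that the head replacement above does what it claims, a minimal smoke test, run from the project root and mirroring the assertions in `tests/test_model.py`:

```python
# Smoke test for get_maskrcnn_model; editorial sketch, not part of the committed diff.
from models.detection import get_maskrcnn_model

model = get_maskrcnn_model(num_classes=2, pretrained=False, pretrained_backbone=False)

# The new box head should classify 2 classes (background + pedestrian) ...
assert model.roi_heads.box_predictor.cls_score.out_features == 2
# ... and the new mask head should emit one mask-logit channel per class.
assert model.roi_heads.mask_predictor.mask_fcn_logits.out_channels == 2
print("Head replacement looks consistent.")
```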
(changed file; header not captured in this export)
@@ -5,22 +5,20 @@
 ```text
 Based on the project specification (`project-spec.md`), set up the initial project structure and tooling.

-1. Create the following directory structure:
+1. Create the following directory structure within the current directory:
    ```
-   torchvision-tutorial/
    ├── configs/
    ├── data/
    ├── models/
    ├── utils/
    ├── tests/
    ├── scripts/
-   ├── .git/                  # (Initialize git)
-   ├── .gitignore
-   ├── pyproject.toml
-   ├── pre-commit-config.yaml
-   ├── README.md
-   ├── train.py               # Empty file
-   └── test.py                # Empty file
+   ├── .gitignore             # (Created in step 2)
+   ├── pyproject.toml         # (Created in step 3)
+   ├── pre-commit-config.yaml # (Created in step 4)
+   ├── README.md              # (Created in step 7)
+   ├── train.py               # (Created in step 6)
+   └── test.py                # (Created in step 6)
    ```
 2. Initialize a git repository in the `torchvision-tutorial` directory.
 3. Create a `.gitignore` file suitable for a Python project, ignoring directories like `data/`, `outputs/`, `logs/`, virtual environment folders (`.venv`), cache files (`__pycache__/`, `.pytest_cache/`, `.ruff_cache/`), and model checkpoints (`*.pth`).

@@ -28,7 +26,7 @@ Based on the project specification (`project-spec.md`), set up the initial proje
 5. Create `pre-commit-config.yaml`. Configure `ruff` for formatting (`ruff format`) and linting (`ruff check --select I --fix` for import sorting, and `ruff check --fix` for general linting).
 6. Create empty `__init__.py` files in `configs/`, `models/`, `utils/`, and `tests/`.
 7. Create empty placeholder files: `train.py`, `test.py`, `configs/base_config.py`, `utils/data_utils.py`, `models/detection.py`, `tests/conftest.py`.
-8. Create a basic `README.md` with the project title and a brief description based on `project-spec.md`.
+8. Create a basic `README.md` with the project title "Torchvision Vibecoding Project" and a brief description based on `project-spec.md`.
 9. Install pre-commit hooks (`pre-commit install`).
 ```

@@ -103,7 +101,6 @@ Implement the core dataset loading logic in `utils/data_utils.py`:
 * In `__len__(self)`:
   * Return the total number of images.

-*(Hint: Refer to the Torchvision Object Detection Finetuning Tutorial for guidance on parsing masks and structuring the target dictionary: https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html)*
 ```

 ## Prompt 5: Data Utilities (Transforms and Collate)
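The file these hunks modify carries the prompt that specified `utils/data_utils.py`, which itself never appears in this compare, even though every test pins down its contract: `__getitem__` must return an image tensor plus a target dict with `boxes`, `labels`, `masks`, `image_id`, `area`, and `iscrowd`. A rough sketch of the target-building core, following the torchvision tutorial the removed hint linked to (hypothetical; the committed implementation may differ):

```python
# Hypothetical core of PennFudanDataset.__getitem__, based on the torchvision
# finetuning tutorial the prompt cites; the committed version is not shown.
import torch
from torchvision.io import read_image
from torchvision.ops import masks_to_boxes


def build_target(mask_path: str, idx: int) -> dict:
    mask = read_image(mask_path)  # Each pedestrian has a distinct integer id
    obj_ids = torch.unique(mask)[1:]  # Drop id 0 (the background)
    # One binary (H, W) mask per instance, stacked to (N, H, W)
    masks = (mask == obj_ids[:, None, None]).to(torch.uint8)
    boxes = masks_to_boxes(masks)  # (N, 4) in x1, y1, x2, y2 order
    area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    return {
        "boxes": boxes,
        "labels": torch.ones(len(obj_ids), dtype=torch.int64),  # Single class: pedestrian
        "masks": masks,
        "image_id": torch.tensor([idx]),
        "area": area,
        "iscrowd": torch.zeros(len(obj_ids), dtype=torch.uint8),
    }
```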
pyproject.toml (new file, 33 lines)
@@ -0,0 +1,33 @@
[project]
name = "torchvision-vibecoding-project"
version = "0.1.0"
description = "Add your description here"
readme = "README.md"
requires-python = ">=3.12"
dependencies = [
    "matplotlib>=3.10.1",
    "numpy>=2.2.4",
    "pillow>=11.1.0",
    "pytest>=8.3.5",
    "ruff>=0.11.5",
    "torch>=2.6.0",
    "torchvision>=0.21.0",
]

[tool.uv.sources]
torch = [
    { index = "pytorch-cu124", marker = "sys_platform == 'linux'" },
]
torchvision = [
    { index = "pytorch-cu124", marker = "sys_platform == 'linux'" },
]

[[tool.uv.index]]
name = "pytorch-cu124"
url = "https://download.pytorch.org/whl/cu124"
explicit = true

[dependency-groups]
dev = [
    "pre-commit>=4.2.0",
]
scripts/check_gpu.py (new file, 18 lines)
@@ -0,0 +1,18 @@
import torch

if torch.cuda.is_available():
    print(f"CUDA is available. GPU: {torch.cuda.get_device_name(0)}")
    print(f"PyTorch CUDA version: {torch.version.cuda}")
else:
    print("CUDA is not available. PyTorch is running on CPU.")

# Check torchvision
try:
    import torchvision

    print(f"Torchvision version: {torchvision.__version__}")
    # You might need specific torchvision ops to fully check GPU integration,
    # but successful import and version check is a good start.
    print("Torchvision imported successfully.")
except ImportError:
    print("Torchvision could not be imported.")
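The comment in `check_gpu.py` about needing specific torchvision ops to fully verify GPU integration can be made concrete. A small follow-up sketch (editorial, not part of the committed diff) that exercises the compiled NMS kernel on the GPU:

```python
# Exercise a compiled torchvision op (NMS) on the GPU, rather than only
# importing the package; a CUDA-capable build is assumed.
import torch
from torchvision.ops import nms

if torch.cuda.is_available():
    # Two heavily overlapping boxes; NMS should suppress the lower-scoring one.
    boxes = torch.tensor(
        [[0.0, 0.0, 10.0, 10.0], [1.0, 1.0, 11.0, 11.0]], device="cuda"
    )
    scores = torch.tensor([0.9, 0.8], device="cuda")
    keep = nms(boxes, scores, iou_threshold=0.5)  # Runs torchvision's CUDA kernel
    print(f"NMS on GPU kept indices: {keep.tolist()}")
else:
    print("CUDA not available; skipping the ops check.")
```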
scripts/download_data.sh (new executable file, 34 lines)
@@ -0,0 +1,34 @@
#!/bin/bash

set -e  # Exit immediately if a command exits with a non-zero status.

DATA_DIR="data"
TARGET_DIR="$DATA_DIR/PennFudanPed"
ZIP_FILE="$DATA_DIR/PennFudanPed.zip"
URL="https://www.cis.upenn.edu/~jshi/ped_html/PennFudanPed.zip"

# 1. Check if the target directory already exists
if [ -d "$TARGET_DIR" ]; then
    echo "Dataset already exists at $TARGET_DIR. Skipping download."
    exit 0
fi

# 2. Create the data directory if it doesn't exist
mkdir -p "$DATA_DIR"
echo "Created directory $DATA_DIR (if it didn't exist)."

# 3. Download the dataset
echo "Downloading dataset from $URL..."
wget -O "$ZIP_FILE" "$URL"
echo "Download complete."

# 4. Extract the dataset
echo "Extracting $ZIP_FILE to $DATA_DIR..."
unzip -q "$ZIP_FILE" -d "$DATA_DIR"  # -q for quiet mode
echo "Extraction complete."

# 5. Remove the zip file
rm "$ZIP_FILE"
echo "Removed $ZIP_FILE."

echo "Dataset setup complete in $TARGET_DIR."
scripts/test_model.py (new executable file, 152 lines)
@@ -0,0 +1,152 @@
#!/usr/bin/env python
"""
Quick model testing script to verify model creation and inference.
"""

import os
import sys
import time

import torch

# Add project root to the path to enable imports
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))

from models.detection import get_maskrcnn_model
from utils.data_utils import PennFudanDataset, get_transform


def test_model_creation():
    """Test that we can create the model."""
    print("Testing model creation...")
    model = get_maskrcnn_model(
        num_classes=2, pretrained=False, pretrained_backbone=False
    )
    print("✓ Model created successfully")
    return model


def test_model_forward(model, device):
    """Test model forward pass with random inputs."""
    print("\nTesting model forward pass...")

    # Create a random batch
    image = torch.rand(3, 300, 400, device=device)  # Random image

    # Create a random target
    target = {
        "boxes": torch.tensor(
            [[100, 100, 200, 200]], dtype=torch.float32, device=device
        ),
        "labels": torch.tensor([1], dtype=torch.int64, device=device),
        "masks": torch.randint(0, 2, (1, 300, 400), dtype=torch.uint8, device=device),
        "image_id": torch.tensor([0], device=device),
        "area": torch.tensor([10000.0], dtype=torch.float32, device=device),
        "iscrowd": torch.tensor([0], dtype=torch.uint8, device=device),
    }

    # Test inference mode (no targets)
    model.eval()
    with torch.no_grad():
        start_time = time.time()
        output_inference = model([image])
        inference_time = time.time() - start_time

    # Verify inference output
    print(f"✓ Inference mode output: {type(output_inference)}")
    print(f"✓ Inference time: {inference_time:.3f}s")
    print(f"✓ Detection boxes shape: {output_inference[0]['boxes'].shape}")
    print(f"✓ Detection scores shape: {output_inference[0]['scores'].shape}")

    # Test training mode (with targets)
    model.train()
    start_time = time.time()
    output_train = model([image], [target])
    train_time = time.time() - start_time

    # Verify training output
    print(f"✓ Training mode output: {type(output_train)}")
    print(f"✓ Training time: {train_time:.3f}s")

    # Print loss values
    for loss_name, loss_value in output_train.items():
        print(f"✓ {loss_name}: {loss_value.item():.4f}")

    return output_train


def test_model_backward(model, loss_dict, device):
    """Test model backward pass."""
    print("\nTesting model backward pass...")

    # Calculate total loss
    total_loss = sum(loss for loss in loss_dict.values())

    # Create optimizer
    optimizer = torch.optim.SGD(model.parameters(), lr=0.001)

    # Backward pass
    start_time = time.time()
    optimizer.zero_grad()
    total_loss.backward()
    optimizer.step()
    backward_time = time.time() - start_time

    print("✓ Backward pass and optimization completed")
    print(f"✓ Backward time: {backward_time:.3f}s")

    # Check that gradients were calculated
    has_gradients = any(
        param.grad is not None for param in model.parameters() if param.requires_grad
    )
    print(f"✓ Model has gradients: {has_gradients}")


def test_dataset():
    """Test that we can load the dataset."""
    print("\nTesting dataset loading...")

    data_root = "data/PennFudanPed"
    if not os.path.exists(data_root):
        print("✗ Dataset not found at", data_root)
        return None

    # Create dataset
    dataset = PennFudanDataset(root=data_root, transforms=get_transform(train=True))
    print(f"✓ Dataset loaded with {len(dataset)} samples")

    # Test loading a sample
    start_time = time.time()
    img, target = dataset[0]
    load_time = time.time() - start_time

    print(f"✓ Sample loaded in {load_time:.3f}s")
    print(f"✓ Image shape: {img.shape}")
    print(f"✓ Target boxes shape: {target['boxes'].shape}")

    return dataset


def main():
    """Run all tests."""
    print("=== Quick Model Testing Script ===")

    # Set device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    # Run tests
    model = test_model_creation()
    model.to(device)

    loss_dict = test_model_forward(model, device)

    test_model_backward(model, loss_dict, device)

    test_dataset()

    print("\n=== All tests completed successfully ===")


if __name__ == "__main__":
    main()
scripts/visualize_predictions.py (new executable file, 175 lines)
@@ -0,0 +1,175 @@
#!/usr/bin/env python
"""
Visualization script for model predictions on the Penn-Fudan dataset.
This helps visualize and debug model predictions.
"""

import argparse
import os
import sys

import matplotlib.pyplot as plt
import numpy as np
import torch

# Add project root to path for imports
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))

from models.detection import get_maskrcnn_model
from utils.common import load_checkpoint, load_config
from utils.data_utils import PennFudanDataset, get_transform


def visualize_prediction(image, prediction, threshold=0.5):
    """
    Visualize model prediction on an image.

    Args:
        image (torch.Tensor): The input image [C, H, W]
        prediction (dict): Model prediction dict with boxes, scores, labels, masks
        threshold (float): Score threshold for visualization

    Returns:
        plt.Figure: The matplotlib figure with the visualization
    """
    # Convert image from tensor to numpy
    img_np = image.permute(1, 2, 0).cpu().numpy()

    # Denormalize if needed
    if img_np.max() <= 1.0:
        img_np = (img_np * 255).astype(np.uint8)

    # Create figure and axes
    fig, ax = plt.subplots(1, 1, figsize=(12, 9))
    ax.imshow(img_np)
    ax.set_title("Model Predictions")

    # Get predictions
    boxes = prediction["boxes"].cpu().numpy()
    scores = prediction["scores"].cpu().numpy()
    labels = prediction["labels"].cpu().numpy()
    masks = prediction["masks"].cpu().numpy()

    # Filter by threshold
    mask = scores >= threshold
    boxes = boxes[mask]
    scores = scores[mask]
    labels = labels[mask]
    masks = masks[mask]

    # Draw predictions
    for box, score, label, mask in zip(boxes, scores, labels, masks):
        # Draw box
        x1, y1, x2, y2 = box
        rect = plt.Rectangle(
            (x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor="red", linewidth=2
        )
        ax.add_patch(rect)

        # Add label and score
        ax.text(
            x1, y1, f"Person: {score:.2f}", bbox=dict(facecolor="yellow", alpha=0.5)
        )

        # Draw mask (with transparency)
        mask = mask[0] > 0.5  # Threshold mask
        mask_color = np.zeros((mask.shape[0], mask.shape[1], 3), dtype=np.uint8)
        mask_color[mask] = [255, 0, 0]  # Red color
        ax.imshow(mask_color, alpha=0.3)

    # Show count of detections
    ax.set_xlabel(f"Found {len(boxes)} pedestrians with confidence >= {threshold}")

    plt.tight_layout()
    return fig


def run_inference(model, dataset, device, idx=0):
    """
    Run inference on a single image from the dataset.

    Args:
        model (torch.nn.Module): The model
        dataset (PennFudanDataset): The dataset
        device (torch.device): The device
        idx (int): Index of the image to test

    Returns:
        tuple: (image, prediction)
    """
    # Get image
    image, _ = dataset[idx]

    # Prepare for model
    image = image.to(device)

    # Run inference
    model.eval()
    with torch.no_grad():
        prediction = model([image])[0]

    return image, prediction


def main():
    """Main entry point."""
    parser = argparse.ArgumentParser(description="Visualize model predictions")
    parser.add_argument("--config", required=True, help="Path to config file")
    parser.add_argument("--checkpoint", required=True, help="Path to checkpoint file")
    parser.add_argument("--index", type=int, default=0, help="Image index to visualize")
    parser.add_argument("--threshold", type=float, default=0.5, help="Score threshold")
    parser.add_argument("--output", help="Path to save visualization image")
    args = parser.parse_args()

    # Load config
    config = load_config(args.config)

    # Setup device
    device = torch.device(config.get("device", "cpu"))
    print(f"Using device: {device}")

    # Create model
    model = get_maskrcnn_model(
        num_classes=config.get("num_classes", 2),
        pretrained=False,
        pretrained_backbone=False,
    )

    # Load checkpoint
    checkpoint, _ = load_checkpoint(args.checkpoint, model, device)
    model.to(device)
    print(f"Loaded checkpoint from: {args.checkpoint}")

    # Create dataset
    data_root = config.get("data_root", "data/PennFudanPed")
    if not os.path.exists(data_root):
        print(f"Error: Data not found at {data_root}")
        return

    dataset = PennFudanDataset(root=data_root, transforms=get_transform(train=False))
    print(f"Dataset loaded with {len(dataset)} images")

    # Validate index
    if args.index < 0 or args.index >= len(dataset):
        print(f"Error: Index {args.index} out of range (0-{len(dataset) - 1})")
        return

    # Run inference
    print(f"Running inference on image {args.index}...")
    image, prediction = run_inference(model, dataset, device, args.index)

    # Visualize prediction
    print("Visualizing predictions...")
    fig = visualize_prediction(image, prediction, threshold=args.threshold)

    # Save or show
    if args.output:
        fig.savefig(args.output)
        print(f"Visualization saved to: {args.output}")
    else:
        plt.show()
        print("Visualization displayed. Close window to continue.")


if __name__ == "__main__":
    main()
test.py (new file, 124 lines)
@@ -0,0 +1,124 @@
import argparse
import logging
import sys

import torch
import torch.utils.data

# Project specific imports
from models.detection import get_maskrcnn_model
from utils.common import (
    check_data_path,
    load_checkpoint,
    load_config,
    setup_environment,
)
from utils.data_utils import PennFudanDataset, collate_fn, get_transform
from utils.eval_utils import evaluate
from utils.log_utils import setup_logging


def main(args):
    # Load configuration
    config = load_config(args.config)

    # Setup output directory and get device
    output_path, device = setup_environment(config)

    # Setup logging
    setup_logging(output_path, f"{config['config_name']}_test")
    logging.info("--- Testing Script Started ---")
    logging.info(f"Loaded configuration from: {args.config}")
    logging.info(f"Checkpoint path: {args.checkpoint}")
    logging.info(f"Loaded configuration dictionary: {config}")
    if args.max_samples:
        logging.info(f"Limiting evaluation to {args.max_samples} samples")

    # Validate data path
    data_root = config.get("data_root")
    check_data_path(data_root)

    try:
        # Create the full dataset instance for testing with eval transforms
        dataset_test = PennFudanDataset(
            root=data_root, transforms=get_transform(train=False)
        )
        logging.info(f"Test dataset size: {len(dataset_test)}")

        # Create test DataLoader
        data_loader_test = torch.utils.data.DataLoader(
            dataset_test,
            batch_size=config.get("batch_size", 2),
            shuffle=False,  # No need to shuffle test data
            num_workers=config.get("num_workers", 4),
            collate_fn=collate_fn,
            pin_memory=config.get("pin_memory", True),
        )

        logging.info(
            f"Test dataloader configured. Est. batches: {len(data_loader_test)}"
        )

    except Exception as e:
        logging.error(f"Error setting up dataset/dataloader: {e}", exc_info=True)
        sys.exit(1)

    # Create model
    num_classes = config.get("num_classes")
    if num_classes is None:
        logging.error("'num_classes' not specified in configuration.")
        sys.exit(1)

    try:
        # Create the model with the same architecture as in training
        model = get_maskrcnn_model(
            num_classes=num_classes,
            pretrained=False,  # Don't need pretrained weights as we'll load checkpoint
            pretrained_backbone=False,
        )

        # Load checkpoint
        load_checkpoint(args.checkpoint, model, device)
        model.to(device)
        logging.info("Model loaded and moved to device successfully.")
    except Exception as e:
        logging.error(f"Error setting up model: {e}", exc_info=True)
        sys.exit(1)

    # Run Evaluation
    try:
        logging.info("Starting model evaluation...")
        eval_metrics = evaluate(model, data_loader_test, device, args.max_samples)

        # Log detailed metrics
        logging.info("--- Evaluation Results ---")
        for metric_name, metric_value in eval_metrics.items():
            if isinstance(metric_value, (int, float)):
                logging.info(f"  {metric_name}: {metric_value:.4f}")
            else:
                logging.info(f"  {metric_name}: {metric_value}")

        logging.info("Evaluation completed successfully")
    except Exception as e:
        logging.error(f"Error during evaluation: {e}", exc_info=True)
        sys.exit(1)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Test script for torchvision Mask R-CNN"
    )
    parser.add_argument(
        "--config", required=True, type=str, help="Path to configuration file"
    )
    parser.add_argument(
        "--checkpoint", required=True, type=str, help="Path to model checkpoint"
    )
    parser.add_argument(
        "--max_samples",
        type=int,
        default=None,
        help="Maximum number of samples to evaluate",
    )
    args = parser.parse_args()
    main(args)
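`utils/eval_utils.py`, the module the README intro blames for the doom loop, is also absent from this compare. Its contract can be read off the call sites: `evaluate(model, data_loader, device, max_samples=None)` returns a dict containing at least `average_loss` (see `tests/test_model.py` below). One hedged sketch that satisfies that contract; torchvision detection models only return losses in train mode, so the mode is toggled and restored here, and the committed implementation may well differ (which is presumably where the bug lived):

```python
# Hypothetical evaluate(); inferred from its call sites in test.py and
# tests/test_model.py, not from the committed utils/eval_utils.py.
import torch


def evaluate(model, data_loader, device, max_samples=None):
    """Compute the average detection loss over a dataloader."""
    was_training = model.training
    model.train()  # Loss dicts are only produced in train mode
    total_loss, num_batches, num_seen = 0.0, 0, 0
    with torch.no_grad():
        for images, targets in data_loader:
            images = [img.to(device) for img in images]
            targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
            loss_dict = model(images, targets)
            total_loss += sum(loss.item() for loss in loss_dict.values())
            num_batches += 1
            num_seen += len(images)  # max_samples treated as a sample cap
            if max_samples is not None and num_seen >= max_samples:
                break
    if not was_training:
        model.eval()  # Restore the caller's mode
    return {"average_loss": total_loss / max(num_batches, 1)}
```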
BIN test_prediction.png (new binary file)
Binary file not shown. Size after: 944 KiB.
tests/__init__.py (new empty file, 0 lines)
tests/conftest.py (new file, 56 lines)
@@ -0,0 +1,56 @@
import os
import sys
from pathlib import Path

import pytest
import torch

# Add project root to the path to enable imports
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

from models.detection import get_maskrcnn_model  # noqa: E402
from utils.data_utils import PennFudanDataset, get_transform  # noqa: E402


@pytest.fixture
def device():
    """Return CPU device for consistent testing."""
    return torch.device("cpu")


@pytest.fixture
def test_config():
    """Return a minimal config dictionary for testing."""
    return {
        "data_root": "data/PennFudanPed",
        "num_classes": 2,
        "batch_size": 1,
        "device": "cpu",
        "output_dir": "test_outputs",
        "config_name": "test_run",
    }


@pytest.fixture
def small_model(device):
    """Return a small Mask R-CNN model for testing."""
    model = get_maskrcnn_model(
        num_classes=2, pretrained=False, pretrained_backbone=False
    )
    model.to(device)
    return model


@pytest.fixture
def sample_dataset():
    """Return a small dataset for testing if available."""
    data_root = "data/PennFudanPed"

    # Skip if data is not available
    if not os.path.exists(data_root):
        pytest.skip("Test dataset not available")

    transforms = get_transform(train=False)
    dataset = PennFudanDataset(root=data_root, transforms=transforms)
    return dataset
tests/test_data_utils.py (new file, 108 lines)
@@ -0,0 +1,108 @@
import torch

from utils.data_utils import collate_fn, get_transform


def test_dataset_len(sample_dataset):
    """Test that the dataset has the expected length."""
    # PennFudanPed has 170 images
    assert len(sample_dataset) > 0, "Dataset should not be empty"


def test_dataset_getitem(sample_dataset):
    """Test that __getitem__ returns the expected format."""
    if len(sample_dataset) == 0:
        return  # Skip if no data

    # Get first item
    img, target = sample_dataset[0]

    # Check image
    assert isinstance(img, torch.Tensor), "Image should be a tensor"
    assert img.dim() == 3, "Image should have 3 dimensions (C, H, W)"
    assert img.shape[0] == 3, "Image should have 3 channels (RGB)"

    # Check target
    assert isinstance(target, dict), "Target should be a dictionary"
    assert "boxes" in target, "Target should contain 'boxes'"
    assert "labels" in target, "Target should contain 'labels'"
    assert "masks" in target, "Target should contain 'masks'"
    assert "image_id" in target, "Target should contain 'image_id'"
    assert "area" in target, "Target should contain 'area'"
    assert "iscrowd" in target, "Target should contain 'iscrowd'"

    # Check target values
    assert (
        target["boxes"].shape[1] == 4
    ), "Boxes should have 4 coordinates (x1, y1, x2, y2)"
    assert target["labels"].dim() == 1, "Labels should be a 1D tensor"
    assert target["masks"].dim() == 3, "Masks should be a 3D tensor (N, H, W)"


def test_transforms(sample_dataset):
    """Test that transforms are applied correctly."""
    if len(sample_dataset) == 0:
        return  # Skip if no data

    # Get original transform
    orig_transforms = sample_dataset.transforms

    # Apply different transforms
    train_transforms = get_transform(train=True)
    eval_transforms = get_transform(train=False)

    # Test that we can switch transforms
    sample_dataset.transforms = train_transforms
    img_train, target_train = sample_dataset[0]

    sample_dataset.transforms = eval_transforms
    img_eval, target_eval = sample_dataset[0]

    # Restore original transforms
    sample_dataset.transforms = orig_transforms

    # Images should be tensors with expected properties
    assert img_train.dim() == img_eval.dim() == 3
    assert img_train.shape[0] == img_eval.shape[0] == 3


def test_collate_fn():
    """Test the collate function."""
    # Create dummy batch data
    dummy_img1 = torch.rand(3, 100, 100)
    dummy_img2 = torch.rand(3, 100, 100)

    dummy_target1 = {
        "boxes": torch.tensor([[10, 10, 50, 50]], dtype=torch.float32),
        "labels": torch.tensor([1], dtype=torch.int64),
        "masks": torch.zeros(1, 100, 100, dtype=torch.uint8),
        "image_id": torch.tensor([0]),
        "area": torch.tensor([1600.0], dtype=torch.float32),
        "iscrowd": torch.tensor([0], dtype=torch.uint8),
    }

    dummy_target2 = {
        "boxes": torch.tensor([[20, 20, 60, 60]], dtype=torch.float32),
        "labels": torch.tensor([1], dtype=torch.int64),
        "masks": torch.zeros(1, 100, 100, dtype=torch.uint8),
        "image_id": torch.tensor([1]),
        "area": torch.tensor([1600.0], dtype=torch.float32),
        "iscrowd": torch.tensor([0], dtype=torch.uint8),
    }

    batch = [(dummy_img1, dummy_target1), (dummy_img2, dummy_target2)]

    # Apply collate_fn
    images, targets = collate_fn(batch)

    # Check results
    assert len(images) == 2, "Should have 2 images"
    assert len(targets) == 2, "Should have 2 targets"
    assert torch.allclose(images[0], dummy_img1), "First image should match"
    assert torch.allclose(images[1], dummy_img2), "Second image should match"
    assert torch.allclose(
        targets[0]["boxes"], dummy_target1["boxes"]
    ), "First boxes should match"
    assert torch.allclose(
        targets[1]["boxes"], dummy_target2["boxes"]
    ), "Second boxes should match"
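`collate_fn` is imported throughout but, like the rest of `utils/data_utils.py`, never shown in this compare. `test_collate_fn` above fully determines its behavior: turn a list of `(image, target)` pairs into parallel sequences without stacking, since detection images vary in size. The standard torchvision-tutorial one-liner does exactly that (assumed, not confirmed, to be what was committed):

```python
# Hypothetical collate_fn matching test_collate_fn above; the standard
# torchvision-tutorial implementation, since the committed one is not shown.
def collate_fn(batch):
    # [(img1, tgt1), (img2, tgt2)] -> ((img1, img2), (tgt1, tgt2))
    return tuple(zip(*batch))
```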
tests/test_model.py (new file, 102 lines)
@@ -0,0 +1,102 @@
import torch
import torchvision

from utils.eval_utils import evaluate


def test_model_creation(small_model):
    """Test that the model is created correctly."""
    assert isinstance(small_model, torchvision.models.detection.MaskRCNN)
    assert small_model.roi_heads.box_predictor.cls_score.out_features == 2
    assert small_model.roi_heads.mask_predictor.mask_fcn_logits.out_channels == 2


def test_model_forward_train_mode(small_model, sample_dataset, device):
    """Test model forward pass in training mode."""
    if len(sample_dataset) == 0:
        return  # Skip if no data

    # Set model to training mode
    small_model.train()

    # Get a batch
    img, target = sample_dataset[0]
    img = img.to(device)
    target = {k: v.to(device) for k, v in target.items()}

    # Forward pass with targets should return loss dict in training mode
    loss_dict = small_model([img], [target])

    # Verify loss dict structure
    assert isinstance(loss_dict, dict), "Loss should be a dictionary"
    assert "loss_classifier" in loss_dict, "Should have classifier loss"
    assert "loss_box_reg" in loss_dict, "Should have box regression loss"
    assert "loss_mask" in loss_dict, "Should have mask loss"
    assert "loss_objectness" in loss_dict, "Should have objectness loss"
    assert "loss_rpn_box_reg" in loss_dict, "Should have RPN box regression loss"

    # Verify loss values
    for loss_name, loss_value in loss_dict.items():
        assert isinstance(loss_value, torch.Tensor), f"{loss_name} should be a tensor"
        assert loss_value.dim() == 0, f"{loss_name} should be a scalar tensor"
        assert not torch.isnan(loss_value), f"{loss_name} should not be NaN"
        assert not torch.isinf(loss_value), f"{loss_name} should not be infinite"


def test_model_forward_eval_mode(small_model, sample_dataset, device):
    """Test model forward pass in evaluation mode."""
    if len(sample_dataset) == 0:
        return  # Skip if no data

    # Set model to evaluation mode
    small_model.eval()

    # Get a batch
    img, target = sample_dataset[0]
    img = img.to(device)

    # Forward pass without targets should return predictions in eval mode
    with torch.no_grad():
        predictions = small_model([img])

    # Verify predictions structure
    assert isinstance(predictions, list), "Predictions should be a list"
    assert len(predictions) == 1, "Should have predictions for 1 image"

    pred = predictions[0]
    assert "boxes" in pred, "Predictions should contain 'boxes'"
    assert "scores" in pred, "Predictions should contain 'scores'"
    assert "labels" in pred, "Predictions should contain 'labels'"
    assert "masks" in pred, "Predictions should contain 'masks'"


def test_evaluate_function(small_model, sample_dataset, device):
    """Test the evaluate function."""
    if len(sample_dataset) == 0:
        return  # Skip if no data

    # Create a tiny dataloader for testing
    from torch.utils.data import DataLoader

    from utils.data_utils import collate_fn

    # Use only 2 samples for quick testing
    small_ds = torch.utils.data.Subset(
        sample_dataset, range(min(2, len(sample_dataset)))
    )
    dataloader = DataLoader(
        small_ds, batch_size=1, shuffle=False, collate_fn=collate_fn
    )

    # Set model to eval mode
    small_model.eval()

    # Run evaluation (evaluate is imported at the top of the module)
    metrics = evaluate(small_model, dataloader, device)

    # Check results
    assert isinstance(metrics, dict), "Metrics should be a dictionary"
    assert "average_loss" in metrics, "Metrics should contain 'average_loss'"
    assert metrics["average_loss"] >= 0, "Loss should be non-negative"
tests/test_visualization.py (new file, 77 lines)
@@ -0,0 +1,77 @@
import os
import sys

import matplotlib.pyplot as plt
import torch

# Import visualization functions
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from scripts.visualize_predictions import visualize_prediction  # noqa: E402


def test_visualize_prediction():
    """Test that the visualization function works."""
    # Create a dummy image tensor
    image = torch.rand(3, 400, 600)

    # Create a dummy prediction dictionary
    prediction = {
        "boxes": torch.tensor(
            [[100, 100, 200, 200], [300, 300, 400, 400]], dtype=torch.float32
        ),
        "scores": torch.tensor([0.9, 0.7], dtype=torch.float32),
        "labels": torch.tensor([1, 1], dtype=torch.int64),
        "masks": torch.zeros(2, 1, 400, 600, dtype=torch.float32),
    }

    # Set some pixels in the mask to 1
    prediction["masks"][0, 0, 100:200, 100:200] = 1.0
    prediction["masks"][1, 0, 300:400, 300:400] = 1.0

    # Call the visualization function
    fig = visualize_prediction(image, prediction, threshold=0.5)

    # Check that a figure was returned
    assert isinstance(fig, plt.Figure)

    # Check figure properties
    assert len(fig.axes) == 1

    # Close the figure to avoid memory leaks
    plt.close(fig)


def test_visualize_prediction_threshold():
    """Test that the threshold parameter filters predictions correctly."""
    # Create a dummy image tensor
    image = torch.rand(3, 400, 600)

    # Create a dummy prediction dictionary with varying scores
    prediction = {
        "boxes": torch.tensor(
            [[100, 100, 200, 200], [300, 300, 400, 400], [500, 100, 550, 150]],
            dtype=torch.float32,
        ),
        "scores": torch.tensor([0.9, 0.7, 0.3], dtype=torch.float32),
        "labels": torch.tensor([1, 1, 1], dtype=torch.int64),
        "masks": torch.zeros(3, 1, 400, 600, dtype=torch.float32),
    }

    # Call the visualization function with different thresholds
    fig_low = visualize_prediction(image, prediction, threshold=0.2)
    fig_med = visualize_prediction(image, prediction, threshold=0.5)
    fig_high = visualize_prediction(image, prediction, threshold=0.8)

    # Low threshold should show all 3 boxes
    assert "Found 3" in fig_low.axes[0].get_xlabel()

    # Medium threshold should show 2 boxes
    assert "Found 2" in fig_med.axes[0].get_xlabel()

    # High threshold should show 1 box
    assert "Found 1" in fig_high.axes[0].get_xlabel()

    # Close figures
    plt.close(fig_low)
    plt.close(fig_med)
    plt.close(fig_high)
160
todo.md
160
todo.md
@@ -4,100 +4,92 @@ This list outlines the steps required to complete the Torchvision Finetuning pro

## Phase 1: Foundation & Setup

- [ ] Set up project structure (directories: `configs`, `data`, `models`, `utils`, `tests`, `scripts`).
- [ ] Initialize Git repository.
- [ ] Create `.gitignore` file (ignore `data`, `outputs`, `logs`, `.venv`, caches, `*.pth`).
- [ ] Initialize `pyproject.toml` using `uv init`, set Python 3.10.
- [ ] Add core dependencies (`torch`, `torchvision`, `ruff`, `numpy`, `Pillow`, `pytest`) using `uv add`.
- [ ] Create `.pre-commit-config.yaml` and configure `ruff` hooks (format, lint, import sort).
- [ ] Create `__init__.py` files in necessary directories.
- [ ] Create empty placeholder files (`train.py`, `test.py`, `configs/base_config.py`, `utils/data_utils.py`, `models/detection.py`, `tests/conftest.py`).
- [ ] Create basic `README.md`.
- [ ] Install pre-commit hooks (`pre-commit install`).
- [ ] Create `scripts/download_data.sh` script.
  - [ ] Check if data exists.
  - [ ] Create `data/` directory.
  - [ ] Use `wget` to download PennFudanPed dataset.
  - [ ] Use `unzip` to extract data.
  - [ ] Remove zip file after extraction.
  - [ ] Add informative print messages.
  - [ ] Make script executable (`chmod +x`).
- [ ] Ensure `.gitignore` ignores `data/`.
- [ ] Implement base configuration in `configs/base_config.py` (`base_config` dictionary).
- [ ] Implement specific experiment configuration in `configs/pennfudan_maskrcnn_config.py` (`config` dictionary, importing/updating base config).
- [x] Initialize project structure (`configs`, `data`, `models`, `utils`, `tests`, `scripts`)
- [x] Initialize git repository
- [x] Configure `.gitignore`
- [x] Set up `pyproject.toml` with `uv`
- [x] Add dependencies (`torch`, `torchvision` with CUDA 12.4, `ruff`, `numpy`, `Pillow`, `pytest`, `pre-commit`)
- [x] Configure `pre-commit` with `ruff` (formatting, linting)
- [x] Create empty `__init__.py` files
- [x] Create placeholder files (`train.py`, `test.py`, `configs/base_config.py`, etc.)
- [x] Create basic `README.md`
- [x] Install pre-commit hooks
- [x] Verify PyTorch GPU integration (`scripts/check_gpu.py`)
- [x] Create data download script (`scripts/download_data.sh`)
- [x] Implement configuration system (`configs/base_config.py`, `configs/pennfudan_maskrcnn_config.py`)

## Phase 2: Data Handling & Model

- [ ] Implement `PennFudanDataset` class in `utils/data_utils.py`.
  - [ ] `__init__`: Load image and mask paths.
  - [ ] `__getitem__`: Load image/mask, parse masks, generate targets (boxes, labels, masks, image_id, area, iscrowd), apply transforms.
  - [ ] `__len__`: Return dataset size.
- [ ] Implement `get_transform(train)` function in `utils/data_utils.py` (using `torchvision.transforms.v2`).
- [ ] Implement `collate_fn(batch)` function in `utils/data_utils.py`.
- [ ] Implement `get_maskrcnn_model(num_classes, ...)` function in `models/detection.py`.
  - [ ] Load pre-trained Mask R-CNN (`maskrcnn_resnet50_fpn_v2`).
  - [ ] Replace box predictor head (`FastRCNNPredictor`).
  - [ ] Replace mask predictor head (`MaskRCNNPredictor`).
- [x] Implement `PennFudanDataset` class in `utils/data_utils.py`.
  - [x] `__init__`: Load image and mask paths.
  - [x] `__getitem__`: Load image/mask, parse masks, generate targets (boxes, labels, masks, image_id, area, iscrowd), apply transforms.
  - [x] `__len__`: Return dataset size.
- [x] Implement `get_transform(train)` function in `utils/data_utils.py` (using `torchvision.transforms.v2`).
- [x] Implement `collate_fn(batch)` function in `utils/data_utils.py`.
- [x] Implement `get_maskrcnn_model(num_classes, ...)` function in `models/detection.py` (see the sketch after this list).
  - [x] Load pre-trained Mask R-CNN (`maskrcnn_resnet50_fpn_v2`).
  - [x] Replace box predictor head (`FastRCNNPredictor`).
  - [x] Replace mask predictor head (`MaskRCNNPredictor`).

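`models/detection.py` itself is not part of this diff. As a reference for the three checked items above, here is a minimal sketch of the head-replacement pattern they describe, following the standard torchvision finetuning recipe; the real `get_maskrcnn_model` also accepts `pretrained` and `pretrained_backbone` flags (per its call site in `train.py`), and the hidden-layer size of 256 is an assumption:

```python
from torchvision.models.detection import (
    MaskRCNN_ResNet50_FPN_V2_Weights,
    maskrcnn_resnet50_fpn_v2,
)
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor


def get_maskrcnn_model(num_classes, pretrained=True):
    # Load Mask R-CNN with a ResNet-50 FPN backbone, optionally COCO-pretrained
    weights = MaskRCNN_ResNet50_FPN_V2_Weights.DEFAULT if pretrained else None
    model = maskrcnn_resnet50_fpn_v2(weights=weights)

    # Swap the box classification head for one sized to num_classes
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)

    # Swap the mask prediction head likewise (hidden layer of 256 is an assumption)
    in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
    model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, 256, num_classes)

    return model
```
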
## Phase 3: Training Script & Core Logic

- [ ] Set up basic `train.py` structure.
  - [ ] Add imports.
  - [ ] Implement `argparse` for `--config` argument.
  - [ ] Implement dynamic config loading (`importlib`).
  - [ ] Set random seeds.
  - [ ] Determine compute device (`cuda` or `cpu`).
  - [ ] Create output directory structure (`outputs/<config_name>/checkpoints`).
  - [ ] Instantiate `PennFudanDataset` (train).
  - [ ] Instantiate `DataLoader` (train) using `collate_fn`.
  - [ ] Instantiate model using `get_maskrcnn_model`.
  - [ ] Move model to device.
  - [ ] Add `if __name__ == "__main__":` guard.
- [ ] Implement minimal training step in `train.py`.
  - [ ] Instantiate optimizer (`torch.optim.SGD`).
  - [ ] Set `model.train()`.
  - [ ] Fetch one batch.
  - [ ] Move data to device.
  - [ ] Perform forward pass (`loss_dict = model(...)`).
  - [ ] Calculate total loss (`sum(...)`).
  - [ ] Perform backward pass (`optimizer.zero_grad()`, `loss.backward()`, `optimizer.step()`).
  - [ ] Print/log loss for the single step (and temporarily exit).
- [ ] Implement logging setup in `utils/log_utils.py` (`setup_logging` function).
  - [ ] Configure `logging.basicConfig` for file and console output.
- [ ] Integrate logging into `train.py`.
  - [ ] Call `setup_logging`.
  - [ ] Replace `print` with `logging.info`.
  - [ ] Log config, device, and training progress/losses.
- [ ] Implement full training loop in `train.py`.
  - [ ] Remove single-step exit.
  - [ ] Add LR scheduler (`torch.optim.lr_scheduler.StepLR`).
  - [ ] Add epoch loop.
  - [ ] Add batch loop, integrating the single training step logic.
  - [ ] Log loss periodically within the batch loop.
  - [ ] Step the LR scheduler at the end of each epoch.
  - [ ] Log total training time.
- [ ] Implement checkpointing in `train.py`.
  - [ ] Define checkpoint directory.
  - [ ] Implement logic to find and load the latest checkpoint (resume training).
  - [ ] Save checkpoints periodically (based on frequency or final epoch).
  - [ ] Include epoch, model state, optimizer state, scheduler state, config.
  - [ ] Log checkpoint loading/saving.
- [x] Set up basic `train.py` structure.
  - [x] Add imports.
  - [x] Implement `argparse` for `--config` argument.
  - [x] Implement dynamic config loading (`importlib`).
  - [x] Set random seeds.
  - [x] Determine compute device (`cuda` or `cpu`).
  - [x] Create output directory structure (`outputs/<config_name>/checkpoints`).
  - [x] Instantiate `PennFudanDataset` (train).
  - [x] Instantiate `DataLoader` (train) using `collate_fn`.
  - [x] Instantiate model using `get_maskrcnn_model`.
  - [x] Move model to device.
  - [x] Add `if __name__ == "__main__":` guard.
- [x] Implement minimal training step in `train.py`.
  - [x] Instantiate optimizer (`torch.optim.SGD`).
  - [x] Set `model.train()`.
  - [x] Fetch one batch.
  - [x] Move data to device.
  - [x] Perform forward pass (`loss_dict = model(...)`).
  - [x] Calculate total loss (`sum(...)`).
  - [x] Perform backward pass (`optimizer.zero_grad()`, `loss.backward()`, `optimizer.step()`).
  - [x] Print/log loss for the single step (and temporarily exit).
- [x] Implement logging setup in `utils/log_utils.py` (`setup_logging` function).
  - [x] Configure `logging.basicConfig` for file and console output.
- [x] Integrate logging into `train.py`.
  - [x] Call `setup_logging`.
  - [x] Replace `print` with `logging.info`.
  - [x] Log config, device, and training progress/losses.
- [x] Implement full training loop in `train.py`.
  - [x] Remove single-step exit.
  - [x] Add LR scheduler (`torch.optim.lr_scheduler.StepLR`).
  - [x] Add epoch loop.
  - [x] Add batch loop, integrating the single training step logic.
  - [x] Log loss periodically within the batch loop.
  - [x] Step the LR scheduler at the end of each epoch.
  - [x] Log total training time.
- [x] Implement checkpointing in `train.py`.
  - [x] Define checkpoint directory.
  - [x] Implement logic to find and load the latest checkpoint (resume training).
  - [x] Save checkpoints periodically (based on frequency or final epoch).
  - [x] Include epoch, model state, optimizer state, scheduler state, config.
  - [x] Log checkpoint loading/saving.

## Phase 4: Evaluation & Testing

- [ ] Add evaluation dependencies (`pycocotools` - optional initially).
- [ ] Create `utils/eval_utils.py` and implement `evaluate` function.
  - [ ] Set `model.eval()`.
  - [ ] Use `torch.no_grad()`.
  - [ ] Loop through validation/test dataloader.
  - [ ] Perform forward pass.
  - [ ] Calculate/aggregate metrics (start with average loss, potentially add mAP later).
  - [ ] Log evaluation metrics and time.
  - [ ] Return metrics.
- [ ] Integrate evaluation into `train.py`.
  - [ ] Create validation `Dataset` and `DataLoader` (using `torch.utils.data.Subset`).
  - [ ] Call `evaluate` at the end of each epoch.
  - [ ] Log validation metrics.
- [x] Create `utils/eval_utils.py` and implement `evaluate` function.
  - [x] Set `model.eval()`.
  - [x] Use `torch.no_grad()`.
  - [x] Loop through validation/test dataloader.
  - [x] Perform forward pass.
  - [x] Calculate/aggregate metrics (start with average loss, potentially add mAP later).
  - [x] Log evaluation metrics and time.
  - [x] Return metrics.
- [x] Integrate evaluation into `train.py`.
  - [x] Create validation `Dataset` and `DataLoader` (using `torch.utils.data.Subset`).
  - [x] Call `evaluate` at the end of each epoch.
  - [x] Log validation metrics.
- [ ] (Later) Implement logic to save the *best* model based on validation metric.
- [ ] Implement `test.py` script (a minimal skeleton is sketched after this list).
  - [ ] Reuse argument parsing, config loading, device setup, dataset/dataloader (test split), model creation from `train.py`.

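`test.py` is still unimplemented, and nothing in this diff pins down its interface. A hypothetical skeleton that reuses the helpers from this changeset might look as follows; the `--checkpoint` flag is an assumption, and the train/val/test split logic is skipped for brevity:

```python
import argparse

import torch.utils.data

from models.detection import get_maskrcnn_model
from utils.common import load_checkpoint, load_config, setup_environment
from utils.data_utils import PennFudanDataset, collate_fn, get_transform
from utils.eval_utils import evaluate


def main(args):
    config = load_config(args.config)
    _, device = setup_environment(config)

    # Evaluation transforms only; no train-time augmentation
    dataset = PennFudanDataset(
        root=config["data_root"], transforms=get_transform(train=False)
    )
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=config.get("batch_size", 2),
        shuffle=False,
        collate_fn=collate_fn,
    )

    model = get_maskrcnn_model(num_classes=config["num_classes"])
    model.to(device)
    load_checkpoint(args.checkpoint, model, device)

    print(evaluate(model, loader, device))


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Evaluate a trained Mask R-CNN model")
    parser.add_argument("--config", required=True, help="Path to configuration file")
    parser.add_argument("--checkpoint", required=True, help="Path to a .pth checkpoint")
    main(parser.parse_args())
```
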
344 train.py Normal file
@@ -0,0 +1,344 @@
import argparse
import logging
import os
import sys
import time

import torch
import torch.utils.data

# Project specific imports
from models.detection import get_maskrcnn_model
from utils.common import (
    check_data_path,
    load_checkpoint,
    load_config,
    setup_environment,
)
from utils.data_utils import PennFudanDataset, collate_fn, get_transform
from utils.eval_utils import evaluate
from utils.log_utils import setup_logging


def main(args):
    # Load configuration
    config = load_config(args.config)

    # Setup output directory and get device
    output_path, device = setup_environment(config)
    checkpoint_path = os.path.join(output_path, "checkpoints")
    os.makedirs(checkpoint_path, exist_ok=True)

    # Setup logging
    setup_logging(output_path, config.get("config_name", "default_run"))
    logging.info("--- Training Script Started ---")
    logging.info(f"Loaded configuration from: {args.config}")
    logging.info(f"Loaded configuration dictionary: {config}")
    logging.info(f"Output will be saved to: {output_path}")

    # Validate data path
    data_root = config.get("data_root")
    check_data_path(data_root)

    try:
        # Create the full training dataset instance first
        dataset_full = PennFudanDataset(
            root=data_root, transforms=get_transform(train=True)
        )
        logging.info(f"Full dataset size: {len(dataset_full)}")

        # Create validation dataset instance with eval transforms
        dataset_val_instance = PennFudanDataset(
            root=data_root, transforms=get_transform(train=False)
        )

        # Split the dataset indices
        torch.manual_seed(
            config.get("seed", 42)
        )  # Use the same seed for consistent splits
        indices = torch.randperm(len(dataset_full)).tolist()
        val_split_ratio = config.get(
            "val_split_ratio", 0.1
        )  # Default to 10% validation
        val_split_count = int(val_split_ratio * len(dataset_full))
        if val_split_count == 0 and len(dataset_full) > 0:
            logging.warning(
                f"Validation split resulted in 0 samples (ratio={val_split_ratio}, total={len(dataset_full)}). Using 1 sample for validation."
            )
            val_split_count = 1
        elif val_split_count >= len(dataset_full):
            logging.error(
                f"Validation split ratio ({val_split_ratio}) too high, results in no training samples."
            )
            sys.exit(1)

        train_indices = indices[:-val_split_count]
        val_indices = indices[-val_split_count:]

        # Create Subset datasets
        dataset_train = torch.utils.data.Subset(dataset_full, train_indices)
        dataset_val = torch.utils.data.Subset(dataset_val_instance, val_indices)

        logging.info(
            f"Using {len(train_indices)} samples for training and {len(val_indices)} for validation."
        )

        # Create DataLoaders
        data_loader_train = torch.utils.data.DataLoader(
            dataset_train,
            batch_size=config.get("batch_size", 2),
            # Shuffle should be true for the training subset loader
            shuffle=True,
            num_workers=config.get("num_workers", 4),
            collate_fn=collate_fn,
            pin_memory=config.get("pin_memory", True),
        )
        data_loader_val = torch.utils.data.DataLoader(
            dataset_val,
            batch_size=config.get(
                "batch_size", 2
            ),  # Often use same or larger batch size for validation
            shuffle=False,  # No need to shuffle validation data
            num_workers=config.get("num_workers", 4),
            collate_fn=collate_fn,
            pin_memory=config.get("pin_memory", True),
        )

        logging.info(
            f"Training dataloader configured. Est. batches: {len(data_loader_train)}"
        )
        logging.info(
            f"Validation dataloader configured. Est. batches: {len(data_loader_val)}"
        )

    except Exception as e:
        logging.error(f"Error setting up dataset/dataloader: {e}", exc_info=True)
        sys.exit(1)

    # Create model
    num_classes = config.get("num_classes")
    if num_classes is None:
        logging.error("'num_classes' not specified in configuration.")
        sys.exit(1)

    try:
        model = get_maskrcnn_model(
            num_classes=num_classes,
            pretrained=config.get("pretrained", True),
            pretrained_backbone=config.get("pretrained_backbone", True),
        )
        model.to(device)
        logging.info("Model loaded successfully.")
    except Exception as e:
        logging.error(f"Error creating model: {e}", exc_info=True)
        sys.exit(1)

    # Create optimizer and learning rate scheduler
    optimizer = torch.optim.SGD(
        model.parameters(),
        lr=config.get("lr", 0.005),
        momentum=config.get("momentum", 0.9),
        weight_decay=config.get("weight_decay", 0.0005),
    )

    lr_scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer,
        step_size=config.get("lr_step_size", 3),
        gamma=config.get("lr_gamma", 0.1),
    )

    # --- Resume from Checkpoint (if specified) ---
    start_epoch = 0
    if args.resume:
        try:
            # Find latest checkpoint
            checkpoints = [f for f in os.listdir(checkpoint_path) if f.endswith(".pth")]
            if not checkpoints:
                logging.warning(
                    f"No checkpoints found in {checkpoint_path}, starting from scratch."
                )
            else:
                # Extract epoch numbers from filenames and find the latest
                max_epoch = -1
                latest_checkpoint = None
                for ckpt in checkpoints:
                    if ckpt.startswith("checkpoint_epoch_"):
                        try:
                            epoch_num = int(
                                ckpt.replace("checkpoint_epoch_", "").replace(
                                    ".pth", ""
                                )
                            )
                            if epoch_num > max_epoch:
                                max_epoch = epoch_num
                                latest_checkpoint = ckpt
                        except ValueError:
                            continue

                if latest_checkpoint:
                    checkpoint_file = os.path.join(checkpoint_path, latest_checkpoint)
                    logging.info(f"Resuming from checkpoint: {checkpoint_file}")

                    # Load checkpoint
                    checkpoint, start_epoch = load_checkpoint(
                        checkpoint_file,
                        model,
                        device,
                        load_optimizer=True,
                        optimizer=optimizer,
                        load_scheduler=True,
                        scheduler=lr_scheduler,
                    )

                    logging.info(f"Resuming from epoch {start_epoch}")
                else:
                    logging.warning(
                        f"No valid checkpoints found in {checkpoint_path}, starting from scratch."
                    )
        except Exception as e:
            logging.error(f"Error loading checkpoint: {e}", exc_info=True)
            logging.warning("Starting training from scratch.")
            start_epoch = 0

    # --- Training Loop ---
    train_time_start = time.time()
    logging.info("--- Starting Training Loop ---")

    for epoch in range(start_epoch, config.get("num_epochs", 10)):
        # Set model to training mode
        model.train()

        # Initialize epoch metrics
        epoch_loss = 0.0
        epoch_loss_classifier = 0.0
        epoch_loss_box_reg = 0.0
        epoch_loss_mask = 0.0
        epoch_loss_objectness = 0.0
        epoch_loss_rpn_box_reg = 0.0

        logging.info(f"--- Epoch {epoch + 1}/{config.get('num_epochs', 10)} ---")
        epoch_start_time = time.time()

        # Train loop
        for i, (images, targets) in enumerate(data_loader_train):
            # Move data to device
            images = list(image.to(device) for image in images)
            targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

            # Forward pass
            loss_dict = model(images, targets)

            # Sum loss components
            losses = sum(loss for loss in loss_dict.values())

            # Backward and optimize
            optimizer.zero_grad()
            losses.backward()
            optimizer.step()

            # Log batch results
            loss_value = losses.item()
            epoch_loss += loss_value

            # Accumulate individual loss components
            if "loss_classifier" in loss_dict:
                epoch_loss_classifier += loss_dict["loss_classifier"].item()
            if "loss_box_reg" in loss_dict:
                epoch_loss_box_reg += loss_dict["loss_box_reg"].item()
            if "loss_mask" in loss_dict:
                epoch_loss_mask += loss_dict["loss_mask"].item()
            if "loss_objectness" in loss_dict:
                epoch_loss_objectness += loss_dict["loss_objectness"].item()
            if "loss_rpn_box_reg" in loss_dict:
                epoch_loss_rpn_box_reg += loss_dict["loss_rpn_box_reg"].item()

            # Periodic logging
            if (i + 1) % config.get("log_freq", 10) == 0:
                log_str = f"Epoch [{epoch + 1}/{config.get('num_epochs', 10)}], "
                log_str += f"Iter [{i + 1}/{len(data_loader_train)}], "
                log_str += f"Loss: {loss_value:.4f}"

                # Add per-component losses for richer logging
                comp_log = []
                if "loss_classifier" in loss_dict:
                    comp_log.append(f"cls: {loss_dict['loss_classifier'].item():.4f}")
                if "loss_box_reg" in loss_dict:
                    comp_log.append(f"box: {loss_dict['loss_box_reg'].item():.4f}")
                if "loss_mask" in loss_dict:
                    comp_log.append(f"mask: {loss_dict['loss_mask'].item():.4f}")
                if "loss_objectness" in loss_dict:
                    comp_log.append(f"obj: {loss_dict['loss_objectness'].item():.4f}")
                if "loss_rpn_box_reg" in loss_dict:
                    comp_log.append(f"rpn: {loss_dict['loss_rpn_box_reg'].item():.4f}")

                if comp_log:
                    log_str += f" [{', '.join(comp_log)}]"

                logging.info(log_str)

        # Step learning rate scheduler after each epoch
        lr_scheduler.step()

        # Calculate and log epoch metrics
        if len(data_loader_train) > 0:
            avg_loss = epoch_loss / len(data_loader_train)
            avg_loss_classifier = epoch_loss_classifier / len(data_loader_train)
            avg_loss_box_reg = epoch_loss_box_reg / len(data_loader_train)
            avg_loss_mask = epoch_loss_mask / len(data_loader_train)
            avg_loss_objectness = epoch_loss_objectness / len(data_loader_train)
            avg_loss_rpn_box_reg = epoch_loss_rpn_box_reg / len(data_loader_train)

            logging.info(f"Epoch {epoch + 1} - Avg Loss: {avg_loss:.4f}")
            logging.info(f"  Classifier Loss: {avg_loss_classifier:.4f}")
            logging.info(f"  Box Reg Loss: {avg_loss_box_reg:.4f}")
            logging.info(f"  Mask Loss: {avg_loss_mask:.4f}")
            logging.info(f"  Objectness Loss: {avg_loss_objectness:.4f}")
            logging.info(f"  RPN Box Reg Loss: {avg_loss_rpn_box_reg:.4f}")
        else:
            logging.warning("No training batches were processed in this epoch.")

        epoch_duration = time.time() - epoch_start_time
        logging.info(f"Epoch duration: {epoch_duration:.2f}s")

        # --- Validation ---
        logging.info("Running validation...")
        val_metrics = evaluate(model, data_loader_val, device)
        logging.info(f"Validation Loss: {val_metrics['average_loss']:.4f}")

        # --- Checkpoint Saving ---
        if (epoch + 1) % config.get("checkpoint_freq", 1) == 0 or epoch == config.get(
            "num_epochs", 10
        ) - 1:
            checkpoint_file = os.path.join(
                checkpoint_path, f"checkpoint_epoch_{epoch + 1}.pth"
            )
            checkpoint = {
                "epoch": epoch + 1,
                "model_state_dict": model.state_dict(),
                "optimizer_state_dict": optimizer.state_dict(),
                "scheduler_state_dict": lr_scheduler.state_dict(),
                "config": config,
                "val_loss": val_metrics["average_loss"],
            }
            try:
                torch.save(checkpoint, checkpoint_file)
                logging.info(f"Checkpoint saved to {checkpoint_file}")
            except Exception as e:
                logging.error(f"Error saving checkpoint: {e}", exc_info=True)

    # --- Final Metrics and Cleanup ---
    total_training_time = time.time() - train_time_start
    hours, remainder = divmod(total_training_time, 3600)
    minutes, seconds = divmod(remainder, 60)
    logging.info(f"Training completed in {int(hours)}h {int(minutes)}m {seconds:.2f}s")
    logging.info(f"Final model saved to {checkpoint_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Train a Mask R-CNN model")
    parser.add_argument("--config", required=True, help="Path to configuration file")
    parser.add_argument(
        "--resume", action="store_true", help="Resume training from latest checkpoint"
    )
    args = parser.parse_args()
    main(args)

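Given the module layout that `load_config` expects (a `config` dict in a file under `configs/`), a typical invocation of this script would be something like `uv run python train.py --config configs/pennfudan_maskrcnn_config.py`, with `--resume` added to pick up the latest checkpoint; the config filename here comes from the checklist above, since the config files themselves are not part of this diff.
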
0 utils/__init__.py Normal file
185 utils/common.py Normal file
@@ -0,0 +1,185 @@
import importlib
import logging
import os
import random
import sys

import numpy as np
import torch


def load_config(config_path):
    """Load configuration from a Python file.

    Args:
        config_path (str): Path to the configuration file.

    Returns:
        dict: The loaded configuration dictionary.
    """
    try:
        config_path = os.path.abspath(config_path)
        if not os.path.exists(config_path):
            print(f"Error: Config file not found at {config_path}")
            sys.exit(1)

        # Derive module path from file path relative to workspace root
        workspace_root = os.path.abspath(os.getcwd())
        relative_path = os.path.relpath(config_path, workspace_root)
        if relative_path.startswith(".."):
            print(f"Error: Config file {config_path} is outside the project directory.")
            sys.exit(1)

        module_path_no_ext, _ = os.path.splitext(relative_path)
        module_path_str = module_path_no_ext.replace(os.sep, ".")

        print(f"Attempting to import config module: {module_path_str}")
        config_module = importlib.import_module(module_path_str)
        config = config_module.config

        print(
            f"Loaded configuration from: {config_path} (via module {module_path_str})"
        )
        return config

    except ImportError as e:
        print(f"Error importing config module '{module_path_str}': {e}")
        print(
            "Ensure the config file path is correct and relative imports within it are valid."
        )
        import traceback

        traceback.print_exc()
        sys.exit(1)
    except AttributeError as e:
        print(
            f"Error: Could not find 'config' dictionary in module {module_path_str}. {e}"
        )
        sys.exit(1)
    except Exception as e:
        print(f"Error loading configuration file {config_path}: {e}")
        import traceback

        traceback.print_exc()
        sys.exit(1)


def setup_environment(config):
    """Set up the environment based on configuration.

    Args:
        config (dict): Configuration dictionary.

    Returns:
        tuple: (output_path, device) - the output directory path and torch device.
    """
    # Setup output directory
    output_dir = config.get("output_dir", "outputs")
    config_name = config.get("config_name", "default_run")
    output_path = os.path.join(output_dir, config_name)
    os.makedirs(output_path, exist_ok=True)

    # Set random seeds
    seed = config.get("seed", 42)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    logging.info(f"Set random seed to: {seed}")

    # Setup device
    device_name = config.get("device", "cuda")
    if device_name == "cuda" and not torch.cuda.is_available():
        logging.warning("CUDA requested but not available, falling back to CPU.")
        device_name = "cpu"
    device = torch.device(device_name)
    logging.info(f"Using device: {device}")

    return output_path, device


def load_checkpoint(
    checkpoint_path,
    model,
    device,
    load_optimizer=False,
    optimizer=None,
    load_scheduler=False,
    scheduler=None,
):
    """Load a checkpoint into the model and optionally optimizer and scheduler.

    Args:
        checkpoint_path (str): Path to the checkpoint file.
        model (torch.nn.Module): The model to load the weights into.
        device (torch.device): The device to load the checkpoint on.
        load_optimizer (bool): Whether to load optimizer state.
        optimizer (torch.optim.Optimizer, optional): The optimizer to load state into.
        load_scheduler (bool): Whether to load scheduler state.
        scheduler (torch.optim.lr_scheduler._LRScheduler, optional): The scheduler to load state into.

    Returns:
        dict: The loaded checkpoint.
        int: The starting epoch to resume from.
    """
    try:
        logging.info(f"Loading checkpoint from: {checkpoint_path}")
        checkpoint = torch.load(checkpoint_path, map_location=device)

        # Handle potential DataParallel prefix
        state_dict = checkpoint.get("model_state_dict", checkpoint)
        if isinstance(state_dict, dict):
            # Handle case where model was trained with DataParallel
            if all(k.startswith("module.") for k in state_dict.keys()):
                logging.info(
                    "Detected DataParallel checkpoint, removing 'module.' prefix"
                )
                state_dict = {
                    k.replace("module.", ""): v for k, v in state_dict.items()
                }

            model.load_state_dict(state_dict)
            logging.info("Model state loaded successfully")

            # Load optimizer state if requested
            if (
                load_optimizer
                and optimizer is not None
                and "optimizer_state_dict" in checkpoint
            ):
                optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
                logging.info("Optimizer state loaded successfully")

            # Load scheduler state if requested
            if (
                load_scheduler
                and scheduler is not None
                and "scheduler_state_dict" in checkpoint
            ):
                scheduler.load_state_dict(checkpoint["scheduler_state_dict"])
                logging.info("Scheduler state loaded successfully")

            # Get the epoch number; the saved "epoch" field already holds the number
            # of completed epochs (epoch + 1 at save time), so it is the correct
            # 0-indexed epoch to resume from without adding 1 again
            start_epoch = checkpoint.get("epoch", 0) if load_optimizer else 0
            if "epoch" in checkpoint:
                logging.info(f"Loaded checkpoint from epoch: {checkpoint['epoch']}")

            return checkpoint, start_epoch
        else:
            logging.error("Checkpoint does not contain a valid state dictionary.")
            sys.exit(1)
    except Exception as e:
        logging.error(f"Error loading checkpoint: {e}", exc_info=True)
        sys.exit(1)


def check_data_path(data_root):
    """Check if the data path exists and is valid.

    Args:
        data_root (str): Path to the data directory.
    """
    if not data_root or not os.path.isdir(data_root):
        logging.error(f"Data root directory not found or not specified: {data_root}")
        sys.exit(1)

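Neither config file appears in this diff. As a hypothetical sketch of the shape of dictionary that `load_config` and `setup_environment` expect, every key below is taken from a `config.get(...)` lookup in the code above or in `train.py`; the values are illustrative defaults, not the repo's actual settings:

```python
# Hypothetical configs/pennfudan_maskrcnn_config.py (not part of this diff).
# load_config imports this module and reads its `config` attribute.
config = {
    "config_name": "pennfudan_maskrcnn",
    "output_dir": "outputs",
    "seed": 42,
    "device": "cuda",
    "data_root": "data/PennFudanPed",
    "num_classes": 2,  # background + pedestrian
    "batch_size": 2,
    "num_workers": 4,
    "pin_memory": True,
    "val_split_ratio": 0.1,
    "num_epochs": 10,
    "lr": 0.005,
    "momentum": 0.9,
    "weight_decay": 0.0005,
    "lr_step_size": 3,
    "lr_gamma": 0.1,
    "log_freq": 10,
    "checkpoint_freq": 1,
    "pretrained": True,
    "pretrained_backbone": True,
}
```
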
159 utils/data_utils.py Normal file
@@ -0,0 +1,159 @@
import os

import numpy as np
import torch
import torch.utils.data
import torchvision.transforms.v2 as T
from PIL import Image


class PennFudanDataset(torch.utils.data.Dataset):
    """Dataset class for the Penn-Fudan Pedestrian Detection dataset."""

    def __init__(self, root, transforms):
        self.root = root
        self.transforms = transforms
        # Load all image files, sorting them to ensure alignment
        self.imgs = sorted(os.listdir(os.path.join(root, "PNGImages")))
        self.masks = sorted(os.listdir(os.path.join(root, "PedMasks")))

    def __getitem__(self, idx):
        """Get a sample from the dataset.

        Args:
            idx (int): Index of the sample to retrieve.

        Returns:
            tuple: (image, target) where target is a dictionary containing various object annotations.
        """
        # Load image
        img_path = os.path.join(self.root, "PNGImages", self.imgs[idx])
        mask_path = os.path.join(self.root, "PedMasks", self.masks[idx])

        # Use PIL to load images (more memory efficient)
        img = Image.open(img_path).convert("RGB")
        mask = Image.open(mask_path)

        # Convert mask PIL image to numpy array
        mask = np.array(mask)

        # Find all object instances (each instance has a unique value in the mask)
        # Value 0 is the background
        obj_ids = np.unique(mask)
        obj_ids = obj_ids[1:]  # Remove background (id=0)

        # Split the mask into binary masks for each object instance
        masks = mask == obj_ids[:, None, None]

        # Get bounding box for each mask
        num_objs = len(obj_ids)
        boxes = []
        kept = []  # track which masks yield a valid box, so masks stay aligned with boxes

        for i in range(num_objs):
            pos = np.where(masks[i])
            if len(pos[0]) == 0 or len(pos[1]) == 0:  # Skip empty masks
                continue

            xmin = np.min(pos[1])
            xmax = np.max(pos[1])
            ymin = np.min(pos[0])
            ymax = np.max(pos[0])

            # Skip boxes with zero area
            if xmax <= xmin or ymax <= ymin:
                continue

            boxes.append([xmin, ymin, xmax, ymax])
            kept.append(i)

        # Convert everything to tensors
        if boxes:
            boxes = torch.as_tensor(boxes, dtype=torch.float32)
            labels = torch.ones(
                (len(boxes),), dtype=torch.int64
            )  # All objects are pedestrians (class 1)
            # Only keep masks whose boxes survived the filtering above
            masks = torch.as_tensor(masks[kept], dtype=torch.uint8)

            # Calculate area of each box
            area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])

            # All instances are not crowd
            iscrowd = torch.zeros((len(boxes),), dtype=torch.uint8)

            # Create the target dictionary
            target = {
                "boxes": boxes,
                "labels": labels,
                "masks": masks,
                "image_id": torch.tensor([idx]),
                "area": area,
                "iscrowd": iscrowd,
            }
        else:
            # Handle case with no valid objects (rare but possible)
            target = {
                "boxes": torch.zeros((0, 4), dtype=torch.float32),
                "labels": torch.zeros((0,), dtype=torch.int64),
                "masks": torch.zeros(
                    (0, mask.shape[0], mask.shape[1]), dtype=torch.uint8
                ),
                "image_id": torch.tensor([idx]),
                "area": torch.zeros((0,), dtype=torch.float32),
                "iscrowd": torch.zeros((0,), dtype=torch.uint8),
            }

        # Apply transforms if provided
        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target

    def __len__(self):
        return len(self.imgs)


# --- Utility Functions --- #


def get_transform(train):
    """Get the transformations for the dataset.

    Args:
        train (bool): Whether to get transforms for training or evaluation.

    Returns:
        torchvision.transforms.Compose: The composed transforms.
    """
    transforms = []

    # Convert to a tensor image (scaling to float happens in ToDtype below)
    transforms.append(T.ToImage())

    # Resize images to control memory usage
    # Use a smaller size for training (more memory-intensive due to gradients)
    if train:
        transforms.append(T.Resize(700))
    else:
        transforms.append(T.Resize(800))  # Can use larger size for eval

    transforms.append(T.ToDtype(torch.float32, scale=True))

    # Data augmentation for training
    if train:
        transforms.append(T.RandomHorizontalFlip(0.5))

    return T.Compose(transforms)


def collate_fn(batch):
    """Custom collate function for object detection models.

    It aggregates images into a list and targets into a list.
    Necessary because targets can have varying numbers of objects.

    Args:
        batch (list): A list of (image, target) tuples.

    Returns:
        tuple: A tuple containing a list of images and a list of targets.
    """
    return tuple(zip(*batch))

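A minimal illustration of what `collate_fn` returns; the strings below are hypothetical stand-ins for real image tensors and target dicts:

```python
from utils.data_utils import collate_fn

# Placeholder (image, target) pairs
batch = [("img_a", {"boxes": "t_a"}), ("img_b", {"boxes": "t_b"})]
images, targets = collate_fn(batch)
assert images == ("img_a", "img_b")
assert targets == ({"boxes": "t_a"}, {"boxes": "t_b"})
```

Keeping images and targets as parallel sequences rather than stacked tensors is what lets each sample carry a different image size and a different number of boxes.
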
822 utils/eval_utils.py Normal file
@@ -0,0 +1,822 @@
import logging
import time

import numpy as np
import torch
from torchvision.ops import box_iou


def evaluate(model, data_loader, device, max_samples=None):
    """Performs evaluation on the dataset for one epoch.

    Args:
        model (torch.nn.Module): The model to evaluate.
        data_loader (torch.utils.data.DataLoader): DataLoader for the evaluation data.
        device (torch.device): The device to run evaluation on.
        max_samples (int, optional): Maximum number of batches to evaluate. If None, evaluate all.

    Returns:
        dict: A dictionary containing evaluation metrics (e.g., average loss, mAP).
    """
    model.eval()  # Set model to evaluation mode
    total_loss = 0.0
    num_batches = len(data_loader)

    # Limit evaluation samples if specified
    if max_samples is not None:
        num_batches = min(num_batches, max_samples)
        logging.info(f"Limiting evaluation to {num_batches} batches")

    eval_start_time = time.time()
    status_interval = max(1, num_batches // 10)  # Log status roughly 10 times

    # Initialize metrics collection
    inference_times = []

    # IoU thresholds for mAP calculation (the original list repeated 0.5;
    # the full COCO 0.5:0.95 sweep is not implemented here)
    iou_thresholds = [0.5, 0.75]
    confidence_thresholds = [0.5, 0.75, 0.9]  # Different confidence thresholds

    # Initialize counters for metrics
    metric_accumulators = initialize_metric_accumulators(
        iou_thresholds, confidence_thresholds
    )

    logging.info("--- Starting Evaluation ---")

    with torch.no_grad():  # Disable gradient calculations
        for i, (images, targets) in enumerate(data_loader):
            # Stop if we've reached the max samples
            if max_samples is not None and i >= max_samples:
                break

            # Free cached memory
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

            images = list(image.to(device) for image in images)
            targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

            # Measure inference time
            start_time = time.time()
            # Get predictions in eval mode
            predictions = model(images)
            inference_time = time.time() - start_time
            inference_times.append(inference_time)

            # Process metrics on-the-fly for this batch only
            process_batch_metrics(
                predictions,
                targets,
                metric_accumulators,
                iou_thresholds,
                confidence_thresholds,
            )

            # Compute losses (switch to train mode temporarily)
            model.train()
            loss_dict = model(images, targets)
            model.eval()

            # Calculate total loss
            losses = sum(loss for loss in loss_dict.values())
            loss_value = losses.item()
            total_loss += loss_value

            if (i + 1) % status_interval == 0:
                logging.info(f"  Evaluated batch {i + 1}/{num_batches}")

            # Explicitly clean up to help with memory
            del images, targets, predictions, loss_dict
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

    # Calculate basic metrics
    avg_loss = total_loss / num_batches if num_batches > 0 else 0
    avg_inference_time = np.mean(inference_times) if inference_times else 0

    metrics = {
        "average_loss": avg_loss,
        "average_inference_time": avg_inference_time,
    }

    # Compute final metrics from accumulators
    metrics.update(finalize_metrics(metric_accumulators))

    eval_duration = time.time() - eval_start_time

    # Log results
    logging.info("--- Evaluation Finished ---")
    logging.info(f"  Average Evaluation Loss: {avg_loss:.4f}")
    logging.info(f"  Average Inference Time: {avg_inference_time:.4f}s per batch")

    # Log detailed metrics
    for metric_name, metric_value in metrics.items():
        if metric_name != "average_loss":  # Already logged
            if isinstance(metric_value, (int, float)):
                logging.info(f"  {metric_name}: {metric_value:.4f}")
            else:
                logging.info(f"  {metric_name}: {metric_value}")

    logging.info(f"  Evaluation Duration: {eval_duration:.2f}s")

    return metrics

def initialize_metric_accumulators(iou_thresholds, confidence_thresholds):
    """Initialize accumulators for incremental metric calculation"""
    accumulators = {
        "total_gt": 0,
        "map_accumulators": {},
        "conf_accumulators": {},
        "size_accumulators": {
            "small_gt": 0,
            "medium_gt": 0,
            "large_gt": 0,
            "small_tp": 0,
            "medium_tp": 0,
            "large_tp": 0,
            "small_det": 0,
            "medium_det": 0,
            "large_det": 0,
        },
    }

    # Initialize map accumulators for each IoU threshold
    for iou in iou_thresholds:
        accumulators["map_accumulators"][iou] = {
            "true_positives": 0,
            "false_positives": 0,
            "total_detections": 0,
        }

    # Initialize confidence accumulators
    for conf in confidence_thresholds:
        accumulators["conf_accumulators"][conf] = {
            "true_positives": 0,
            "detections": 0,
        }

    return accumulators

def process_batch_metrics(
    predictions, targets, accumulators, iou_thresholds, confidence_thresholds
):
    """Process metrics for a single batch incrementally"""
    small_threshold = 32 * 32  # Small objects: area < 32²
    medium_threshold = 96 * 96  # Medium objects: 32² <= area < 96²

    # Count total ground truth boxes in this batch
    batch_gt = sum(len(target["boxes"]) for target in targets)
    accumulators["total_gt"] += batch_gt

    # Process all predictions in the batch
    for pred, target in zip(predictions, targets):
        pred_boxes = pred["boxes"]
        pred_scores = pred["scores"]
        pred_labels = pred["labels"]
        gt_boxes = target["boxes"]
        gt_labels = target["labels"]

        # Skip if no predictions or no ground truth
        if len(pred_boxes) == 0 or len(gt_boxes) == 0:
            continue

        # Calculate IoU between predictions and ground truth
        iou_matrix = box_iou(pred_boxes, gt_boxes)

        # Process size-based metrics
        gt_areas = target.get("area", None)
        if gt_areas is None:
            # Calculate if not provided
            gt_areas = (gt_boxes[:, 2] - gt_boxes[:, 0]) * (
                gt_boxes[:, 3] - gt_boxes[:, 1]
            )

        # Count ground truth by size
        small_mask_gt = gt_areas < small_threshold
        medium_mask_gt = (gt_areas >= small_threshold) & (gt_areas < medium_threshold)
        large_mask_gt = gt_areas >= medium_threshold

        accumulators["size_accumulators"]["small_gt"] += torch.sum(small_mask_gt).item()
        accumulators["size_accumulators"]["medium_gt"] += torch.sum(
            medium_mask_gt
        ).item()
        accumulators["size_accumulators"]["large_gt"] += torch.sum(large_mask_gt).item()

        # Calculate areas for predictions
        pred_areas = (pred_boxes[:, 2] - pred_boxes[:, 0]) * (
            pred_boxes[:, 3] - pred_boxes[:, 1]
        )

        # Count predictions by size (with confidence >= 0.5)
        conf_mask = pred_scores >= 0.5
        if torch.sum(conf_mask) == 0:
            continue  # Skip if no predictions meet confidence threshold

        small_mask = (pred_areas < small_threshold) & conf_mask
        medium_mask = (
            (pred_areas >= small_threshold)
            & (pred_areas < medium_threshold)
            & conf_mask
        )
        large_mask = (pred_areas >= medium_threshold) & conf_mask

        accumulators["size_accumulators"]["small_det"] += torch.sum(small_mask).item()
        accumulators["size_accumulators"]["medium_det"] += torch.sum(medium_mask).item()
        accumulators["size_accumulators"]["large_det"] += torch.sum(large_mask).item()

        # Process metrics for each IoU threshold
        for iou_threshold in iou_thresholds:
            process_iou_metrics(
                pred_boxes,
                pred_scores,
                pred_labels,
                gt_boxes,
                gt_labels,
                iou_matrix,
                accumulators["map_accumulators"][iou_threshold],
                iou_threshold,
            )

        # Process metrics for each confidence threshold
        for conf_threshold in confidence_thresholds:
            process_confidence_metrics(
                pred_boxes,
                pred_scores,
                pred_labels,
                gt_boxes,
                gt_labels,
                iou_matrix,
                accumulators["conf_accumulators"][conf_threshold],
                conf_threshold,
            )

        # Process size-based true positives with fixed IoU threshold of 0.5.
        # Use a new gt_matched array to avoid interference with other metric
        # calculations (allocated on the same device as the boxes so that
        # indexing with GPU tensors works)
        gt_matched = torch.zeros(len(gt_boxes), dtype=torch.bool, device=gt_boxes.device)
        filtered_mask = pred_scores >= 0.5

        if torch.sum(filtered_mask) > 0:
            filtered_boxes = pred_boxes[filtered_mask]
            filtered_scores = pred_scores[filtered_mask]
            filtered_labels = pred_labels[filtered_mask]
            # Recalculate IoU for filtered boxes
            filtered_iou_matrix = box_iou(filtered_boxes, gt_boxes)

            # Sort predictions by confidence
            sorted_indices = torch.argsort(filtered_scores, descending=True)

            for idx in sorted_indices:
                best_iou, best_gt_idx = torch.max(filtered_iou_matrix[idx], dim=0)

                if best_iou >= 0.5 and not gt_matched[best_gt_idx]:
                    if filtered_labels[idx] == gt_labels[best_gt_idx]:
                        gt_matched[best_gt_idx] = True

                        # Categorize true positive by ground truth size (not prediction size)
                        area = gt_areas[best_gt_idx].item()
                        if area < small_threshold:
                            accumulators["size_accumulators"]["small_tp"] += 1
                        elif area < medium_threshold:
                            accumulators["size_accumulators"]["medium_tp"] += 1
                        else:
                            accumulators["size_accumulators"]["large_tp"] += 1

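# A quick orientation for the box_iou calls in the matching functions below:
# torchvision.ops.box_iou(a, b) returns an N x M matrix whose rows index the
# first argument (predictions here) and whose columns index the second
# (ground truth). For example:
#     box_iou(torch.tensor([[0., 0., 10., 10.], [20., 20., 30., 30.]]),
#             torch.tensor([[0., 0., 10., 10.]]))
#     -> tensor([[1.], [0.]])
# so iou_matrix[idx] is the vector of overlaps between prediction idx and
# every ground-truth box.
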
def process_iou_metrics(
    pred_boxes,
    pred_scores,
    pred_labels,
    gt_boxes,
    gt_labels,
    iou_matrix,
    accumulator,
    iou_threshold,
):
    """Process metrics for a specific IoU threshold"""
    # Apply a minimum confidence threshold of 0.05 for metrics
    min_conf_threshold = 0.05
    conf_mask = pred_scores >= min_conf_threshold

    if torch.sum(conf_mask) == 0:
        return  # Skip if no predictions after confidence filtering

    # Filter predictions by confidence
    filtered_boxes = pred_boxes[conf_mask]
    filtered_scores = pred_scores[conf_mask]
    filtered_labels = pred_labels[conf_mask]

    # Initialize array to track which gt boxes have been matched
    # (on the same device as the boxes so GPU tensor indexing works)
    gt_matched = torch.zeros(len(gt_boxes), dtype=torch.bool, device=gt_boxes.device)

    # We may need a filtered IoU matrix if we're filtering predictions
    if len(filtered_boxes) < len(pred_boxes):
        # Recalculate IoU for filtered predictions
        filtered_iou_matrix = box_iou(filtered_boxes, gt_boxes)
    else:
        filtered_iou_matrix = iou_matrix

    # Sort predictions by confidence score (high to low)
    sorted_indices = torch.argsort(filtered_scores, descending=True)

    # True positives count for this batch
    batch_tp = 0

    for idx in sorted_indices:
        # Find best matching ground truth box
        iou_values = filtered_iou_matrix[idx]

        # Skip if no ground truth boxes
        if len(iou_values) == 0:
            # This is a false positive since there's no ground truth to match
            accumulator["false_positives"] += 1
            continue

        best_iou, best_gt_idx = torch.max(iou_values, dim=0)

        # Check if the prediction matches a ground truth box
        if (
            best_iou >= iou_threshold
            and not gt_matched[best_gt_idx]
            and filtered_labels[idx] == gt_labels[best_gt_idx]
        ):
            batch_tp += 1
            gt_matched[best_gt_idx] = True
        else:
            accumulator["false_positives"] += 1

    # Update true positives. Important: don't artificially cap true positives here;
    # let finalize_metrics handle the capping to avoid recall underestimation
    # during intermediate calculations
    accumulator["true_positives"] += batch_tp

    # Count total detections (after confidence filtering)
    accumulator["total_detections"] += len(filtered_boxes)

def process_confidence_metrics(
    pred_boxes,
    pred_scores,
    pred_labels,
    gt_boxes,
    gt_labels,
    iou_matrix,
    accumulator,
    conf_threshold,
):
    """Process metrics for a specific confidence threshold"""
    # Filter by confidence
    mask = pred_scores >= conf_threshold

    if torch.sum(mask) == 0:
        return  # Skip if no predictions after filtering

    filtered_boxes = pred_boxes[mask]
    filtered_scores = pred_scores[mask]
    filtered_labels = pred_labels[mask]

    accumulator["detections"] += len(filtered_boxes)

    if len(filtered_boxes) == 0 or len(gt_boxes) == 0:
        return

    # Calculate matches with fixed IoU threshold of 0.5
    # (gt_matched lives on the same device as the boxes so GPU indexing works)
    gt_matched = torch.zeros(len(gt_boxes), dtype=torch.bool, device=gt_boxes.device)

    # We need to recalculate IoU for the filtered boxes
    filtered_iou_matrix = box_iou(filtered_boxes, gt_boxes)

    # Sort by confidence for consistent ordering
    sorted_indices = torch.argsort(filtered_scores, descending=True)

    for pred_idx in sorted_indices:
        best_iou, best_gt_idx = torch.max(filtered_iou_matrix[pred_idx], dim=0)
        if best_iou >= 0.5 and not gt_matched[best_gt_idx]:
            if filtered_labels[pred_idx] == gt_labels[best_gt_idx]:
                accumulator["true_positives"] += 1
                gt_matched[best_gt_idx] = True

def finalize_metrics(accumulators):
    """Calculate final metrics from accumulators"""
    metrics = {}
    total_gt = accumulators["total_gt"]

    # Calculate mAP metrics
    for iou_threshold, map_acc in accumulators["map_accumulators"].items():
        true_positives = map_acc["true_positives"]
        false_positives = map_acc["false_positives"]

        # Calculate metrics - only cap true positives at the very end for final
        # metrics to prevent recall underestimation during intermediate calculations
        precision = true_positives / max(true_positives + false_positives, 1)
        recall = true_positives / max(total_gt, 1)

        # Cap metrics for final reporting to ensure they're in valid range
        precision = min(1.0, precision)
        recall = min(1.0, recall)

        f1_score = 2 * precision * recall / max(precision + recall, 1e-6)

        # Simple average precision calculation (precision * recall)
        # This is a simplification; full AP calculation requires a PR curve
        ap = precision * recall

        metrics.update(
            {
                f"mAP@{iou_threshold}": ap,
                f"precision@{iou_threshold}": precision,
                f"recall@{iou_threshold}": recall,
                f"f1_score@{iou_threshold}": f1_score,
                f"tp@{iou_threshold}": true_positives,
                f"fp@{iou_threshold}": false_positives,
                "gt_total": total_gt,
            }
        )

    # Calculate confidence threshold metrics
    for conf_threshold, conf_acc in accumulators["conf_accumulators"].items():
        true_positives = conf_acc["true_positives"]
        detections = conf_acc["detections"]

        # Calculate metrics without artificial capping to prevent recall underestimation
        precision = true_positives / max(detections, 1)
        recall = true_positives / max(total_gt, 1)

        # Cap metrics for final reporting only
        precision = min(1.0, precision)
        recall = min(1.0, recall)

        f1_score = 2 * precision * recall / max(precision + recall, 1e-6)

        metrics.update(
            {
                f"precision@conf{conf_threshold}": precision,
                f"recall@conf{conf_threshold}": recall,
                f"f1_score@conf{conf_threshold}": f1_score,
                f"detections@conf{conf_threshold}": detections,
                f"tp@conf{conf_threshold}": true_positives,
            }
        )

    # Calculate size metrics
    size_acc = accumulators["size_accumulators"]
    small_gt = size_acc["small_gt"]
    medium_gt = size_acc["medium_gt"]
    large_gt = size_acc["large_gt"]
    small_tp = size_acc["small_tp"]
    medium_tp = size_acc["medium_tp"]
    large_tp = size_acc["large_tp"]
    small_det = size_acc["small_det"]
    medium_det = size_acc["medium_det"]
    large_det = size_acc["large_det"]

    # Calculate precision and recall without artificial capping
    small_precision = small_tp / max(small_det, 1) if small_det > 0 else 0
    small_recall = small_tp / max(small_gt, 1) if small_gt > 0 else 0

    medium_precision = medium_tp / max(medium_det, 1) if medium_det > 0 else 0
    medium_recall = medium_tp / max(medium_gt, 1) if medium_gt > 0 else 0

    large_precision = large_tp / max(large_det, 1) if large_det > 0 else 0
    large_recall = large_tp / max(large_gt, 1) if large_gt > 0 else 0

    # Cap metrics for final reporting
    small_precision = min(1.0, small_precision)
    small_recall = min(1.0, small_recall)
    medium_precision = min(1.0, medium_precision)
    medium_recall = min(1.0, medium_recall)
    large_precision = min(1.0, large_precision)
    large_recall = min(1.0, large_recall)

    metrics.update(
        {
            "small_precision": small_precision,
            "small_recall": small_recall,
            "small_count": small_gt,
            "small_tp": small_tp,
            "small_det": small_det,
            "medium_precision": medium_precision,
            "medium_recall": medium_recall,
            "medium_count": medium_gt,
            "medium_tp": medium_tp,
            "medium_det": medium_det,
            "large_precision": large_precision,
            "large_recall": large_recall,
            "large_count": large_gt,
            "large_tp": large_tp,
            "large_det": large_det,
        }
    )

    return metrics

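# Note on the ap = precision * recall shortcut in finalize_metrics above: it is
# only a rough proxy for average precision. Full COCO-style AP integrates
# precision over recall,
#     AP = integral_0^1 p(r) dr  ~=  sum_k (r_k - r_{k-1}) * p_interp(r_k),
# which requires accumulating the whole precision-recall curve rather than two
# running counters, as the inline comments there already acknowledge.
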
def calculate_map(predictions, targets, iou_threshold=0.5):
    """
    Calculate mean Average Precision (mAP) at a specific IoU threshold.

    Args:
        predictions (list): List of prediction dictionaries
        targets (list): List of target dictionaries
        iou_threshold (float): IoU threshold for considering a detection as correct

    Returns:
        dict: Dictionary with mAP, precision, recall and F1 score
    """
    # Initialize counters
    total_gt = 0
    total_detections = 0
    true_positives = 0
    false_positives = 0

    # Count total ground truth boxes
    for target in targets:
        total_gt += len(target["boxes"])

    # Process all predictions
    for pred, target in zip(predictions, targets):
        pred_boxes = pred["boxes"]
        pred_scores = pred["scores"]
        pred_labels = pred["labels"]
        gt_boxes = target["boxes"]
        gt_labels = target["labels"]

        # Skip if no predictions or no ground truth
        if len(pred_boxes) == 0 or len(gt_boxes) == 0:
            continue

        # Calculate IoU between predictions and ground truth
        iou_matrix = box_iou(pred_boxes, gt_boxes)

        # Initialize array to track which gt boxes have been matched
        # (on the same device as the boxes so GPU tensor indexing works)
        gt_matched = torch.zeros(len(gt_boxes), dtype=torch.bool, device=gt_boxes.device)

        # Sort predictions by confidence score (high to low)
        sorted_indices = torch.argsort(pred_scores, descending=True)

        # Count true positives and false positives
        for idx in sorted_indices:
            # Find best matching ground truth box
            iou_values = iou_matrix[idx]
            best_iou, best_gt_idx = torch.max(iou_values, dim=0)

            # Check if the prediction matches a ground truth box
            if (
                best_iou >= iou_threshold
                and not gt_matched[best_gt_idx]
                and pred_labels[idx] == gt_labels[best_gt_idx]
            ):
                true_positives += 1
                gt_matched[best_gt_idx] = True
            else:
                false_positives += 1

        total_detections += len(pred_boxes)

    # Calculate metrics
    precision = true_positives / max(true_positives + false_positives, 1)
    recall = true_positives / max(total_gt, 1)

    # Cap metrics for final reporting
    precision = min(1.0, precision)
    recall = min(1.0, recall)

    f1_score = 2 * precision * recall / max(precision + recall, 1e-6)

    return {
        "mAP": precision * recall,  # Simplified mAP calculation
        "precision": precision,
        "recall": recall,
        "f1_score": f1_score,
        "true_positives": true_positives,
        "false_positives": false_positives,
        "total_gt": total_gt,
        "total_detections": total_detections,
    }

def calculate_metrics_at_confidence(predictions, targets, confidence_threshold=0.5):
    """
    Calculate detection metrics at a specific confidence threshold.

    Args:
        predictions (list): List of prediction dictionaries
        targets (list): List of target dictionaries
        confidence_threshold (float): Confidence threshold to filter predictions

    Returns:
        dict: Dictionary with precision, recall, F1 score and detection count
    """
    # Initialize counters
    total_gt = 0
    detections = 0
    true_positives = 0

    # Count total ground truth boxes
    for target in targets:
        total_gt += len(target["boxes"])

    # Process all predictions with confidence filter
    for pred, target in zip(predictions, targets):
        # Filter predictions by confidence threshold (boolean masking already
        # yields empty tensors when nothing passes, so no length guard is needed)
        mask = pred["scores"] >= confidence_threshold
        filtered_boxes = pred["boxes"][mask]
        filtered_labels = pred["labels"][mask]

        detections += len(filtered_boxes)

        # Skip if no predictions after filtering
        if len(filtered_boxes) == 0:
            continue

        # Calculate IoU with ground truth
        gt_boxes = target["boxes"]
        gt_labels = target["labels"]

        # Skip if no ground truth
        if len(gt_boxes) == 0:
            continue

        iou_matrix = box_iou(filtered_boxes, gt_boxes)

        # Initialize array to track which gt boxes have been matched
        # (on the same device as the boxes so GPU tensor indexing works)
        gt_matched = torch.zeros(len(gt_boxes), dtype=torch.bool, device=gt_boxes.device)

        # Find matches based on IoU threshold of 0.5
        for pred_idx in range(len(filtered_boxes)):
            best_iou, best_gt_idx = torch.max(iou_matrix[pred_idx], dim=0)
            if best_iou >= 0.5 and not gt_matched[best_gt_idx]:
                if filtered_labels[pred_idx] == gt_labels[best_gt_idx]:
                    true_positives += 1
                    gt_matched[best_gt_idx] = True

    # Calculate metrics
    precision = true_positives / max(detections, 1)
    recall = true_positives / max(total_gt, 1)

    # Cap metrics for final reporting
    precision = min(1.0, precision)
    recall = min(1.0, recall)

    f1_score = 2 * precision * recall / max(precision + recall, 1e-6)

    return {
        "precision": precision,
        "recall": recall,
        "f1_score": f1_score,
        "detections": detections,
        "true_positives": true_positives,
    }



def calculate_size_based_metrics(predictions, targets):
    """
    Calculate detection performance by object size.

    Args:
        predictions (list): List of prediction dictionaries
        targets (list): List of target dictionaries

    Returns:
        dict: Dictionary with size-based metrics
    """
    # Define size categories (in pixels²)
    small_threshold = 32 * 32  # Small objects: area < 32²
    medium_threshold = 96 * 96  # Medium objects: 32² <= area < 96²
    # Large objects: area >= 96²

    # Initialize counters for each size category
    size_metrics = {
        "small_recall": 0,
        "small_precision": 0,
        "small_count": 0,
        "medium_recall": 0,
        "medium_precision": 0,
        "medium_count": 0,
        "large_recall": 0,
        "large_precision": 0,
        "large_count": 0,
    }

    # Count by size
    small_gt, medium_gt, large_gt = 0, 0, 0
    small_tp, medium_tp, large_tp = 0, 0, 0
    small_det, medium_det, large_det = 0, 0, 0

    # Process all predictions
    for pred, target in zip(predictions, targets):
        pred_boxes = pred["boxes"]
        pred_scores = pred["scores"]
        gt_boxes = target["boxes"]

        # Skip if no predictions or no ground truth
        if len(pred_boxes) == 0 or len(gt_boxes) == 0:
            continue

        # Use provided ground-truth areas, or compute them from the boxes
        gt_areas = target.get("area", None)
        if gt_areas is None:
            gt_areas = (gt_boxes[:, 2] - gt_boxes[:, 0]) * (
                gt_boxes[:, 3] - gt_boxes[:, 1]
            )

        # Count ground truth by size
        small_gt += torch.sum(gt_areas < small_threshold).item()
        medium_gt += torch.sum(
            (gt_areas >= small_threshold) & (gt_areas < medium_threshold)
        ).item()
        large_gt += torch.sum(gt_areas >= medium_threshold).item()

        # Calculate areas for predictions
        pred_areas = (pred_boxes[:, 2] - pred_boxes[:, 0]) * (
            pred_boxes[:, 3] - pred_boxes[:, 1]
        )

        # Count predictions by size (with confidence >= 0.5)
        conf_mask = pred_scores >= 0.5
        small_mask = (pred_areas < small_threshold) & conf_mask
        medium_mask = (
            (pred_areas >= small_threshold)
            & (pred_areas < medium_threshold)
            & conf_mask
        )
        large_mask = (pred_areas >= medium_threshold) & conf_mask

        small_det += torch.sum(small_mask).item()
        medium_det += torch.sum(medium_mask).item()
        large_det += torch.sum(large_mask).item()

        # Calculate IoU between predictions and ground truth
        iou_matrix = box_iou(pred_boxes, gt_boxes)

        # Track which ground-truth boxes have already been matched
        gt_matched = torch.zeros(len(gt_boxes), dtype=torch.bool)

        # Sort predictions by confidence score (high to low)
        sorted_indices = torch.argsort(pred_scores, descending=True)

        # Count true positives by size
        for idx in sorted_indices:
            if pred_scores[idx] < 0.5:  # Skip low-confidence detections
                continue

            # Find the best matching ground-truth box
            best_iou, best_gt_idx = torch.max(iou_matrix[idx], dim=0)

            # Check if the prediction matches a ground-truth box with IoU >= 0.5
            if best_iou >= 0.5 and not gt_matched[best_gt_idx]:
                gt_matched[best_gt_idx] = True

                # Categorize the true positive by ground-truth size
                area = gt_areas[best_gt_idx].item()
                if area < small_threshold:
                    small_tp += 1
                elif area < medium_threshold:
                    medium_tp += 1
                else:
                    large_tp += 1

    # Calculate metrics for each size category (max(..., 1) avoids division by zero)
    size_metrics["small_precision"] = small_tp / max(small_det, 1)
    size_metrics["small_recall"] = small_tp / max(small_gt, 1)
    size_metrics["small_count"] = small_gt

    size_metrics["medium_precision"] = medium_tp / max(medium_det, 1)
    size_metrics["medium_recall"] = medium_tp / max(medium_gt, 1)
    size_metrics["medium_count"] = medium_gt

    size_metrics["large_precision"] = large_tp / max(large_det, 1)
    size_metrics["large_recall"] = large_tp / max(large_gt, 1)
    size_metrics["large_count"] = large_gt

    # Cap metrics for final reporting
    size_metrics["small_precision"] = min(1.0, size_metrics["small_precision"])
    size_metrics["small_recall"] = min(1.0, size_metrics["small_recall"])
    size_metrics["medium_precision"] = min(1.0, size_metrics["medium_precision"])
    size_metrics["medium_recall"] = min(1.0, size_metrics["medium_recall"])
    size_metrics["large_precision"] = min(1.0, size_metrics["large_precision"])
    size_metrics["large_recall"] = min(1.0, size_metrics["large_recall"])

    return size_metrics
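
# The size buckets above follow the COCO convention: small is area < 32² = 1024 px²,
# medium is 1024 px² <= area < 96² = 9216 px², and large is area >= 9216 px².
# For example, a 50x40 box has area 2000 px² and counts as a medium object.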


# Example usage (can be removed or kept for testing):
if __name__ == "__main__":
    # This is a dummy test; full evaluation requires a model, dataloader, and device
    print(
        "This script contains the evaluate function and cannot be run directly for testing without setup."
    )
    # Example:
    # device = torch.device('cpu')
    # # Create dummy model and dataloader
    # model = ...
    # data_loader = ...
    # metrics = evaluate(model, data_loader, device)
    # print(f"Dummy evaluation metrics: {metrics}")
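
    # A minimal self-contained check of the threshold metrics on made-up tensors
    # (a sketch; it assumes the torch / box_iou imports at the top of this file):
    demo_pred = {
        "boxes": torch.tensor([[0.0, 0.0, 10.0, 10.0], [20.0, 20.0, 30.0, 30.0]]),
        "scores": torch.tensor([0.9, 0.3]),
        "labels": torch.tensor([1, 1]),
    }
    demo_target = {
        "boxes": torch.tensor([[0.0, 0.0, 10.0, 10.0]]),
        "labels": torch.tensor([1]),
    }
    demo_metrics = calculate_metrics_at_confidence([demo_pred], [demo_target])
    print(f"Demo threshold metrics: {demo_metrics}")  # expect precision = recall = 1.0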
44
utils/log_utils.py
Normal file
@@ -0,0 +1,44 @@
import logging
import os
import sys


def setup_logging(log_dir, config_name):
    """Configures logging to output to both file and console.

    Args:
        log_dir (str): The directory where the log file should be saved.
        config_name (str): The name of the configuration run, used for the log filename.
    """
    # Ensure log directory exists
    os.makedirs(log_dir, exist_ok=True)

    log_filename = f"{config_name}_train.log"
    log_filepath = os.path.join(log_dir, log_filename)

    # Configure the root logger
    logging.basicConfig(
        level=logging.INFO,  # Log INFO level and above (INFO, WARNING, ERROR, CRITICAL)
        format="%(asctime)s [%(levelname)s] %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        handlers=[
            logging.FileHandler(log_filepath),  # Log to a file
            logging.StreamHandler(sys.stdout),  # Log to the console (stdout)
        ],
        # force=True ensures that if basicConfig was called before (e.g., by a library),
        # this configuration will overwrite it. Use with caution if libraries might
        # configure logging themselves in complex ways.
        force=True,
    )

    logging.info(f"Logging configured. Log file: {log_filepath}")


# Example usage (can be removed or kept for testing):
if __name__ == "__main__":
    print("Testing logging setup...")
    setup_logging("temp_logs", "test_config")
    logging.info("This is an info message.")
    logging.warning("This is a warning message.")
    logging.error("This is an error message.")
    print("Check 'temp_logs/test_config_train.log' and console output.")
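
    # Named loggers inherit the root configuration set above, so other modules
    # can log through the same handlers (standard logging API; shown here only
    # as a usage sketch):
    named_logger = logging.getLogger("demo.module")
    named_logger.info("Messages from named loggers share the file/console output.")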
828
uv.lock
generated
Normal file
@@ -0,0 +1,828 @@
version = 1
|
||||
revision = 1
|
||||
requires-python = ">=3.12"
|
||||
resolution-markers = [
|
||||
"sys_platform == 'linux'",
|
||||
"sys_platform != 'linux'",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cfgv"
|
||||
version = "3.4.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/11/74/539e56497d9bd1d484fd863dd69cbbfa653cd2aa27abfe35653494d85e94/cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560", size = 7114 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/c5/55/51844dd50c4fc7a33b653bfaba4c2456f06955289ca770a5dbd5fd267374/cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9", size = 7249 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "colorama"
|
||||
version = "0.4.6"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "contourpy"
|
||||
version = "1.3.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "numpy" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/25/c2/fc7193cc5383637ff390a712e88e4ded0452c9fbcf84abe3de5ea3df1866/contourpy-1.3.1.tar.gz", hash = "sha256:dfd97abd83335045a913e3bcc4a09c0ceadbe66580cf573fe961f4a825efa699", size = 13465753 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/37/6b/175f60227d3e7f5f1549fcb374592be311293132207e451c3d7c654c25fb/contourpy-1.3.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0ffa84be8e0bd33410b17189f7164c3589c229ce5db85798076a3fa136d0e509", size = 271494 },
|
||||
{ url = "https://files.pythonhosted.org/packages/6b/6a/7833cfae2c1e63d1d8875a50fd23371394f540ce809d7383550681a1fa64/contourpy-1.3.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:805617228ba7e2cbbfb6c503858e626ab528ac2a32a04a2fe88ffaf6b02c32bc", size = 255444 },
|
||||
{ url = "https://files.pythonhosted.org/packages/7f/b3/7859efce66eaca5c14ba7619791b084ed02d868d76b928ff56890d2d059d/contourpy-1.3.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ade08d343436a94e633db932e7e8407fe7de8083967962b46bdfc1b0ced39454", size = 307628 },
|
||||
{ url = "https://files.pythonhosted.org/packages/48/b2/011415f5e3f0a50b1e285a0bf78eb5d92a4df000553570f0851b6e309076/contourpy-1.3.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:47734d7073fb4590b4a40122b35917cd77be5722d80683b249dac1de266aac80", size = 347271 },
|
||||
{ url = "https://files.pythonhosted.org/packages/84/7d/ef19b1db0f45b151ac78c65127235239a8cf21a59d1ce8507ce03e89a30b/contourpy-1.3.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2ba94a401342fc0f8b948e57d977557fbf4d515f03c67682dd5c6191cb2d16ec", size = 318906 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ba/99/6794142b90b853a9155316c8f470d2e4821fe6f086b03e372aca848227dd/contourpy-1.3.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efa874e87e4a647fd2e4f514d5e91c7d493697127beb95e77d2f7561f6905bd9", size = 323622 },
|
||||
{ url = "https://files.pythonhosted.org/packages/3c/0f/37d2c84a900cd8eb54e105f4fa9aebd275e14e266736778bb5dccbf3bbbb/contourpy-1.3.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1bf98051f1045b15c87868dbaea84f92408337d4f81d0e449ee41920ea121d3b", size = 1266699 },
|
||||
{ url = "https://files.pythonhosted.org/packages/3a/8a/deb5e11dc7d9cc8f0f9c8b29d4f062203f3af230ba83c30a6b161a6effc9/contourpy-1.3.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:61332c87493b00091423e747ea78200659dc09bdf7fd69edd5e98cef5d3e9a8d", size = 1326395 },
|
||||
{ url = "https://files.pythonhosted.org/packages/1a/35/7e267ae7c13aaf12322ccc493531f1e7f2eb8fba2927b9d7a05ff615df7a/contourpy-1.3.1-cp312-cp312-win32.whl", hash = "sha256:e914a8cb05ce5c809dd0fe350cfbb4e881bde5e2a38dc04e3afe1b3e58bd158e", size = 175354 },
|
||||
{ url = "https://files.pythonhosted.org/packages/a1/35/c2de8823211d07e8a79ab018ef03960716c5dff6f4d5bff5af87fd682992/contourpy-1.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:08d9d449a61cf53033612cb368f3a1b26cd7835d9b8cd326647efe43bca7568d", size = 220971 },
|
||||
{ url = "https://files.pythonhosted.org/packages/9a/e7/de62050dce687c5e96f946a93546910bc67e483fe05324439e329ff36105/contourpy-1.3.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a761d9ccfc5e2ecd1bf05534eda382aa14c3e4f9205ba5b1684ecfe400716ef2", size = 271548 },
|
||||
{ url = "https://files.pythonhosted.org/packages/78/4d/c2a09ae014ae984c6bdd29c11e74d3121b25eaa117eca0bb76340efd7e1c/contourpy-1.3.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:523a8ee12edfa36f6d2a49407f705a6ef4c5098de4f498619787e272de93f2d5", size = 255576 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ab/8a/915380ee96a5638bda80cd061ccb8e666bfdccea38d5741cb69e6dbd61fc/contourpy-1.3.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece6df05e2c41bd46776fbc712e0996f7c94e0d0543af1656956d150c4ca7c81", size = 306635 },
|
||||
{ url = "https://files.pythonhosted.org/packages/29/5c/c83ce09375428298acd4e6582aeb68b1e0d1447f877fa993d9bf6cd3b0a0/contourpy-1.3.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:573abb30e0e05bf31ed067d2f82500ecfdaec15627a59d63ea2d95714790f5c2", size = 345925 },
|
||||
{ url = "https://files.pythonhosted.org/packages/29/63/5b52f4a15e80c66c8078a641a3bfacd6e07106835682454647aca1afc852/contourpy-1.3.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9fa36448e6a3a1a9a2ba23c02012c43ed88905ec80163f2ffe2421c7192a5d7", size = 318000 },
|
||||
{ url = "https://files.pythonhosted.org/packages/9a/e2/30ca086c692691129849198659bf0556d72a757fe2769eb9620a27169296/contourpy-1.3.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ea9924d28fc5586bf0b42d15f590b10c224117e74409dd7a0be3b62b74a501c", size = 322689 },
|
||||
{ url = "https://files.pythonhosted.org/packages/6b/77/f37812ef700f1f185d348394debf33f22d531e714cf6a35d13d68a7003c7/contourpy-1.3.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5b75aa69cb4d6f137b36f7eb2ace9280cfb60c55dc5f61c731fdf6f037f958a3", size = 1268413 },
|
||||
{ url = "https://files.pythonhosted.org/packages/3f/6d/ce84e79cdd128542ebeb268f84abb4b093af78e7f8ec504676673d2675bc/contourpy-1.3.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:041b640d4ec01922083645a94bb3b2e777e6b626788f4095cf21abbe266413c1", size = 1326530 },
|
||||
{ url = "https://files.pythonhosted.org/packages/72/22/8282f4eae20c73c89bee7a82a19c4e27af9b57bb602ecaa00713d5bdb54d/contourpy-1.3.1-cp313-cp313-win32.whl", hash = "sha256:36987a15e8ace5f58d4d5da9dca82d498c2bbb28dff6e5d04fbfcc35a9cb3a82", size = 175315 },
|
||||
{ url = "https://files.pythonhosted.org/packages/e3/d5/28bca491f65312b438fbf076589dcde7f6f966b196d900777f5811b9c4e2/contourpy-1.3.1-cp313-cp313-win_amd64.whl", hash = "sha256:a7895f46d47671fa7ceec40f31fae721da51ad34bdca0bee83e38870b1f47ffd", size = 220987 },
|
||||
{ url = "https://files.pythonhosted.org/packages/2f/24/a4b285d6adaaf9746e4700932f579f1a7b6f9681109f694cfa233ae75c4e/contourpy-1.3.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:9ddeb796389dadcd884c7eb07bd14ef12408aaae358f0e2ae24114d797eede30", size = 285001 },
|
||||
{ url = "https://files.pythonhosted.org/packages/48/1d/fb49a401b5ca4f06ccf467cd6c4f1fd65767e63c21322b29b04ec40b40b9/contourpy-1.3.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:19c1555a6801c2f084c7ddc1c6e11f02eb6a6016ca1318dd5452ba3f613a1751", size = 268553 },
|
||||
{ url = "https://files.pythonhosted.org/packages/79/1e/4aef9470d13fd029087388fae750dccb49a50c012a6c8d1d634295caa644/contourpy-1.3.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:841ad858cff65c2c04bf93875e384ccb82b654574a6d7f30453a04f04af71342", size = 310386 },
|
||||
{ url = "https://files.pythonhosted.org/packages/b0/34/910dc706ed70153b60392b5305c708c9810d425bde12499c9184a1100888/contourpy-1.3.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4318af1c925fb9a4fb190559ef3eec206845f63e80fb603d47f2d6d67683901c", size = 349806 },
|
||||
{ url = "https://files.pythonhosted.org/packages/31/3c/faee6a40d66d7f2a87f7102236bf4780c57990dd7f98e5ff29881b1b1344/contourpy-1.3.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:14c102b0eab282427b662cb590f2e9340a9d91a1c297f48729431f2dcd16e14f", size = 321108 },
|
||||
{ url = "https://files.pythonhosted.org/packages/17/69/390dc9b20dd4bb20585651d7316cc3054b7d4a7b4f8b710b2b698e08968d/contourpy-1.3.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05e806338bfeaa006acbdeba0ad681a10be63b26e1b17317bfac3c5d98f36cda", size = 327291 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ef/74/7030b67c4e941fe1e5424a3d988080e83568030ce0355f7c9fc556455b01/contourpy-1.3.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4d76d5993a34ef3df5181ba3c92fabb93f1eaa5729504fb03423fcd9f3177242", size = 1263752 },
|
||||
{ url = "https://files.pythonhosted.org/packages/f0/ed/92d86f183a8615f13f6b9cbfc5d4298a509d6ce433432e21da838b4b63f4/contourpy-1.3.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:89785bb2a1980c1bd87f0cb1517a71cde374776a5f150936b82580ae6ead44a1", size = 1318403 },
|
||||
{ url = "https://files.pythonhosted.org/packages/b3/0e/c8e4950c77dcfc897c71d61e56690a0a9df39543d2164040301b5df8e67b/contourpy-1.3.1-cp313-cp313t-win32.whl", hash = "sha256:8eb96e79b9f3dcadbad2a3891672f81cdcab7f95b27f28f1c67d75f045b6b4f1", size = 185117 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c1/31/1ae946f11dfbd229222e6d6ad8e7bd1891d3d48bde5fbf7a0beb9491f8e3/contourpy-1.3.1-cp313-cp313t-win_amd64.whl", hash = "sha256:287ccc248c9e0d0566934e7d606201abd74761b5703d804ff3df8935f523d546", size = 236668 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cycler"
|
||||
version = "0.12.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/a9/95/a3dbbb5028f35eafb79008e7522a75244477d2838f38cbb722248dabc2a8/cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c", size = 7615 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/e7/05/c19819d5e3d95294a6f5947fb9b9629efb316b96de511b418c53d245aae6/cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30", size = 8321 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "distlib"
|
||||
version = "0.3.9"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/0d/dd/1bec4c5ddb504ca60fc29472f3d27e8d4da1257a854e1d96742f15c1d02d/distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403", size = 613923 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/91/a1/cf2472db20f7ce4a6be1253a81cfdf85ad9c7885ffbed7047fb72c24cf87/distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87", size = 468973 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "filelock"
|
||||
version = "3.18.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/0a/10/c23352565a6544bdc5353e0b15fc1c563352101f30e24bf500207a54df9a/filelock-3.18.0.tar.gz", hash = "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2", size = 18075 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/4d/36/2a115987e2d8c300a974597416d9de88f2444426de9571f4b59b2cca3acc/filelock-3.18.0-py3-none-any.whl", hash = "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de", size = 16215 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "fonttools"
|
||||
version = "4.57.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/03/2d/a9a0b6e3a0cf6bd502e64fc16d894269011930cabfc89aee20d1635b1441/fonttools-4.57.0.tar.gz", hash = "sha256:727ece10e065be2f9dd239d15dd5d60a66e17eac11aea47d447f9f03fdbc42de", size = 3492448 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/cb/98/d4bc42d43392982eecaaca117d79845734d675219680cd43070bb001bc1f/fonttools-4.57.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:889e45e976c74abc7256d3064aa7c1295aa283c6bb19810b9f8b604dfe5c7f31", size = 2751824 },
|
||||
{ url = "https://files.pythonhosted.org/packages/1a/62/7168030eeca3742fecf45f31e63b5ef48969fa230a672216b805f1d61548/fonttools-4.57.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0425c2e052a5f1516c94e5855dbda706ae5a768631e9fcc34e57d074d1b65b92", size = 2283072 },
|
||||
{ url = "https://files.pythonhosted.org/packages/5d/82/121a26d9646f0986ddb35fbbaf58ef791c25b59ecb63ffea2aab0099044f/fonttools-4.57.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44c26a311be2ac130f40a96769264809d3b0cb297518669db437d1cc82974888", size = 4788020 },
|
||||
{ url = "https://files.pythonhosted.org/packages/5b/26/e0f2fb662e022d565bbe280a3cfe6dafdaabf58889ff86fdef2d31ff1dde/fonttools-4.57.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84c41ba992df5b8d680b89fd84c6a1f2aca2b9f1ae8a67400c8930cd4ea115f6", size = 4859096 },
|
||||
{ url = "https://files.pythonhosted.org/packages/9e/44/9075e323347b1891cdece4b3f10a3b84a8f4c42a7684077429d9ce842056/fonttools-4.57.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ea1e9e43ca56b0c12440a7c689b1350066595bebcaa83baad05b8b2675129d98", size = 4964356 },
|
||||
{ url = "https://files.pythonhosted.org/packages/48/28/caa8df32743462fb966be6de6a79d7f30393859636d7732e82efa09fbbb4/fonttools-4.57.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:84fd56c78d431606332a0627c16e2a63d243d0d8b05521257d77c6529abe14d8", size = 5226546 },
|
||||
{ url = "https://files.pythonhosted.org/packages/f6/46/95ab0f0d2e33c5b1a4fc1c0efe5e286ba9359602c0a9907adb1faca44175/fonttools-4.57.0-cp312-cp312-win32.whl", hash = "sha256:f4376819c1c778d59e0a31db5dc6ede854e9edf28bbfa5b756604727f7f800ac", size = 2146776 },
|
||||
{ url = "https://files.pythonhosted.org/packages/06/5d/1be5424bb305880e1113631f49a55ea7c7da3a5fe02608ca7c16a03a21da/fonttools-4.57.0-cp312-cp312-win_amd64.whl", hash = "sha256:57e30241524879ea10cdf79c737037221f77cc126a8cdc8ff2c94d4a522504b9", size = 2193956 },
|
||||
{ url = "https://files.pythonhosted.org/packages/e9/2f/11439f3af51e4bb75ac9598c29f8601aa501902dcedf034bdc41f47dd799/fonttools-4.57.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:408ce299696012d503b714778d89aa476f032414ae57e57b42e4b92363e0b8ef", size = 2739175 },
|
||||
{ url = "https://files.pythonhosted.org/packages/25/52/677b55a4c0972dc3820c8dba20a29c358197a78229daa2ea219fdb19e5d5/fonttools-4.57.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bbceffc80aa02d9e8b99f2a7491ed8c4a783b2fc4020119dc405ca14fb5c758c", size = 2276583 },
|
||||
{ url = "https://files.pythonhosted.org/packages/64/79/184555f8fa77b827b9460a4acdbbc0b5952bb6915332b84c615c3a236826/fonttools-4.57.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f022601f3ee9e1f6658ed6d184ce27fa5216cee5b82d279e0f0bde5deebece72", size = 4766437 },
|
||||
{ url = "https://files.pythonhosted.org/packages/f8/ad/c25116352f456c0d1287545a7aa24e98987b6d99c5b0456c4bd14321f20f/fonttools-4.57.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4dea5893b58d4637ffa925536462ba626f8a1b9ffbe2f5c272cdf2c6ebadb817", size = 4838431 },
|
||||
{ url = "https://files.pythonhosted.org/packages/53/ae/398b2a833897297797a44f519c9af911c2136eb7aa27d3f1352c6d1129fa/fonttools-4.57.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:dff02c5c8423a657c550b48231d0a48d7e2b2e131088e55983cfe74ccc2c7cc9", size = 4951011 },
|
||||
{ url = "https://files.pythonhosted.org/packages/b7/5d/7cb31c4bc9ffb9a2bbe8b08f8f53bad94aeb158efad75da645b40b62cb73/fonttools-4.57.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:767604f244dc17c68d3e2dbf98e038d11a18abc078f2d0f84b6c24571d9c0b13", size = 5205679 },
|
||||
{ url = "https://files.pythonhosted.org/packages/4c/e4/6934513ec2c4d3d69ca1bc3bd34d5c69dafcbf68c15388dd3bb062daf345/fonttools-4.57.0-cp313-cp313-win32.whl", hash = "sha256:8e2e12d0d862f43d51e5afb8b9751c77e6bec7d2dc00aad80641364e9df5b199", size = 2144833 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c4/0d/2177b7fdd23d017bcfb702fd41e47d4573766b9114da2fddbac20dcc4957/fonttools-4.57.0-cp313-cp313-win_amd64.whl", hash = "sha256:f1d6bc9c23356908db712d282acb3eebd4ae5ec6d8b696aa40342b1d84f8e9e3", size = 2190799 },
|
||||
{ url = "https://files.pythonhosted.org/packages/90/27/45f8957c3132917f91aaa56b700bcfc2396be1253f685bd5c68529b6f610/fonttools-4.57.0-py3-none-any.whl", hash = "sha256:3122c604a675513c68bd24c6a8f9091f1c2376d18e8f5fe5a101746c81b3e98f", size = 1093605 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "fsspec"
|
||||
version = "2025.3.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/45/d8/8425e6ba5fcec61a1d16e41b1b71d2bf9344f1fe48012c2b48b9620feae5/fsspec-2025.3.2.tar.gz", hash = "sha256:e52c77ef398680bbd6a98c0e628fbc469491282981209907bbc8aea76a04fdc6", size = 299281 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/44/4b/e0cfc1a6f17e990f3e64b7d941ddc4acdc7b19d6edd51abf495f32b1a9e4/fsspec-2025.3.2-py3-none-any.whl", hash = "sha256:2daf8dc3d1dfa65b6aa37748d112773a7a08416f6c70d96b264c96476ecaf711", size = 194435 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "identify"
|
||||
version = "2.6.9"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/9b/98/a71ab060daec766acc30fb47dfca219d03de34a70d616a79a38c6066c5bf/identify-2.6.9.tar.gz", hash = "sha256:d40dfe3142a1421d8518e3d3985ef5ac42890683e32306ad614a29490abeb6bf", size = 99249 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/07/ce/0845144ed1f0e25db5e7a79c2354c1da4b5ce392b8966449d5db8dca18f1/identify-2.6.9-py2.py3-none-any.whl", hash = "sha256:c98b4322da415a8e5a70ff6e51fbc2d2932c015532d77e9f8537b4ba7813b150", size = 99101 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "iniconfig"
|
||||
version = "2.1.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "jinja2"
|
||||
version = "3.1.6"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "markupsafe" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "kiwisolver"
|
||||
version = "1.4.8"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/82/59/7c91426a8ac292e1cdd53a63b6d9439abd573c875c3f92c146767dd33faf/kiwisolver-1.4.8.tar.gz", hash = "sha256:23d5f023bdc8c7e54eb65f03ca5d5bb25b601eac4d7f1a042888a1f45237987e", size = 97538 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/fc/aa/cea685c4ab647f349c3bc92d2daf7ae34c8e8cf405a6dcd3a497f58a2ac3/kiwisolver-1.4.8-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d6af5e8815fd02997cb6ad9bbed0ee1e60014438ee1a5c2444c96f87b8843502", size = 124152 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c5/0b/8db6d2e2452d60d5ebc4ce4b204feeb16176a851fd42462f66ade6808084/kiwisolver-1.4.8-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:bade438f86e21d91e0cf5dd7c0ed00cda0f77c8c1616bd83f9fc157fa6760d31", size = 66555 },
|
||||
{ url = "https://files.pythonhosted.org/packages/60/26/d6a0db6785dd35d3ba5bf2b2df0aedc5af089962c6eb2cbf67a15b81369e/kiwisolver-1.4.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b83dc6769ddbc57613280118fb4ce3cd08899cc3369f7d0e0fab518a7cf37fdb", size = 65067 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c9/ed/1d97f7e3561e09757a196231edccc1bcf59d55ddccefa2afc9c615abd8e0/kiwisolver-1.4.8-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:111793b232842991be367ed828076b03d96202c19221b5ebab421ce8bcad016f", size = 1378443 },
|
||||
{ url = "https://files.pythonhosted.org/packages/29/61/39d30b99954e6b46f760e6289c12fede2ab96a254c443639052d1b573fbc/kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:257af1622860e51b1a9d0ce387bf5c2c4f36a90594cb9514f55b074bcc787cfc", size = 1472728 },
|
||||
{ url = "https://files.pythonhosted.org/packages/0c/3e/804163b932f7603ef256e4a715e5843a9600802bb23a68b4e08c8c0ff61d/kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:69b5637c3f316cab1ec1c9a12b8c5f4750a4c4b71af9157645bf32830e39c03a", size = 1478388 },
|
||||
{ url = "https://files.pythonhosted.org/packages/8a/9e/60eaa75169a154700be74f875a4d9961b11ba048bef315fbe89cb6999056/kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:782bb86f245ec18009890e7cb8d13a5ef54dcf2ebe18ed65f795e635a96a1c6a", size = 1413849 },
|
||||
{ url = "https://files.pythonhosted.org/packages/bc/b3/9458adb9472e61a998c8c4d95cfdfec91c73c53a375b30b1428310f923e4/kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc978a80a0db3a66d25767b03688f1147a69e6237175c0f4ffffaaedf744055a", size = 1475533 },
|
||||
{ url = "https://files.pythonhosted.org/packages/e4/7a/0a42d9571e35798de80aef4bb43a9b672aa7f8e58643d7bd1950398ffb0a/kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:36dbbfd34838500a31f52c9786990d00150860e46cd5041386f217101350f0d3", size = 2268898 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d9/07/1255dc8d80271400126ed8db35a1795b1a2c098ac3a72645075d06fe5c5d/kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:eaa973f1e05131de5ff3569bbba7f5fd07ea0595d3870ed4a526d486fe57fa1b", size = 2425605 },
|
||||
{ url = "https://files.pythonhosted.org/packages/84/df/5a3b4cf13780ef6f6942df67b138b03b7e79e9f1f08f57c49957d5867f6e/kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:a66f60f8d0c87ab7f59b6fb80e642ebb29fec354a4dfad687ca4092ae69d04f4", size = 2375801 },
|
||||
{ url = "https://files.pythonhosted.org/packages/8f/10/2348d068e8b0f635c8c86892788dac7a6b5c0cb12356620ab575775aad89/kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:858416b7fb777a53f0c59ca08190ce24e9abbd3cffa18886a5781b8e3e26f65d", size = 2520077 },
|
||||
{ url = "https://files.pythonhosted.org/packages/32/d8/014b89fee5d4dce157d814303b0fce4d31385a2af4c41fed194b173b81ac/kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:085940635c62697391baafaaeabdf3dd7a6c3643577dde337f4d66eba021b2b8", size = 2338410 },
|
||||
{ url = "https://files.pythonhosted.org/packages/bd/72/dfff0cc97f2a0776e1c9eb5bef1ddfd45f46246c6533b0191887a427bca5/kiwisolver-1.4.8-cp312-cp312-win_amd64.whl", hash = "sha256:01c3d31902c7db5fb6182832713d3b4122ad9317c2c5877d0539227d96bb2e50", size = 71853 },
|
||||
{ url = "https://files.pythonhosted.org/packages/dc/85/220d13d914485c0948a00f0b9eb419efaf6da81b7d72e88ce2391f7aed8d/kiwisolver-1.4.8-cp312-cp312-win_arm64.whl", hash = "sha256:a3c44cb68861de93f0c4a8175fbaa691f0aa22550c331fefef02b618a9dcb476", size = 65424 },
|
||||
{ url = "https://files.pythonhosted.org/packages/79/b3/e62464a652f4f8cd9006e13d07abad844a47df1e6537f73ddfbf1bc997ec/kiwisolver-1.4.8-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1c8ceb754339793c24aee1c9fb2485b5b1f5bb1c2c214ff13368431e51fc9a09", size = 124156 },
|
||||
{ url = "https://files.pythonhosted.org/packages/8d/2d/f13d06998b546a2ad4f48607a146e045bbe48030774de29f90bdc573df15/kiwisolver-1.4.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:54a62808ac74b5e55a04a408cda6156f986cefbcf0ada13572696b507cc92fa1", size = 66555 },
|
||||
{ url = "https://files.pythonhosted.org/packages/59/e3/b8bd14b0a54998a9fd1e8da591c60998dc003618cb19a3f94cb233ec1511/kiwisolver-1.4.8-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:68269e60ee4929893aad82666821aaacbd455284124817af45c11e50a4b42e3c", size = 65071 },
|
||||
{ url = "https://files.pythonhosted.org/packages/f0/1c/6c86f6d85ffe4d0ce04228d976f00674f1df5dc893bf2dd4f1928748f187/kiwisolver-1.4.8-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34d142fba9c464bc3bbfeff15c96eab0e7310343d6aefb62a79d51421fcc5f1b", size = 1378053 },
|
||||
{ url = "https://files.pythonhosted.org/packages/4e/b9/1c6e9f6dcb103ac5cf87cb695845f5fa71379021500153566d8a8a9fc291/kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ddc373e0eef45b59197de815b1b28ef89ae3955e7722cc9710fb91cd77b7f47", size = 1472278 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ee/81/aca1eb176de671f8bda479b11acdc42c132b61a2ac861c883907dde6debb/kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:77e6f57a20b9bd4e1e2cedda4d0b986ebd0216236f0106e55c28aea3d3d69b16", size = 1478139 },
|
||||
{ url = "https://files.pythonhosted.org/packages/49/f4/e081522473671c97b2687d380e9e4c26f748a86363ce5af48b4a28e48d06/kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08e77738ed7538f036cd1170cbed942ef749137b1311fa2bbe2a7fda2f6bf3cc", size = 1413517 },
|
||||
{ url = "https://files.pythonhosted.org/packages/8f/e9/6a7d025d8da8c4931522922cd706105aa32b3291d1add8c5427cdcd66e63/kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5ce1e481a74b44dd5e92ff03ea0cb371ae7a0268318e202be06c8f04f4f1246", size = 1474952 },
|
||||
{ url = "https://files.pythonhosted.org/packages/82/13/13fa685ae167bee5d94b415991c4fc7bb0a1b6ebea6e753a87044b209678/kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:fc2ace710ba7c1dfd1a3b42530b62b9ceed115f19a1656adefce7b1782a37794", size = 2269132 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ef/92/bb7c9395489b99a6cb41d502d3686bac692586db2045adc19e45ee64ed23/kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:3452046c37c7692bd52b0e752b87954ef86ee2224e624ef7ce6cb21e8c41cc1b", size = 2425997 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ed/12/87f0e9271e2b63d35d0d8524954145837dd1a6c15b62a2d8c1ebe0f182b4/kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7e9a60b50fe8b2ec6f448fe8d81b07e40141bfced7f896309df271a0b92f80f3", size = 2376060 },
|
||||
{ url = "https://files.pythonhosted.org/packages/02/6e/c8af39288edbce8bf0fa35dee427b082758a4b71e9c91ef18fa667782138/kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:918139571133f366e8362fa4a297aeba86c7816b7ecf0bc79168080e2bd79957", size = 2520471 },
|
||||
{ url = "https://files.pythonhosted.org/packages/13/78/df381bc7b26e535c91469f77f16adcd073beb3e2dd25042efd064af82323/kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e063ef9f89885a1d68dd8b2e18f5ead48653176d10a0e324e3b0030e3a69adeb", size = 2338793 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d0/dc/c1abe38c37c071d0fc71c9a474fd0b9ede05d42f5a458d584619cfd2371a/kiwisolver-1.4.8-cp313-cp313-win_amd64.whl", hash = "sha256:a17b7c4f5b2c51bb68ed379defd608a03954a1845dfed7cc0117f1cc8a9b7fd2", size = 71855 },
|
||||
{ url = "https://files.pythonhosted.org/packages/a0/b6/21529d595b126ac298fdd90b705d87d4c5693de60023e0efcb4f387ed99e/kiwisolver-1.4.8-cp313-cp313-win_arm64.whl", hash = "sha256:3cd3bc628b25f74aedc6d374d5babf0166a92ff1317f46267f12d2ed54bc1d30", size = 65430 },
|
||||
{ url = "https://files.pythonhosted.org/packages/34/bd/b89380b7298e3af9b39f49334e3e2a4af0e04819789f04b43d560516c0c8/kiwisolver-1.4.8-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:370fd2df41660ed4e26b8c9d6bbcad668fbe2560462cba151a721d49e5b6628c", size = 126294 },
|
||||
{ url = "https://files.pythonhosted.org/packages/83/41/5857dc72e5e4148eaac5aa76e0703e594e4465f8ab7ec0fc60e3a9bb8fea/kiwisolver-1.4.8-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:84a2f830d42707de1d191b9490ac186bf7997a9495d4e9072210a1296345f7dc", size = 67736 },
|
||||
{ url = "https://files.pythonhosted.org/packages/e1/d1/be059b8db56ac270489fb0b3297fd1e53d195ba76e9bbb30e5401fa6b759/kiwisolver-1.4.8-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7a3ad337add5148cf51ce0b55642dc551c0b9d6248458a757f98796ca7348712", size = 66194 },
|
||||
{ url = "https://files.pythonhosted.org/packages/e1/83/4b73975f149819eb7dcf9299ed467eba068ecb16439a98990dcb12e63fdd/kiwisolver-1.4.8-cp313-cp313t-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7506488470f41169b86d8c9aeff587293f530a23a23a49d6bc64dab66bedc71e", size = 1465942 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c7/2c/30a5cdde5102958e602c07466bce058b9d7cb48734aa7a4327261ac8e002/kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f0121b07b356a22fb0414cec4666bbe36fd6d0d759db3d37228f496ed67c880", size = 1595341 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ff/9b/1e71db1c000385aa069704f5990574b8244cce854ecd83119c19e83c9586/kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d6d6bd87df62c27d4185de7c511c6248040afae67028a8a22012b010bc7ad062", size = 1598455 },
|
||||
{ url = "https://files.pythonhosted.org/packages/85/92/c8fec52ddf06231b31cbb779af77e99b8253cd96bd135250b9498144c78b/kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:291331973c64bb9cce50bbe871fb2e675c4331dab4f31abe89f175ad7679a4d7", size = 1522138 },
|
||||
{ url = "https://files.pythonhosted.org/packages/0b/51/9eb7e2cd07a15d8bdd976f6190c0164f92ce1904e5c0c79198c4972926b7/kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:893f5525bb92d3d735878ec00f781b2de998333659507d29ea4466208df37bed", size = 1582857 },
|
||||
{ url = "https://files.pythonhosted.org/packages/0f/95/c5a00387a5405e68ba32cc64af65ce881a39b98d73cc394b24143bebc5b8/kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b47a465040146981dc9db8647981b8cb96366fbc8d452b031e4f8fdffec3f26d", size = 2293129 },
|
||||
{ url = "https://files.pythonhosted.org/packages/44/83/eeb7af7d706b8347548313fa3a3a15931f404533cc54fe01f39e830dd231/kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:99cea8b9dd34ff80c521aef46a1dddb0dcc0283cf18bde6d756f1e6f31772165", size = 2421538 },
|
||||
{ url = "https://files.pythonhosted.org/packages/05/f9/27e94c1b3eb29e6933b6986ffc5fa1177d2cd1f0c8efc5f02c91c9ac61de/kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:151dffc4865e5fe6dafce5480fab84f950d14566c480c08a53c663a0020504b6", size = 2390661 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d9/d4/3c9735faa36ac591a4afcc2980d2691000506050b7a7e80bcfe44048daa7/kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:577facaa411c10421314598b50413aa1ebcf5126f704f1e5d72d7e4e9f020d90", size = 2546710 },
|
||||
{ url = "https://files.pythonhosted.org/packages/4c/fa/be89a49c640930180657482a74970cdcf6f7072c8d2471e1babe17a222dc/kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:be4816dc51c8a471749d664161b434912eee82f2ea66bd7628bd14583a833e85", size = 2349213 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "markupsafe"
|
||||
version = "3.0.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/22/09/d1f21434c97fc42f09d290cbb6350d44eb12f09cc62c9476effdb33a18aa/MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf", size = 14274 },
|
||||
{ url = "https://files.pythonhosted.org/packages/6b/b0/18f76bba336fa5aecf79d45dcd6c806c280ec44538b3c13671d49099fdd0/MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225", size = 12348 },
|
||||
{ url = "https://files.pythonhosted.org/packages/e0/25/dd5c0f6ac1311e9b40f4af06c78efde0f3b5cbf02502f8ef9501294c425b/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028", size = 24149 },
|
||||
{ url = "https://files.pythonhosted.org/packages/f3/f0/89e7aadfb3749d0f52234a0c8c7867877876e0a20b60e2188e9850794c17/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8", size = 23118 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d5/da/f2eeb64c723f5e3777bc081da884b414671982008c47dcc1873d81f625b6/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c", size = 22993 },
|
||||
{ url = "https://files.pythonhosted.org/packages/da/0e/1f32af846df486dce7c227fe0f2398dc7e2e51d4a370508281f3c1c5cddc/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557", size = 24178 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c4/f6/bb3ca0532de8086cbff5f06d137064c8410d10779c4c127e0e47d17c0b71/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22", size = 23319 },
|
||||
{ url = "https://files.pythonhosted.org/packages/a2/82/8be4c96ffee03c5b4a034e60a31294daf481e12c7c43ab8e34a1453ee48b/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", size = 23352 },
|
||||
{ url = "https://files.pythonhosted.org/packages/51/ae/97827349d3fcffee7e184bdf7f41cd6b88d9919c80f0263ba7acd1bbcb18/MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", size = 15097 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c1/80/a61f99dc3a936413c3ee4e1eecac96c0da5ed07ad56fd975f1a9da5bc630/MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", size = 15601 },
|
||||
{ url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274 },
|
||||
{ url = "https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122 },
|
||||
{ url = "https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978 },
|
||||
{ url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208 },
|
||||
{ url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101 },
|
||||
{ url = "https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603 },
|
||||
{ url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510 },
|
||||
{ url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486 },
|
||||
{ url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480 },
|
||||
{ url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914 },
|
||||
{ url = "https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796 },
|
||||
{ url = "https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473 },
|
||||
{ url = "https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114 },
|
||||
{ url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098 },
|
||||
{ url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208 },
|
||||
{ url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "matplotlib"
|
||||
version = "3.10.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "contourpy" },
|
||||
{ name = "cycler" },
|
||||
{ name = "fonttools" },
|
||||
{ name = "kiwisolver" },
|
||||
{ name = "numpy" },
|
||||
{ name = "packaging" },
|
||||
{ name = "pillow" },
|
||||
{ name = "pyparsing" },
|
||||
{ name = "python-dateutil" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/2f/08/b89867ecea2e305f408fbb417139a8dd941ecf7b23a2e02157c36da546f0/matplotlib-3.10.1.tar.gz", hash = "sha256:e8d2d0e3881b129268585bf4765ad3ee73a4591d77b9a18c214ac7e3a79fb2ba", size = 36743335 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/7c/1d/5e0dc3b59c034e43de16f94deb68f4ad8a96b3ea00f4b37c160b7474928e/matplotlib-3.10.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:66e907a06e68cb6cfd652c193311d61a12b54f56809cafbed9736ce5ad92f107", size = 8175488 },
|
||||
{ url = "https://files.pythonhosted.org/packages/7a/81/dae7e14042e74da658c3336ab9799128e09a1ee03964f2d89630b5d12106/matplotlib-3.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e9b4bb156abb8fa5e5b2b460196f7db7264fc6d62678c03457979e7d5254b7be", size = 8046264 },
|
||||
{ url = "https://files.pythonhosted.org/packages/21/c4/22516775dcde10fc9c9571d155f90710761b028fc44f660508106c363c97/matplotlib-3.10.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1985ad3d97f51307a2cbfc801a930f120def19ba22864182dacef55277102ba6", size = 8452048 },
|
||||
{ url = "https://files.pythonhosted.org/packages/63/23/c0615001f67ce7c96b3051d856baedc0c818a2ed84570b9bf9bde200f85d/matplotlib-3.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c96f2c2f825d1257e437a1482c5a2cf4fee15db4261bd6fc0750f81ba2b4ba3d", size = 8597111 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ca/c0/a07939a82aed77770514348f4568177d7dadab9787ebc618a616fe3d665e/matplotlib-3.10.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:35e87384ee9e488d8dd5a2dd7baf471178d38b90618d8ea147aced4ab59c9bea", size = 9402771 },
|
||||
{ url = "https://files.pythonhosted.org/packages/a6/b6/a9405484fb40746fdc6ae4502b16a9d6e53282ba5baaf9ebe2da579f68c4/matplotlib-3.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:cfd414bce89cc78a7e1d25202e979b3f1af799e416010a20ab2b5ebb3a02425c", size = 8063742 },
|
||||
{ url = "https://files.pythonhosted.org/packages/60/73/6770ff5e5523d00f3bc584acb6031e29ee5c8adc2336b16cd1d003675fe0/matplotlib-3.10.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c42eee41e1b60fd83ee3292ed83a97a5f2a8239b10c26715d8a6172226988d7b", size = 8176112 },
|
||||
{ url = "https://files.pythonhosted.org/packages/08/97/b0ca5da0ed54a3f6599c3ab568bdda65269bc27c21a2c97868c1625e4554/matplotlib-3.10.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4f0647b17b667ae745c13721602b540f7aadb2a32c5b96e924cd4fea5dcb90f1", size = 8046931 },
|
||||
{ url = "https://files.pythonhosted.org/packages/df/9a/1acbdc3b165d4ce2dcd2b1a6d4ffb46a7220ceee960c922c3d50d8514067/matplotlib-3.10.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa3854b5f9473564ef40a41bc922be978fab217776e9ae1545c9b3a5cf2092a3", size = 8453422 },
|
||||
{ url = "https://files.pythonhosted.org/packages/51/d0/2bc4368abf766203e548dc7ab57cf7e9c621f1a3c72b516cc7715347b179/matplotlib-3.10.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e496c01441be4c7d5f96d4e40f7fca06e20dcb40e44c8daa2e740e1757ad9e6", size = 8596819 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ab/1b/8b350f8a1746c37ab69dda7d7528d1fc696efb06db6ade9727b7887be16d/matplotlib-3.10.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5d45d3f5245be5b469843450617dcad9af75ca50568acf59997bed9311131a0b", size = 9402782 },
|
||||
{ url = "https://files.pythonhosted.org/packages/89/06/f570373d24d93503988ba8d04f213a372fa1ce48381c5eb15da985728498/matplotlib-3.10.1-cp313-cp313-win_amd64.whl", hash = "sha256:8e8e25b1209161d20dfe93037c8a7f7ca796ec9aa326e6e4588d8c4a5dd1e473", size = 8063812 },
|
||||
{ url = "https://files.pythonhosted.org/packages/fc/e0/8c811a925b5a7ad75135f0e5af46408b78af88bbb02a1df775100ef9bfef/matplotlib-3.10.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:19b06241ad89c3ae9469e07d77efa87041eac65d78df4fcf9cac318028009b01", size = 8214021 },
|
||||
{ url = "https://files.pythonhosted.org/packages/4a/34/319ec2139f68ba26da9d00fce2ff9f27679fb799a6c8e7358539801fd629/matplotlib-3.10.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:01e63101ebb3014e6e9f80d9cf9ee361a8599ddca2c3e166c563628b39305dbb", size = 8090782 },
|
||||
{ url = "https://files.pythonhosted.org/packages/77/ea/9812124ab9a99df5b2eec1110e9b2edc0b8f77039abf4c56e0a376e84a29/matplotlib-3.10.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f06bad951eea6422ac4e8bdebcf3a70c59ea0a03338c5d2b109f57b64eb3972", size = 8478901 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c9/db/b05bf463689134789b06dea85828f8ebe506fa1e37593f723b65b86c9582/matplotlib-3.10.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a3dfb036f34873b46978f55e240cff7a239f6c4409eac62d8145bad3fc6ba5a3", size = 8613864 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c2/04/41ccec4409f3023a7576df3b5c025f1a8c8b81fbfe922ecfd837ac36e081/matplotlib-3.10.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:dc6ab14a7ab3b4d813b88ba957fc05c79493a037f54e246162033591e770de6f", size = 9409487 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ac/c2/0d5aae823bdcc42cc99327ecdd4d28585e15ccd5218c453b7bcd827f3421/matplotlib-3.10.1-cp313-cp313t-win_amd64.whl", hash = "sha256:bc411ebd5889a78dabbc457b3fa153203e22248bfa6eedc6797be5df0164dbf9", size = 8134832 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mpmath"
|
||||
version = "1.3.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/e0/47/dd32fa426cc72114383ac549964eecb20ecfd886d1e5ccf5340b55b02f57/mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f", size = 508106 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c", size = 536198 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "networkx"
|
||||
version = "3.4.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/fd/1d/06475e1cd5264c0b870ea2cc6fdb3e37177c1e565c43f56ff17a10e3937f/networkx-3.4.2.tar.gz", hash = "sha256:307c3669428c5362aab27c8a1260aa8f47c4e91d3891f48be0141738d8d053e1", size = 2151368 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/b9/54/dd730b32ea14ea797530a4479b2ed46a6fb250f682a9cfb997e968bf0261/networkx-3.4.2-py3-none-any.whl", hash = "sha256:df5d4365b724cf81b8c6a7312509d0c22386097011ad1abe274afd5e9d3bbc5f", size = 1723263 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "nodeenv"
|
||||
version = "1.9.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/43/16/fc88b08840de0e0a72a2f9d8c6bae36be573e475a6326ae854bcc549fc45/nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f", size = 47437 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/d2/1d/1b658dbd2b9fa9c4c9f32accbfc0205d532c8c6194dc0f2a4c0428e7128a/nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9", size = 22314 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "numpy"
|
||||
version = "2.2.4"
|
||||
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/e1/78/31103410a57bc2c2b93a3597340a8119588571f6a4539067546cb9a0bfac/numpy-2.2.4.tar.gz", hash = "sha256:9ba03692a45d3eef66559efe1d1096c4b9b75c0986b5dff5530c378fb8331d4f", size = 20270701 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/a2/30/182db21d4f2a95904cec1a6f779479ea1ac07c0647f064dea454ec650c42/numpy-2.2.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a7b9084668aa0f64e64bd00d27ba5146ef1c3a8835f3bd912e7a9e01326804c4", size = 20947156 },
{ url = "https://files.pythonhosted.org/packages/24/6d/9483566acfbda6c62c6bc74b6e981c777229d2af93c8eb2469b26ac1b7bc/numpy-2.2.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:dbe512c511956b893d2dacd007d955a3f03d555ae05cfa3ff1c1ff6df8851854", size = 14133092 },
{ url = "https://files.pythonhosted.org/packages/27/f6/dba8a258acbf9d2bed2525cdcbb9493ef9bae5199d7a9cb92ee7e9b2aea6/numpy-2.2.4-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:bb649f8b207ab07caebba230d851b579a3c8711a851d29efe15008e31bb4de24", size = 5163515 },
{ url = "https://files.pythonhosted.org/packages/62/30/82116199d1c249446723c68f2c9da40d7f062551036f50b8c4caa42ae252/numpy-2.2.4-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:f34dc300df798742b3d06515aa2a0aee20941c13579d7a2f2e10af01ae4901ee", size = 6696558 },
{ url = "https://files.pythonhosted.org/packages/0e/b2/54122b3c6df5df3e87582b2e9430f1bdb63af4023c739ba300164c9ae503/numpy-2.2.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3f7ac96b16955634e223b579a3e5798df59007ca43e8d451a0e6a50f6bfdfba", size = 14084742 },
{ url = "https://files.pythonhosted.org/packages/02/e2/e2cbb8d634151aab9528ef7b8bab52ee4ab10e076509285602c2a3a686e0/numpy-2.2.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f92084defa704deadd4e0a5ab1dc52d8ac9e8a8ef617f3fbb853e79b0ea3592", size = 16134051 },
{ url = "https://files.pythonhosted.org/packages/8e/21/efd47800e4affc993e8be50c1b768de038363dd88865920439ef7b422c60/numpy-2.2.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7a4e84a6283b36632e2a5b56e121961f6542ab886bc9e12f8f9818b3c266bfbb", size = 15578972 },
{ url = "https://files.pythonhosted.org/packages/04/1e/f8bb88f6157045dd5d9b27ccf433d016981032690969aa5c19e332b138c0/numpy-2.2.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:11c43995255eb4127115956495f43e9343736edb7fcdb0d973defd9de14cd84f", size = 17898106 },
{ url = "https://files.pythonhosted.org/packages/2b/93/df59a5a3897c1f036ae8ff845e45f4081bb06943039ae28a3c1c7c780f22/numpy-2.2.4-cp312-cp312-win32.whl", hash = "sha256:65ef3468b53269eb5fdb3a5c09508c032b793da03251d5f8722b1194f1790c00", size = 6311190 },
{ url = "https://files.pythonhosted.org/packages/46/69/8c4f928741c2a8efa255fdc7e9097527c6dc4e4df147e3cadc5d9357ce85/numpy-2.2.4-cp312-cp312-win_amd64.whl", hash = "sha256:2aad3c17ed2ff455b8eaafe06bcdae0062a1db77cb99f4b9cbb5f4ecb13c5146", size = 12644305 },
{ url = "https://files.pythonhosted.org/packages/2a/d0/bd5ad792e78017f5decfb2ecc947422a3669a34f775679a76317af671ffc/numpy-2.2.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1cf4e5c6a278d620dee9ddeb487dc6a860f9b199eadeecc567f777daace1e9e7", size = 20933623 },
{ url = "https://files.pythonhosted.org/packages/c3/bc/2b3545766337b95409868f8e62053135bdc7fa2ce630aba983a2aa60b559/numpy-2.2.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1974afec0b479e50438fc3648974268f972e2d908ddb6d7fb634598cdb8260a0", size = 14148681 },
{ url = "https://files.pythonhosted.org/packages/6a/70/67b24d68a56551d43a6ec9fe8c5f91b526d4c1a46a6387b956bf2d64744e/numpy-2.2.4-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:79bd5f0a02aa16808fcbc79a9a376a147cc1045f7dfe44c6e7d53fa8b8a79392", size = 5148759 },
{ url = "https://files.pythonhosted.org/packages/1c/8b/e2fc8a75fcb7be12d90b31477c9356c0cbb44abce7ffb36be39a0017afad/numpy-2.2.4-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:3387dd7232804b341165cedcb90694565a6015433ee076c6754775e85d86f1fc", size = 6683092 },
{ url = "https://files.pythonhosted.org/packages/13/73/41b7b27f169ecf368b52533edb72e56a133f9e86256e809e169362553b49/numpy-2.2.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f527d8fdb0286fd2fd97a2a96c6be17ba4232da346931d967a0630050dfd298", size = 14081422 },
{ url = "https://files.pythonhosted.org/packages/4b/04/e208ff3ae3ddfbafc05910f89546382f15a3f10186b1f56bd99f159689c2/numpy-2.2.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bce43e386c16898b91e162e5baaad90c4b06f9dcbe36282490032cec98dc8ae7", size = 16132202 },
{ url = "https://files.pythonhosted.org/packages/fe/bc/2218160574d862d5e55f803d88ddcad88beff94791f9c5f86d67bd8fbf1c/numpy-2.2.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:31504f970f563d99f71a3512d0c01a645b692b12a63630d6aafa0939e52361e6", size = 15573131 },
{ url = "https://files.pythonhosted.org/packages/a5/78/97c775bc4f05abc8a8426436b7cb1be806a02a2994b195945600855e3a25/numpy-2.2.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:81413336ef121a6ba746892fad881a83351ee3e1e4011f52e97fba79233611fd", size = 17894270 },
{ url = "https://files.pythonhosted.org/packages/b9/eb/38c06217a5f6de27dcb41524ca95a44e395e6a1decdc0c99fec0832ce6ae/numpy-2.2.4-cp313-cp313-win32.whl", hash = "sha256:f486038e44caa08dbd97275a9a35a283a8f1d2f0ee60ac260a1790e76660833c", size = 6308141 },
{ url = "https://files.pythonhosted.org/packages/52/17/d0dd10ab6d125c6d11ffb6dfa3423c3571befab8358d4f85cd4471964fcd/numpy-2.2.4-cp313-cp313-win_amd64.whl", hash = "sha256:207a2b8441cc8b6a2a78c9ddc64d00d20c303d79fba08c577752f080c4007ee3", size = 12636885 },
{ url = "https://files.pythonhosted.org/packages/fa/e2/793288ede17a0fdc921172916efb40f3cbc2aa97e76c5c84aba6dc7e8747/numpy-2.2.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:8120575cb4882318c791f839a4fd66161a6fa46f3f0a5e613071aae35b5dd8f8", size = 20961829 },
{ url = "https://files.pythonhosted.org/packages/3a/75/bb4573f6c462afd1ea5cbedcc362fe3e9bdbcc57aefd37c681be1155fbaa/numpy-2.2.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a761ba0fa886a7bb33c6c8f6f20213735cb19642c580a931c625ee377ee8bd39", size = 14161419 },
{ url = "https://files.pythonhosted.org/packages/03/68/07b4cd01090ca46c7a336958b413cdbe75002286295f2addea767b7f16c9/numpy-2.2.4-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:ac0280f1ba4a4bfff363a99a6aceed4f8e123f8a9b234c89140f5e894e452ecd", size = 5196414 },
{ url = "https://files.pythonhosted.org/packages/a5/fd/d4a29478d622fedff5c4b4b4cedfc37a00691079623c0575978d2446db9e/numpy-2.2.4-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:879cf3a9a2b53a4672a168c21375166171bc3932b7e21f622201811c43cdd3b0", size = 6709379 },
{ url = "https://files.pythonhosted.org/packages/41/78/96dddb75bb9be730b87c72f30ffdd62611aba234e4e460576a068c98eff6/numpy-2.2.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f05d4198c1bacc9124018109c5fba2f3201dbe7ab6e92ff100494f236209c960", size = 14051725 },
{ url = "https://files.pythonhosted.org/packages/00/06/5306b8199bffac2a29d9119c11f457f6c7d41115a335b78d3f86fad4dbe8/numpy-2.2.4-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2f085ce2e813a50dfd0e01fbfc0c12bbe5d2063d99f8b29da30e544fb6483b8", size = 16101638 },
{ url = "https://files.pythonhosted.org/packages/fa/03/74c5b631ee1ded596945c12027649e6344614144369fd3ec1aaced782882/numpy-2.2.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:92bda934a791c01d6d9d8e038363c50918ef7c40601552a58ac84c9613a665bc", size = 15571717 },
{ url = "https://files.pythonhosted.org/packages/cb/dc/4fc7c0283abe0981e3b89f9b332a134e237dd476b0c018e1e21083310c31/numpy-2.2.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ee4d528022f4c5ff67332469e10efe06a267e32f4067dc76bb7e2cddf3cd25ff", size = 17879998 },
{ url = "https://files.pythonhosted.org/packages/e5/2b/878576190c5cfa29ed896b518cc516aecc7c98a919e20706c12480465f43/numpy-2.2.4-cp313-cp313t-win32.whl", hash = "sha256:05c076d531e9998e7e694c36e8b349969c56eadd2cdcd07242958489d79a7286", size = 6366896 },
{ url = "https://files.pythonhosted.org/packages/3e/05/eb7eec66b95cf697f08c754ef26c3549d03ebd682819f794cb039574a0a6/numpy-2.2.4-cp313-cp313t-win_amd64.whl", hash = "sha256:188dcbca89834cc2e14eb2f106c96d6d46f200fe0200310fc29089657379c58d", size = 12739119 },
]

[[package]]
name = "nvidia-cublas-cu12"
version = "12.4.5.8"
source = { registry = "https://pypi.org/simple" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/ae/71/1c91302526c45ab494c23f61c7a84aa568b8c1f9d196efa5993957faf906/nvidia_cublas_cu12-12.4.5.8-py3-none-manylinux2014_x86_64.whl", hash = "sha256:2fc8da60df463fdefa81e323eef2e36489e1c94335b5358bcb38360adf75ac9b", size = 363438805 },
]

[[package]]
name = "nvidia-cuda-cupti-cu12"
version = "12.4.127"
source = { registry = "https://pypi.org/simple" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/67/42/f4f60238e8194a3106d06a058d494b18e006c10bb2b915655bd9f6ea4cb1/nvidia_cuda_cupti_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:9dec60f5ac126f7bb551c055072b69d85392b13311fcc1bcda2202d172df30fb", size = 13813957 },
]

[[package]]
name = "nvidia-cuda-nvrtc-cu12"
version = "12.4.127"
source = { registry = "https://pypi.org/simple" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/2c/14/91ae57cd4db3f9ef7aa99f4019cfa8d54cb4caa7e00975df6467e9725a9f/nvidia_cuda_nvrtc_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a178759ebb095827bd30ef56598ec182b85547f1508941a3d560eb7ea1fbf338", size = 24640306 },
]

[[package]]
name = "nvidia-cuda-runtime-cu12"
version = "12.4.127"
source = { registry = "https://pypi.org/simple" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/ea/27/1795d86fe88ef397885f2e580ac37628ed058a92ed2c39dc8eac3adf0619/nvidia_cuda_runtime_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:64403288fa2136ee8e467cdc9c9427e0434110899d07c779f25b5c068934faa5", size = 883737 },
]

[[package]]
name = "nvidia-cudnn-cu12"
version = "9.1.0.70"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "nvidia-cublas-cu12", marker = "sys_platform == 'linux'" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/9f/fd/713452cd72343f682b1c7b9321e23829f00b842ceaedcda96e742ea0b0b3/nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl", hash = "sha256:165764f44ef8c61fcdfdfdbe769d687e06374059fbb388b6c89ecb0e28793a6f", size = 664752741 },
]

[[package]]
name = "nvidia-cufft-cu12"
version = "11.2.1.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "nvidia-nvjitlink-cu12", marker = "sys_platform == 'linux'" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/27/94/3266821f65b92b3138631e9c8e7fe1fb513804ac934485a8d05776e1dd43/nvidia_cufft_cu12-11.2.1.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f083fc24912aa410be21fa16d157fed2055dab1cc4b6934a0e03cba69eb242b9", size = 211459117 },
]

[[package]]
name = "nvidia-curand-cu12"
version = "10.3.5.147"
source = { registry = "https://pypi.org/simple" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/8a/6d/44ad094874c6f1b9c654f8ed939590bdc408349f137f9b98a3a23ccec411/nvidia_curand_cu12-10.3.5.147-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a88f583d4e0bb643c49743469964103aa59f7f708d862c3ddb0fc07f851e3b8b", size = 56305206 },
]

[[package]]
name = "nvidia-cusolver-cu12"
version = "11.6.1.9"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "nvidia-cublas-cu12", marker = "sys_platform == 'linux'" },
{ name = "nvidia-cusparse-cu12", marker = "sys_platform == 'linux'" },
{ name = "nvidia-nvjitlink-cu12", marker = "sys_platform == 'linux'" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/3a/e1/5b9089a4b2a4790dfdea8b3a006052cfecff58139d5a4e34cb1a51df8d6f/nvidia_cusolver_cu12-11.6.1.9-py3-none-manylinux2014_x86_64.whl", hash = "sha256:19e33fa442bcfd085b3086c4ebf7e8debc07cfe01e11513cc6d332fd918ac260", size = 127936057 },
]

[[package]]
name = "nvidia-cusparse-cu12"
version = "12.3.1.170"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "nvidia-nvjitlink-cu12", marker = "sys_platform == 'linux'" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/db/f7/97a9ea26ed4bbbfc2d470994b8b4f338ef663be97b8f677519ac195e113d/nvidia_cusparse_cu12-12.3.1.170-py3-none-manylinux2014_x86_64.whl", hash = "sha256:ea4f11a2904e2a8dc4b1833cc1b5181cde564edd0d5cd33e3c168eff2d1863f1", size = 207454763 },
]

[[package]]
name = "nvidia-cusparselt-cu12"
version = "0.6.2"
source = { registry = "https://pypi.org/simple" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/78/a8/bcbb63b53a4b1234feeafb65544ee55495e1bb37ec31b999b963cbccfd1d/nvidia_cusparselt_cu12-0.6.2-py3-none-manylinux2014_x86_64.whl", hash = "sha256:df2c24502fd76ebafe7457dbc4716b2fec071aabaed4fb7691a201cde03704d9", size = 150057751 },
]

[[package]]
name = "nvidia-nccl-cu12"
version = "2.21.5"
source = { registry = "https://pypi.org/simple" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/df/99/12cd266d6233f47d00daf3a72739872bdc10267d0383508b0b9c84a18bb6/nvidia_nccl_cu12-2.21.5-py3-none-manylinux2014_x86_64.whl", hash = "sha256:8579076d30a8c24988834445f8d633c697d42397e92ffc3f63fa26766d25e0a0", size = 188654414 },
]

[[package]]
name = "nvidia-nvjitlink-cu12"
version = "12.4.127"
source = { registry = "https://pypi.org/simple" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/ff/ff/847841bacfbefc97a00036e0fce5a0f086b640756dc38caea5e1bb002655/nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:06b3b9b25bf3f8af351d664978ca26a16d2c5127dbd53c0497e28d1fb9611d57", size = 21066810 },
]

[[package]]
name = "nvidia-nvtx-cu12"
version = "12.4.127"
source = { registry = "https://pypi.org/simple" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/87/20/199b8713428322a2f22b722c62b8cc278cc53dffa9705d744484b5035ee9/nvidia_nvtx_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:781e950d9b9f60d8241ccea575b32f5105a5baf4c2351cab5256a24869f12a1a", size = 99144 },
]

[[package]]
name = "packaging"
version = "24.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451 },
]

[[package]]
name = "pillow"
version = "11.1.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/f3/af/c097e544e7bd278333db77933e535098c259609c4eb3b85381109602fb5b/pillow-11.1.0.tar.gz", hash = "sha256:368da70808b36d73b4b390a8ffac11069f8a5c85f29eff1f1b01bcf3ef5b2a20", size = 46742715 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/95/20/9ce6ed62c91c073fcaa23d216e68289e19d95fb8188b9fb7a63d36771db8/pillow-11.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2062ffb1d36544d42fcaa277b069c88b01bb7298f4efa06731a7fd6cc290b81a", size = 3226818 },
{ url = "https://files.pythonhosted.org/packages/b9/d8/f6004d98579a2596c098d1e30d10b248798cceff82d2b77aa914875bfea1/pillow-11.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a85b653980faad27e88b141348707ceeef8a1186f75ecc600c395dcac19f385b", size = 3101662 },
{ url = "https://files.pythonhosted.org/packages/08/d9/892e705f90051c7a2574d9f24579c9e100c828700d78a63239676f960b74/pillow-11.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9409c080586d1f683df3f184f20e36fb647f2e0bc3988094d4fd8c9f4eb1b3b3", size = 4329317 },
{ url = "https://files.pythonhosted.org/packages/8c/aa/7f29711f26680eab0bcd3ecdd6d23ed6bce180d82e3f6380fb7ae35fcf3b/pillow-11.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7fdadc077553621911f27ce206ffcbec7d3f8d7b50e0da39f10997e8e2bb7f6a", size = 4412999 },
{ url = "https://files.pythonhosted.org/packages/c8/c4/8f0fe3b9e0f7196f6d0bbb151f9fba323d72a41da068610c4c960b16632a/pillow-11.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:93a18841d09bcdd774dcdc308e4537e1f867b3dec059c131fde0327899734aa1", size = 4368819 },
{ url = "https://files.pythonhosted.org/packages/38/0d/84200ed6a871ce386ddc82904bfadc0c6b28b0c0ec78176871a4679e40b3/pillow-11.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9aa9aeddeed452b2f616ff5507459e7bab436916ccb10961c4a382cd3e03f47f", size = 4496081 },
{ url = "https://files.pythonhosted.org/packages/84/9c/9bcd66f714d7e25b64118e3952d52841a4babc6d97b6d28e2261c52045d4/pillow-11.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3cdcdb0b896e981678eee140d882b70092dac83ac1cdf6b3a60e2216a73f2b91", size = 4296513 },
{ url = "https://files.pythonhosted.org/packages/db/61/ada2a226e22da011b45f7104c95ebda1b63dcbb0c378ad0f7c2a710f8fd2/pillow-11.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:36ba10b9cb413e7c7dfa3e189aba252deee0602c86c309799da5a74009ac7a1c", size = 4431298 },
{ url = "https://files.pythonhosted.org/packages/e7/c4/fc6e86750523f367923522014b821c11ebc5ad402e659d8c9d09b3c9d70c/pillow-11.1.0-cp312-cp312-win32.whl", hash = "sha256:cfd5cd998c2e36a862d0e27b2df63237e67273f2fc78f47445b14e73a810e7e6", size = 2291630 },
{ url = "https://files.pythonhosted.org/packages/08/5c/2104299949b9d504baf3f4d35f73dbd14ef31bbd1ddc2c1b66a5b7dfda44/pillow-11.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:a697cd8ba0383bba3d2d3ada02b34ed268cb548b369943cd349007730c92bddf", size = 2626369 },
{ url = "https://files.pythonhosted.org/packages/37/f3/9b18362206b244167c958984b57c7f70a0289bfb59a530dd8af5f699b910/pillow-11.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:4dd43a78897793f60766563969442020e90eb7847463eca901e41ba186a7d4a5", size = 2375240 },
{ url = "https://files.pythonhosted.org/packages/b3/31/9ca79cafdce364fd5c980cd3416c20ce1bebd235b470d262f9d24d810184/pillow-11.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ae98e14432d458fc3de11a77ccb3ae65ddce70f730e7c76140653048c71bfcbc", size = 3226640 },
{ url = "https://files.pythonhosted.org/packages/ac/0f/ff07ad45a1f172a497aa393b13a9d81a32e1477ef0e869d030e3c1532521/pillow-11.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cc1331b6d5a6e144aeb5e626f4375f5b7ae9934ba620c0ac6b3e43d5e683a0f0", size = 3101437 },
{ url = "https://files.pythonhosted.org/packages/08/2f/9906fca87a68d29ec4530be1f893149e0cb64a86d1f9f70a7cfcdfe8ae44/pillow-11.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:758e9d4ef15d3560214cddbc97b8ef3ef86ce04d62ddac17ad39ba87e89bd3b1", size = 4326605 },
{ url = "https://files.pythonhosted.org/packages/b0/0f/f3547ee15b145bc5c8b336401b2d4c9d9da67da9dcb572d7c0d4103d2c69/pillow-11.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b523466b1a31d0dcef7c5be1f20b942919b62fd6e9a9be199d035509cbefc0ec", size = 4411173 },
{ url = "https://files.pythonhosted.org/packages/b1/df/bf8176aa5db515c5de584c5e00df9bab0713548fd780c82a86cba2c2fedb/pillow-11.1.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:9044b5e4f7083f209c4e35aa5dd54b1dd5b112b108648f5c902ad586d4f945c5", size = 4369145 },
{ url = "https://files.pythonhosted.org/packages/de/7c/7433122d1cfadc740f577cb55526fdc39129a648ac65ce64db2eb7209277/pillow-11.1.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:3764d53e09cdedd91bee65c2527815d315c6b90d7b8b79759cc48d7bf5d4f114", size = 4496340 },
{ url = "https://files.pythonhosted.org/packages/25/46/dd94b93ca6bd555588835f2504bd90c00d5438fe131cf01cfa0c5131a19d/pillow-11.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:31eba6bbdd27dde97b0174ddf0297d7a9c3a507a8a1480e1e60ef914fe23d352", size = 4296906 },
{ url = "https://files.pythonhosted.org/packages/a8/28/2f9d32014dfc7753e586db9add35b8a41b7a3b46540e965cb6d6bc607bd2/pillow-11.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b5d658fbd9f0d6eea113aea286b21d3cd4d3fd978157cbf2447a6035916506d3", size = 4431759 },
{ url = "https://files.pythonhosted.org/packages/33/48/19c2cbe7403870fbe8b7737d19eb013f46299cdfe4501573367f6396c775/pillow-11.1.0-cp313-cp313-win32.whl", hash = "sha256:f86d3a7a9af5d826744fabf4afd15b9dfef44fe69a98541f666f66fbb8d3fef9", size = 2291657 },
{ url = "https://files.pythonhosted.org/packages/3b/ad/285c556747d34c399f332ba7c1a595ba245796ef3e22eae190f5364bb62b/pillow-11.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:593c5fd6be85da83656b93ffcccc2312d2d149d251e98588b14fbc288fd8909c", size = 2626304 },
{ url = "https://files.pythonhosted.org/packages/e5/7b/ef35a71163bf36db06e9c8729608f78dedf032fc8313d19bd4be5c2588f3/pillow-11.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:11633d58b6ee5733bde153a8dafd25e505ea3d32e261accd388827ee987baf65", size = 2375117 },
{ url = "https://files.pythonhosted.org/packages/79/30/77f54228401e84d6791354888549b45824ab0ffde659bafa67956303a09f/pillow-11.1.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:70ca5ef3b3b1c4a0812b5c63c57c23b63e53bc38e758b37a951e5bc466449861", size = 3230060 },
{ url = "https://files.pythonhosted.org/packages/ce/b1/56723b74b07dd64c1010fee011951ea9c35a43d8020acd03111f14298225/pillow-11.1.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:8000376f139d4d38d6851eb149b321a52bb8893a88dae8ee7d95840431977081", size = 3106192 },
{ url = "https://files.pythonhosted.org/packages/e1/cd/7bf7180e08f80a4dcc6b4c3a0aa9e0b0ae57168562726a05dc8aa8fa66b0/pillow-11.1.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ee85f0696a17dd28fbcfceb59f9510aa71934b483d1f5601d1030c3c8304f3c", size = 4446805 },
{ url = "https://files.pythonhosted.org/packages/97/42/87c856ea30c8ed97e8efbe672b58c8304dee0573f8c7cab62ae9e31db6ae/pillow-11.1.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:dd0e081319328928531df7a0e63621caf67652c8464303fd102141b785ef9547", size = 4530623 },
{ url = "https://files.pythonhosted.org/packages/ff/41/026879e90c84a88e33fb00cc6bd915ac2743c67e87a18f80270dfe3c2041/pillow-11.1.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e63e4e5081de46517099dc30abe418122f54531a6ae2ebc8680bcd7096860eab", size = 4465191 },
{ url = "https://files.pythonhosted.org/packages/e5/fb/a7960e838bc5df57a2ce23183bfd2290d97c33028b96bde332a9057834d3/pillow-11.1.0-cp313-cp313t-win32.whl", hash = "sha256:dda60aa465b861324e65a78c9f5cf0f4bc713e4309f83bc387be158b077963d9", size = 2295494 },
{ url = "https://files.pythonhosted.org/packages/d7/6c/6ec83ee2f6f0fda8d4cf89045c6be4b0373ebfc363ba8538f8c999f63fcd/pillow-11.1.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ad5db5781c774ab9a9b2c4302bbf0c1014960a0a7be63278d13ae6fdf88126fe", size = 2631595 },
{ url = "https://files.pythonhosted.org/packages/cf/6c/41c21c6c8af92b9fea313aa47c75de49e2f9a467964ee33eb0135d47eb64/pillow-11.1.0-cp313-cp313t-win_arm64.whl", hash = "sha256:67cd427c68926108778a9005f2a04adbd5e67c442ed21d95389fe1d595458756", size = 2377651 },
]

[[package]]
name = "platformdirs"
version = "4.3.7"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/b6/2d/7d512a3913d60623e7eb945c6d1b4f0bddf1d0b7ada5225274c87e5b53d1/platformdirs-4.3.7.tar.gz", hash = "sha256:eb437d586b6a0986388f0d6f74aa0cde27b48d0e3d66843640bfb6bdcdb6e351", size = 21291 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/6d/45/59578566b3275b8fd9157885918fcd0c4d74162928a5310926887b856a51/platformdirs-4.3.7-py3-none-any.whl", hash = "sha256:a03875334331946f13c549dbd8f4bac7a13a50a895a0eb1e8c6a8ace80d40a94", size = 18499 },
]

[[package]]
name = "pluggy"
version = "1.5.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556 },
]

[[package]]
name = "pre-commit"
version = "4.2.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "cfgv" },
{ name = "identify" },
{ name = "nodeenv" },
{ name = "pyyaml" },
{ name = "virtualenv" },
]
sdist = { url = "https://files.pythonhosted.org/packages/08/39/679ca9b26c7bb2999ff122d50faa301e49af82ca9c066ec061cfbc0c6784/pre_commit-4.2.0.tar.gz", hash = "sha256:601283b9757afd87d40c4c4a9b2b5de9637a8ea02eaff7adc2d0fb4e04841146", size = 193424 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/88/74/a88bf1b1efeae488a0c0b7bdf71429c313722d1fc0f377537fbe554e6180/pre_commit-4.2.0-py2.py3-none-any.whl", hash = "sha256:a009ca7205f1eb497d10b845e52c838a98b6cdd2102a6c8e4540e94ee75c58bd", size = 220707 },
]

[[package]]
name = "pyparsing"
version = "3.2.3"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/bb/22/f1129e69d94ffff626bdb5c835506b3a5b4f3d070f17ea295e12c2c6f60f/pyparsing-3.2.3.tar.gz", hash = "sha256:b9c13f1ab8b3b542f72e28f634bad4de758ab3ce4546e4301970ad6fa77c38be", size = 1088608 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/05/e7/df2285f3d08fee213f2d041540fa4fc9ca6c2d44cf36d3a035bf2a8d2bcc/pyparsing-3.2.3-py3-none-any.whl", hash = "sha256:a749938e02d6fd0b59b356ca504a24982314bb090c383e3cf201c95ef7e2bfcf", size = 111120 },
]

[[package]]
name = "pytest"
version = "8.3.5"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "sys_platform == 'win32'" },
{ name = "iniconfig" },
{ name = "packaging" },
{ name = "pluggy" },
]
sdist = { url = "https://files.pythonhosted.org/packages/ae/3c/c9d525a414d506893f0cd8a8d0de7706446213181570cdbd766691164e40/pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845", size = 1450891 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/30/3d/64ad57c803f1fa1e963a7946b6e0fea4a70df53c1a7fed304586539c2bac/pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820", size = 343634 },
]

[[package]]
name = "python-dateutil"
version = "2.9.0.post0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "six" },
]
sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892 },
]

[[package]]
name = "pyyaml"
version = "6.0.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873 },
{ url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302 },
{ url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154 },
{ url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223 },
{ url = "https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542 },
{ url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164 },
{ url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611 },
{ url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591 },
{ url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338 },
{ url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309 },
{ url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679 },
{ url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428 },
{ url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361 },
{ url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523 },
{ url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660 },
{ url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597 },
{ url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527 },
{ url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446 },
]

[[package]]
name = "ruff"
version = "0.11.5"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/45/71/5759b2a6b2279bb77fe15b1435b89473631c2cd6374d45ccdb6b785810be/ruff-0.11.5.tar.gz", hash = "sha256:cae2e2439cb88853e421901ec040a758960b576126dab520fa08e9de431d1bef", size = 3976488 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/23/db/6efda6381778eec7f35875b5cbefd194904832a1153d68d36d6b269d81a8/ruff-0.11.5-py3-none-linux_armv6l.whl", hash = "sha256:2561294e108eb648e50f210671cc56aee590fb6167b594144401532138c66c7b", size = 10103150 },
{ url = "https://files.pythonhosted.org/packages/44/f2/06cd9006077a8db61956768bc200a8e52515bf33a8f9b671ee527bb10d77/ruff-0.11.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:ac12884b9e005c12d0bd121f56ccf8033e1614f736f766c118ad60780882a077", size = 10898637 },
{ url = "https://files.pythonhosted.org/packages/18/f5/af390a013c56022fe6f72b95c86eb7b2585c89cc25d63882d3bfe411ecf1/ruff-0.11.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:4bfd80a6ec559a5eeb96c33f832418bf0fb96752de0539905cf7b0cc1d31d779", size = 10236012 },
{ url = "https://files.pythonhosted.org/packages/b8/ca/b9bf954cfed165e1a0c24b86305d5c8ea75def256707f2448439ac5e0d8b/ruff-0.11.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0947c0a1afa75dcb5db4b34b070ec2bccee869d40e6cc8ab25aca11a7d527794", size = 10415338 },
{ url = "https://files.pythonhosted.org/packages/d9/4d/2522dde4e790f1b59885283f8786ab0046958dfd39959c81acc75d347467/ruff-0.11.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ad871ff74b5ec9caa66cb725b85d4ef89b53f8170f47c3406e32ef040400b038", size = 9965277 },
{ url = "https://files.pythonhosted.org/packages/e5/7a/749f56f150eef71ce2f626a2f6988446c620af2f9ba2a7804295ca450397/ruff-0.11.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e6cf918390cfe46d240732d4d72fa6e18e528ca1f60e318a10835cf2fa3dc19f", size = 11541614 },
{ url = "https://files.pythonhosted.org/packages/89/b2/7d9b8435222485b6aac627d9c29793ba89be40b5de11584ca604b829e960/ruff-0.11.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:56145ee1478582f61c08f21076dc59153310d606ad663acc00ea3ab5b2125f82", size = 12198873 },
{ url = "https://files.pythonhosted.org/packages/00/e0/a1a69ef5ffb5c5f9c31554b27e030a9c468fc6f57055886d27d316dfbabd/ruff-0.11.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e5f66f8f1e8c9fc594cbd66fbc5f246a8d91f916cb9667e80208663ec3728304", size = 11670190 },
{ url = "https://files.pythonhosted.org/packages/05/61/c1c16df6e92975072c07f8b20dad35cd858e8462b8865bc856fe5d6ccb63/ruff-0.11.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80b4df4d335a80315ab9afc81ed1cff62be112bd165e162b5eed8ac55bfc8470", size = 13902301 },
{ url = "https://files.pythonhosted.org/packages/79/89/0af10c8af4363304fd8cb833bd407a2850c760b71edf742c18d5a87bb3ad/ruff-0.11.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3068befab73620b8a0cc2431bd46b3cd619bc17d6f7695a3e1bb166b652c382a", size = 11350132 },
{ url = "https://files.pythonhosted.org/packages/b9/e1/ecb4c687cbf15164dd00e38cf62cbab238cad05dd8b6b0fc68b0c2785e15/ruff-0.11.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:f5da2e710a9641828e09aa98b92c9ebbc60518fdf3921241326ca3e8f8e55b8b", size = 10312937 },
{ url = "https://files.pythonhosted.org/packages/cf/4f/0e53fe5e500b65934500949361e3cd290c5ba60f0324ed59d15f46479c06/ruff-0.11.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ef39f19cb8ec98cbc762344921e216f3857a06c47412030374fffd413fb8fd3a", size = 9936683 },
{ url = "https://files.pythonhosted.org/packages/04/a8/8183c4da6d35794ae7f76f96261ef5960853cd3f899c2671961f97a27d8e/ruff-0.11.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:b2a7cedf47244f431fd11aa5a7e2806dda2e0c365873bda7834e8f7d785ae159", size = 10950217 },
{ url = "https://files.pythonhosted.org/packages/26/88/9b85a5a8af21e46a0639b107fcf9bfc31da4f1d263f2fc7fbe7199b47f0a/ruff-0.11.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:81be52e7519f3d1a0beadcf8e974715b2dfc808ae8ec729ecfc79bddf8dbb783", size = 11404521 },
{ url = "https://files.pythonhosted.org/packages/fc/52/047f35d3b20fd1ae9ccfe28791ef0f3ca0ef0b3e6c1a58badd97d450131b/ruff-0.11.5-py3-none-win32.whl", hash = "sha256:e268da7b40f56e3eca571508a7e567e794f9bfcc0f412c4b607931d3af9c4afe", size = 10320697 },
{ url = "https://files.pythonhosted.org/packages/b9/fe/00c78010e3332a6e92762424cf4c1919065707e962232797d0b57fd8267e/ruff-0.11.5-py3-none-win_amd64.whl", hash = "sha256:6c6dc38af3cfe2863213ea25b6dc616d679205732dc0fb673356c2d69608f800", size = 11378665 },
{ url = "https://files.pythonhosted.org/packages/43/7c/c83fe5cbb70ff017612ff36654edfebec4b1ef79b558b8e5fd933bab836b/ruff-0.11.5-py3-none-win_arm64.whl", hash = "sha256:67e241b4314f4eacf14a601d586026a962f4002a475aa702c69980a38087aa4e", size = 10460287 },
]

[[package]]
name = "setuptools"
version = "78.1.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/a9/5a/0db4da3bc908df06e5efae42b44e75c81dd52716e10192ff36d0c1c8e379/setuptools-78.1.0.tar.gz", hash = "sha256:18fd474d4a82a5f83dac888df697af65afa82dec7323d09c3e37d1f14288da54", size = 1367827 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/54/21/f43f0a1fa8b06b32812e0975981f4677d28e0f3271601dc88ac5a5b83220/setuptools-78.1.0-py3-none-any.whl", hash = "sha256:3e386e96793c8702ae83d17b853fb93d3e09ef82ec62722e61da5cd22376dcd8", size = 1256108 },
]

[[package]]
name = "six"
version = "1.17.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050 },
]

[[package]]
name = "sympy"
version = "1.13.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "mpmath" },
]
sdist = { url = "https://files.pythonhosted.org/packages/ca/99/5a5b6f19ff9f083671ddf7b9632028436167cd3d33e11015754e41b249a4/sympy-1.13.1.tar.gz", hash = "sha256:9cebf7e04ff162015ce31c9c6c9144daa34a93bd082f54fd8f12deca4f47515f", size = 7533040 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/b2/fe/81695a1aa331a842b582453b605175f419fe8540355886031328089d840a/sympy-1.13.1-py3-none-any.whl", hash = "sha256:db36cdc64bf61b9b24578b6f7bab1ecdd2452cf008f34faa33776680c26d66f8", size = 6189177 },
]

[[package]]
name = "torch"
version = "2.6.0"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
"sys_platform != 'linux'",
]
dependencies = [
{ name = "filelock", marker = "sys_platform != 'linux'" },
{ name = "fsspec", marker = "sys_platform != 'linux'" },
{ name = "jinja2", marker = "sys_platform != 'linux'" },
{ name = "networkx", marker = "sys_platform != 'linux'" },
{ name = "setuptools", marker = "sys_platform != 'linux'" },
{ name = "sympy", marker = "sys_platform != 'linux'" },
{ name = "typing-extensions", marker = "sys_platform != 'linux'" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/18/cf/ae99bd066571656185be0d88ee70abc58467b76f2f7c8bfeb48735a71fe6/torch-2.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:7e1448426d0ba3620408218b50aa6ada88aeae34f7a239ba5431f6c8774b1239", size = 204120469 },
{ url = "https://files.pythonhosted.org/packages/81/b4/605ae4173aa37fb5aa14605d100ff31f4f5d49f617928c9f486bb3aaec08/torch-2.6.0-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:9a610afe216a85a8b9bc9f8365ed561535c93e804c2a317ef7fabcc5deda0989", size = 66532538 },
{ url = "https://files.pythonhosted.org/packages/c2/9c/fc5224e9770c83faed3a087112d73147cd7c7bfb7557dcf9ad87e1dda163/torch-2.6.0-cp313-cp313-win_amd64.whl", hash = "sha256:510c73251bee9ba02ae1cb6c9d4ee0907b3ce6020e62784e2d7598e0cfa4d6cc", size = 204126475 },
{ url = "https://files.pythonhosted.org/packages/88/8b/d60c0491ab63634763be1537ad488694d316ddc4a20eaadd639cedc53971/torch-2.6.0-cp313-none-macosx_11_0_arm64.whl", hash = "sha256:ff96f4038f8af9f7ec4231710ed4549da1bdebad95923953a25045dcf6fd87e2", size = 66536783 },
]

[[package]]
name = "torch"
version = "2.6.0+cu124"
source = { registry = "https://download.pytorch.org/whl/cu124" }
resolution-markers = [
"sys_platform == 'linux'",
]
dependencies = [
{ name = "filelock", marker = "sys_platform == 'linux'" },
{ name = "fsspec", marker = "sys_platform == 'linux'" },
{ name = "jinja2", marker = "sys_platform == 'linux'" },
{ name = "networkx", marker = "sys_platform == 'linux'" },
{ name = "nvidia-cublas-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cuda-cupti-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cuda-nvrtc-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cuda-runtime-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cufft-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-curand-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cusolver-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cusparse-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cusparselt-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-nvjitlink-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "setuptools", marker = "sys_platform == 'linux'" },
{ name = "sympy", marker = "sys_platform == 'linux'" },
{ name = "triton", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "typing-extensions", marker = "sys_platform == 'linux'" },
]
wheels = [
{ url = "https://download.pytorch.org/whl/cu124/torch-2.6.0%2Bcu124-cp312-cp312-linux_x86_64.whl", hash = "sha256:a393b506844035c0dac2f30ea8478c343b8e95a429f06f3b3cadfc7f53adb597" },
{ url = "https://download.pytorch.org/whl/cu124/torch-2.6.0%2Bcu124-cp313-cp313-linux_x86_64.whl", hash = "sha256:0f3bc53c988ce9568cd876a2a5316761e84a8704135ec8068f5f81b4417979cb" },
{ url = "https://download.pytorch.org/whl/cu124/torch-2.6.0%2Bcu124-cp313-cp313t-linux_x86_64.whl", hash = "sha256:35cba404c0d742406cdcba1609085874bc60facdfbc50e910c47a92405fef44c" },
]

[[package]]
name = "torchvision"
version = "0.21.0"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
"sys_platform != 'linux'",
]
dependencies = [
{ name = "numpy", marker = "sys_platform != 'linux'" },
{ name = "pillow", marker = "sys_platform != 'linux'" },
{ name = "torch", version = "2.6.0", source = { registry = "https://pypi.org/simple" }, marker = "sys_platform != 'linux'" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/6e/1b/28f527b22d5e8800184d0bc847f801ae92c7573a8c15979d92b7091c0751/torchvision-0.21.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:97a5814a93c793aaf0179cfc7f916024f4b63218929aee977b645633d074a49f", size = 1784140 },
{ url = "https://files.pythonhosted.org/packages/4c/6a/c7752603060d076dfed95135b78b047dc71792630cbcb022e3693d6f32ef/torchvision-0.21.0-cp312-cp312-win_amd64.whl", hash = "sha256:6eb75d41e3bbfc2f7642d0abba9383cc9ae6c5a4ca8d6b00628c225e1eaa63b3", size = 1560520 },
{ url = "https://files.pythonhosted.org/packages/f9/56/47d456b61c3bbce7bed4af3925c83d405bb87468e659fd3cf3d9840c3b51/torchvision-0.21.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:659b76c86757cb2ee4ca2db245e0740cfc3081fef46f0f1064d11adb4a8cee31", size = 1784141 },
{ url = "https://files.pythonhosted.org/packages/ed/b4/fc60e3bc003879d3de842baea258fffc3586f4b49cd435a5ba1e09c33315/torchvision-0.21.0-cp313-cp313-win_amd64.whl", hash = "sha256:9147f5e096a9270684e3befdee350f3cacafd48e0c54ab195f45790a9c146d67", size = 1560519 },
]

[[package]]
name = "torchvision"
version = "0.21.0+cu124"
source = { registry = "https://download.pytorch.org/whl/cu124" }
resolution-markers = [
"sys_platform == 'linux'",
]
dependencies = [
{ name = "numpy", marker = "sys_platform == 'linux'" },
{ name = "pillow", marker = "sys_platform == 'linux'" },
{ name = "torch", version = "2.6.0+cu124", source = { registry = "https://download.pytorch.org/whl/cu124" }, marker = "sys_platform == 'linux'" },
]
wheels = [
{ url = "https://download.pytorch.org/whl/cu124/torchvision-0.21.0%2Bcu124-cp312-cp312-linux_x86_64.whl", hash = "sha256:efb53ea0af7bf09b7b53e2a18b9be6d245f7d46a90b51d5cf97f37e9b929a991" },
{ url = "https://download.pytorch.org/whl/cu124/torchvision-0.21.0%2Bcu124-cp313-cp313-linux_x86_64.whl", hash = "sha256:4b70acf3b4b96a0ceb1374116626c9bef9e8be016b57b1284e482260ca1896d6" },
]

[[package]]
name = "torchvision-vibecoding-project"
version = "0.1.0"
source = { virtual = "." }
dependencies = [
{ name = "matplotlib" },
{ name = "numpy" },
{ name = "pillow" },
{ name = "pytest" },
{ name = "ruff" },
{ name = "torch", version = "2.6.0", source = { registry = "https://pypi.org/simple" }, marker = "sys_platform != 'linux'" },
{ name = "torch", version = "2.6.0+cu124", source = { registry = "https://download.pytorch.org/whl/cu124" }, marker = "sys_platform == 'linux'" },
{ name = "torchvision", version = "0.21.0", source = { registry = "https://pypi.org/simple" }, marker = "sys_platform != 'linux'" },
{ name = "torchvision", version = "0.21.0+cu124", source = { registry = "https://download.pytorch.org/whl/cu124" }, marker = "sys_platform == 'linux'" },
]

[package.dev-dependencies]
dev = [
{ name = "pre-commit" },
]

[package.metadata]
requires-dist = [
{ name = "matplotlib", specifier = ">=3.10.1" },
{ name = "numpy", specifier = ">=2.2.4" },
{ name = "pillow", specifier = ">=11.1.0" },
{ name = "pytest", specifier = ">=8.3.5" },
{ name = "ruff", specifier = ">=0.11.5" },
{ name = "torch", marker = "sys_platform != 'linux'", specifier = ">=2.6.0" },
{ name = "torch", marker = "sys_platform == 'linux'", specifier = ">=2.6.0", index = "https://download.pytorch.org/whl/cu124" },
{ name = "torchvision", marker = "sys_platform != 'linux'", specifier = ">=0.21.0" },
{ name = "torchvision", marker = "sys_platform == 'linux'", specifier = ">=0.21.0", index = "https://download.pytorch.org/whl/cu124" },
]

[package.metadata.requires-dev]
dev = [{ name = "pre-commit", specifier = ">=4.2.0" }]

[[package]]
name = "triton"
version = "3.2.0"
source = { registry = "https://pypi.org/simple" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/06/00/59500052cb1cf8cf5316be93598946bc451f14072c6ff256904428eaf03c/triton-3.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d9b215efc1c26fa7eefb9a157915c92d52e000d2bf83e5f69704047e63f125c", size = 253159365 },
{ url = "https://files.pythonhosted.org/packages/c7/30/37a3384d1e2e9320331baca41e835e90a3767303642c7a80d4510152cbcf/triton-3.2.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5dfa23ba84541d7c0a531dfce76d8bcd19159d50a4a8b14ad01e91734a5c1b0", size = 253154278 },
]

[[package]]
name = "typing-extensions"
version = "4.13.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/f6/37/23083fcd6e35492953e8d2aaaa68b860eb422b34627b13f2ce3eb6106061/typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef", size = 106967 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/8b/54/b1ae86c0973cc6f0210b53d508ca3641fb6d0c56823f288d108bc7ab3cc8/typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c", size = 45806 },
]

[[package]]
name = "virtualenv"
version = "20.30.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "distlib" },
{ name = "filelock" },
{ name = "platformdirs" },
]
sdist = { url = "https://files.pythonhosted.org/packages/38/e0/633e369b91bbc664df47dcb5454b6c7cf441e8f5b9d0c250ce9f0546401e/virtualenv-20.30.0.tar.gz", hash = "sha256:800863162bcaa5450a6e4d721049730e7f2dae07720e0902b0e4040bd6f9ada8", size = 4346945 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/4c/ed/3cfeb48175f0671ec430ede81f628f9fb2b1084c9064ca67ebe8c0ed6a05/virtualenv-20.30.0-py3-none-any.whl", hash = "sha256:e34302959180fca3af42d1800df014b35019490b119eba981af27f2fa486e5d6", size = 4329461 },
]