HaLim committed
Commit cd87ae5 · 0 Parent(s)
first push with real optimizer
Browse files:
- .gitignore +69 -0
- README.md +62 -0
- notebook/__init__.py +1 -0
- pyproject.toml +26 -0
- requirements.txt +256 -0
- src/__init__.py +1 -0
- src/config/__init__.py +0 -0
- src/config/optimization_config.py +37 -0
- src/models/__init__.py +1 -0
- src/models/optimizer_real.py +336 -0
- src/utils/__init__.py +1 -0
- src/utils/excel_to_csv_converter.py +126 -0
- src/utils/file_utils.py +21 -0
- src/visualization/Home.py +73 -0
- src/visualization/pages/1_optimize_viz.py +424 -0
- src/visualization/pages/2_metadata.py +300 -0
.gitignore
ADDED
@@ -0,0 +1,69 @@
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+*.egg
+*.egg-info/
+dist/
+build/
+.Python
+
+# Virtual Environment
+env/
+venv/
+.venv/
+.env
+.ENV/
+
+# IDE
+.vscode/
+.idea/
+*.swp
+*.swo
+*~
+
+# OS
+.DS_Store
+.DS_Store?
+._*
+.Spotlight-V100
+.Trashes
+ehthumbs.db
+Thumbs.db
+
+# Data files (usually large)
+*.csv
+*.xlsx
+*.xls
+data/*.csv
+data/*.xlsx
+data/*.xls
+
+# Results and outputs
+results/
+outputs/
+logs/
+*.log
+
+# Jupyter
+.ipynb_checkpoints/
+*.ipynb
+
+# Model files
+*.h5
+*.pkl
+*.joblib
+
+# Testing
+.pytest_cache/
+.coverage
+htmlcov/
+
+# Documentation
+docs/_build/
+
+# Large files
+*.zip
+*.tar.gz
+*.rar
README.md
ADDED
@@ -0,0 +1,62 @@
+# Supply Roster Optimization Tool
+
+A Python-based tool for optimizing supply roster scheduling using linear programming.
+
+## Features
+
+- Multi-day optimization with employee scheduling
+- Support for multiple product types and shift patterns
+- Configurable constraints for labor costs and productivity
+- OR-Tools integration for optimization
+- Streamlit and Gradio interfaces for visualization
+
+## Project Structure
+
+```
+├── src/
+│   ├── config/          # Configuration files
+│   ├── models/          # Optimization models
+│   ├── utils/           # Utility functions
+│   └── visualization/   # Web interfaces
+├── data/                # Data files (not tracked)
+├── results/             # Output files (not tracked)
+├── requirements.txt     # Python dependencies
+├── pyproject.toml       # Project configuration
+└── README.md
+
+```
+
+## Setup
+
+1. Create virtual environment:
+```bash
+python -m venv venv
+source venv/bin/activate  # On Windows: venv\Scripts\activate
+```
+
+2. Install dependencies:
+```bash
+pip install -r requirements.txt
+```
+
+## Usage
+
+```bash
+# Run the optimization
+python src/models/optimizer_real.py
+
+# Start web interface
+streamlit run src/visualization/Home.py
+```
+
+## Dependencies
+
+- Python >= 3.10
+- OR-Tools >= 9.4.0
+- Pandas >= 1.3.0
+- Streamlit >= 1.18.0
+- Plotly >= 5.8.0
+
+## License
+
+MIT License
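Since the README names linear programming with OR-Tools as the core technique, here is a self-contained toy sketch of the same CreateSolver / IntVar / Add / Minimize pattern the optimizer uses; the numbers and variable names are illustrative, not from the repo:

```python
from ortools.linear_solver import pywraplp

# Toy model: meet 100 units of demand at minimum labor cost.
solver = pywraplp.Solver.CreateSolver("CBC")
hours = solver.IntVar(0, 14, "hours")                 # worker-hours, capped per day
units = solver.NumVar(0, solver.infinity(), "units")  # output
solver.Add(units <= 10 * hours)                       # productivity: 10 units/hour
solver.Add(units >= 100)                              # demand
solver.Minimize(15 * hours)                           # $15/hour labor cost

if solver.Solve() == pywraplp.Solver.OPTIMAL:
    print(hours.solution_value(), solver.Objective().Value())  # 10.0 150.0
```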
notebook/__init__.py
ADDED
@@ -0,0 +1 @@
+import src
pyproject.toml
ADDED
@@ -0,0 +1,26 @@
+[project]
+name = "supply-roster-tool-real"
+version = "0.1.0"
+description = ""
+authors = [
+    {name = "HaLim Jun", email = "[email protected]"}
+]
+license = {text = "MIT"}
+readme = "README.md"
+requires-python = ">=3.10,<3.11"
+dependencies = [
+    "pandas>=2.1.3",
+    "ortools>=9.12.4544",
+    "numpy>=1.26.4",
+    "matplotlib>=3.8.0",
+    "seaborn>=0.13.2",
+    "scipy>=1.13.0",
+    "scikit-learn>=1.3.2",
+    "statsmodels>=0.14.4",
+    "plotly>=5.19.0",
+]
+
+
+[build-system]
+requires = ["poetry-core>=2.0.0,<3.0.0"]
+build-backend = "poetry.core.masonry.api"
requirements.txt
ADDED
@@ -0,0 +1,256 @@
+absl-py==2.2.2
+affine==2.4.0
+aiofiles==24.1.0
+aiohappyeyeballs==2.6.1
+aiohttp==3.12.6
+aiosignal==1.3.2
+altair==5.5.0
+annotated-types==0.7.0
+anyio==4.8.0
+appnope==0.1.4
+argon2-cffi==23.1.0
+argon2-cffi-bindings==21.2.0
+arrow==1.3.0
+asgiref==3.8.1
+asttokens==3.0.0
+async-lru==2.0.5
+async-timeout==5.0.1
+attrs==25.3.0
+babel==2.17.0
+backports.tarfile==1.2.0
+beautifulsoup4==4.13.3
+bleach==6.2.0
+blinker==1.9.0
+boto3==1.38.5
+botocore==1.38.5
+branca==0.8.1
+build==1.2.2.post1
+CacheControl==0.14.3
+cachetools==5.5.2
+cdsapi==0.7.6
+certifi==2025.1.31
+cffi==1.17.1
+cftime==1.6.4.post1
+chardet==5.2.0
+charset-normalizer==3.4.1
+cleo==2.1.0
+click==8.1.8
+click-plugins==1.1.1
+cligj==0.7.2
+comm==0.2.2
+contourpy==1.3.1
+crashtest==0.4.1
+cycler==0.12.1
+datasets==3.6.0
+debugpy==1.8.11
+decorator==5.1.1
+defusedxml==0.7.1
+dill==0.3.8
+distlib==0.3.9
+distro==1.9.0
+dulwich==0.22.8
+ecmwf-datastores-client==0.2.0
+et_xmlfile==2.0.0
+exactextract==0.2.1
+exceptiongroup==1.2.2
+executing==2.1.0
+Faker==37.1.0
+fastapi==0.115.12
+fastcore==1.8.0
+fastjsonschema==2.21.1
+fastprogress==1.0.3
+ffmpy==0.5.0
+filelock==3.18.0
+findpython==0.6.3
+fiona==1.10.1
+Flask==3.0.2
+Flask-Cors==4.0.0
+folium==0.19.5
+fonttools==4.56.0
+fqdn==1.5.1
+frozenlist==1.6.0
+fsspec==2025.3.0
+geopandas==1.0.1
+geowrangler==0.5.1
+gitdb==4.0.12
+GitPython==3.1.44
+gradio==5.29.1
+gradio_client==1.10.1
+groovy==0.1.2
+h11==0.14.0
+h3==4.2.2
+httpcore==1.0.7
+httpx==0.28.1
+huggingface-hub==0.31.2
+idna==3.10
+immutabledict==4.2.1
+importlib_metadata==8.7.0
+installer==0.7.0
+ipykernel==6.29.5
+ipython==8.31.0
+ipywidgets==8.1.5
+isoduration==20.11.0
+itsdangerous==2.2.0
+jaraco.classes==3.4.0
+jaraco.context==6.0.1
+jaraco.functools==4.1.0
+jedi==0.19.2
+Jinja2==3.1.5
+jiter==0.9.0
+jmespath==1.0.1
+joblib==1.4.2
+json5==0.12.0
+jsonpointer==3.0.0
+jsonschema==4.23.0
+jsonschema-specifications==2024.10.1
+jupyter==1.1.1
+jupyter-console==6.6.3
+jupyter-events==0.12.0
+jupyter-lsp==2.2.5
+jupyter_client==8.6.3
+jupyter_core==5.7.2
+jupyter_server==2.15.0
+jupyter_server_terminals==0.5.3
+jupyterlab==4.3.6
+jupyterlab_pygments==0.3.0
+jupyterlab_server==2.27.3
+jupyterlab_widgets==3.0.13
+keyring==25.6.0
+kiwisolver==1.4.8
+loguru==0.7.3
+mangum==0.19.0
+markdown-it-py==3.0.0
+MarkupSafe==3.0.2
+matplotlib==3.10.1
+matplotlib-inline==0.1.7
+mdurl==0.1.2
+mistune==3.1.3
+more-itertools==10.7.0
+morecantile==6.2.0
+msgpack==1.1.0
+multidict==6.4.4
+multiprocess==0.70.16
+multiurl==0.3.6
+narwhals==1.35.0
+nbclient==0.10.2
+nbconvert==7.16.6
+nbformat==5.10.4
+nest-asyncio==1.6.0
+netCDF4==1.7.2
+notebook==7.3.3
+notebook_shim==0.2.4
+numpy==2.2.5
+openai==1.66.3
+opencv-python==4.11.0.86
+openpyxl==3.1.5
+orjson==3.10.18
+ortools==9.12.4544
+overrides==7.7.0
+packaging==24.2
+pandas==2.2.3
+pandocfilters==1.5.1
+parso==0.8.4
+patsy==1.0.1
+pbs-installer==2025.5.17
+pexpect==4.9.0
+pillow==11.1.0
+pkginfo==1.12.1.2
+platformdirs==4.3.6
+plotly==6.1.0
+poetry==2.1.3
+poetry-core==2.1.3
+polars==1.26.0
+-e git+https://github.com/halim-jun/StudyMLOps.git@2e1b97c9d8196552a23dd5a4c536f25e53c033dc#egg=project
+prometheus_client==0.21.1
+prompt_toolkit==3.0.48
+propcache==0.3.1
+protobuf==5.29.4
+psutil==6.1.1
+ptyprocess==0.7.0
+pure_eval==0.2.3
+pyarrow==19.0.1
+pycparser==2.22
+pydantic==2.10.6
+pydantic_core==2.27.2
+pydeck==0.9.1
+pydub==0.25.1
+Pygments==2.19.1
+pyogrio==0.10.0
+pyparsing==3.2.3
+pyproj==3.7.1
+pyproject_hooks==1.2.0
+python-dateutil==2.9.0.post0
+python-dotenv==1.1.0
+python-json-logger==3.3.0
+python-multipart==0.0.20
+pytz==2025.2
+PyYAML==6.0.1
+pyzmq==26.2.0
+RapidFuzz==3.13.0
+rasterio==1.4.3
+rasterstats==0.20.0
+referencing==0.36.2
+regex==2024.11.6
+requests==2.32.3
+requests-toolbelt==1.0.0
+rfc3339-validator==0.1.4
+rfc3986-validator==0.1.1
+rich==14.0.0
+rpds-py==0.24.0
+ruff==0.11.10
+s3transfer==0.12.0
+safehttpx==0.1.6
+safetensors==0.5.3
+scikit-learn==1.6.1
+scipy==1.15.2
+seaborn==0.13.2
+semantic-version==2.10.0
+Send2Trash==1.8.3
+sentencepiece==0.2.0
+shapely==2.0.7
+shellingham==1.5.4
+simplejson==3.20.1
+six==1.17.0
+smmap==5.0.2
+sniffio==1.3.1
+soupsieve==2.6
+stack-data==0.6.3
+starlette==0.46.2
+statsmodels==0.14.4
+streamlit==1.44.1
+streamlit_folium==0.25.0
+tenacity==9.1.2
+terminado==0.18.1
+threadpoolctl==3.6.0
+tinycss2==1.4.0
+tokenizers==0.21.1
+toml==0.10.2
+tomli==2.2.1
+tomlkit==0.13.2
+tornado==6.4.2
+tqdm==4.67.1
+traitlets==5.14.3
+transformers==4.52.4
+trove-classifiers==2025.5.9.12
+typer==0.15.4
+types-python-dateutil==2.9.0.20241206
+typing_extensions==4.12.2
+tzdata==2025.2
+uri-template==1.3.0
+urllib3==2.3.0
+uvicorn==0.34.2
+virtualenv==20.31.2
+wcwidth==0.2.13
+webcolors==24.11.1
+webencodings==0.5.1
+websocket-client==1.8.0
+websockets==15.0.1
+Werkzeug==3.1.3
+widgetsnbextension==4.0.13
+xarray==2025.6.1
+xattr==1.1.4
+xxhash==3.5.0
+xyzservices==2025.1.0
+yarl==1.20.0
+zipp==3.21.0
+zstandard==0.23.0
src/__init__.py
ADDED
@@ -0,0 +1 @@
+
src/config/__init__.py
ADDED
File without changes
src/config/optimization_config.py
ADDED
@@ -0,0 +1,37 @@
+DATE_SPAN = list(range(1, 8))
+PRODUCT_LIST = ["a", "b", "c"]
+EMPLOYEE_LIST = ["x", "y"]
+SHIFT_LIST = [1, 2, 3]
+LINE_LIST = ["long", "short"]
+LINE_LIST_PER_TYPE = {"long": 2, "short": 3}
+DEMAND_LIST = {"a": 1000, "b": 600, "c": 400}
+COST_LIST_PER_EMP_SHIFT = {
+    "x": {1: 15, 2: 22, 3: 18},
+    "y": {1: 19, 2: 27, 3: 23},
+}
+PRODUCTIVITY_LIST_PER_EMP_PRODUCT = {
+    "x": {
+        1: {"a": 10, "b": 8, "c": 7},
+        2: {"a": 9, "b": 7, "c": 6},
+        3: {"a": 9, "b": 7, "c": 6},
+    },
+    "y": {
+        1: {"a": 8, "b": 6, "c": 5},
+        2: {"a": 7, "b": 6, "c": 5},
+        3: {"a": 7, "b": 6, "c": 5},
+    },
+}
+MAX_EMPLOYEE_PER_TYPE_ON_DAY = {
+    "x": {t: 5 for t in DATE_SPAN},  # EDIT: e.g., {'x': {1:5,2:5,...}, 'y': {1:6,...}}
+    "y": {t: 6 for t in DATE_SPAN},
+}
+
+MAX_HOUR_PER_PERSON_PER_DAY = 14  # legal standard
+MAX_HOUR_PER_SHIFT_PER_PERSON = {1: 8, 2: 4, 3: 6}
+CAP_PER_LINE_PER_HOUR = {
+    ("long", 1): 22,
+    ("long", 2): 22,
+    ("short", 1): 16,
+    ("short", 2): 16,
+    ("short", 3): 16,
+}
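One way to see how these tables interact before running the full model: the per-line hourly caps, shift-hour limits, and the seven-day span jointly bound weekly output. A minimal sanity-check sketch (a hypothetical helper, not part of this commit) that compares total weekly demand against a crude capacity upper bound:

```python
from src.config import optimization_config as cfg

def weekly_capacity_upper_bound():
    """Crude upper bound: every line runs at full cap for all shift hours, all week."""
    shift_hours = sum(cfg.MAX_HOUR_PER_SHIFT_PER_PERSON.values())  # 8 + 4 + 6 = 18 h/day
    days = len(cfg.DATE_SPAN)                                      # 7 days
    return sum(cap * shift_hours * days for cap in cfg.CAP_PER_LINE_PER_HOUR.values())

demand = sum(cfg.DEMAND_LIST.values())  # 1000 + 600 + 400 = 2000
print(f"demand={demand}, capacity upper bound={weekly_capacity_upper_bound()}")
```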
src/models/__init__.py
ADDED
@@ -0,0 +1 @@
+# This file makes the models directory a Python package
src/models/optimizer_real.py
ADDED
@@ -0,0 +1,336 @@
+# Option A (with lines) + 7-day horizon (weekly demand only)
+# Generalized: arbitrary products (P_all) and day-varying headcount N_day[e][t]
+# -----------------------------------------------------------------------------
+# pip install ortools
+from ortools.linear_solver import pywraplp
+import pandas as pd
+import sys
+import os
+
+sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
+
+from src.config import optimization_config
+
+
+class OptimizerReal:
+    def __init__(self):
+        self.config = optimization_config
+
+    def solve_option_A_multi_day_generalized(self):
+        # -----------------------------
+        # 1) SETS
+        # -----------------------------
+        # Days
+        D = self.config.DATE_SPAN
+
+        # Products (master set; you can have many)
+        # Fill with all SKUs that may appear over the week
+        P_all = self.config.PRODUCT_LIST  # EDIT: add/remove products freely
+
+        # Employee types (fixed to two types x,y; headcount varies by day)
+        E = self.config.EMPLOYEE_LIST
+
+        # Shifts: 1=usual, 2=overtime, 3=evening
+        S = self.config.SHIFT_LIST
+
+        # Line types and explicit line list
+        T_line = self.config.LINE_LIST
+        K = self.config.LINE_LIST_PER_TYPE  # number of physical lines per type (EDIT)
+        L = [
+            (t, i) for t in T_line for i in range(1, K[t] + 1)
+        ]  # pair of line type and line number (e.g., ('long', 1))
+
+        # -----------------------------
+        # 2) PARAMETERS (EDIT THESE)
+        # -----------------------------
+        # Weekly demand (units) for each product in P_all
+        d_week = self.config.DEMAND_LIST
+
+        # Daily activity toggle for each product (1=can be produced on day t; 0=cannot)
+        # If a product is not active on a day, we force its production and hours to 0 that day.
+        active = {
+            t: {p: 1 for p in P_all} for t in D
+        }  # EDIT per day if some SKUs are not available
+
+        # Per-hour labor cost by employee type & shift
+        c = self.config.COST_LIST_PER_EMP_SHIFT
+
+        # Productivity q[e][s][p] = units per hour (assumed line-independent here)
+        # Provide entries for ALL products in P_all
+        q = self.config.PRODUCTIVITY_LIST_PER_EMP_PRODUCT
+        # If productivity depends on line, switch to q_line[(e,s,p,ell)] and use it in constraints.
+
+        # Day-varying available headcount per type
+        # N_day[e][t] = number of employees of type e available on day t
+        N_day = self.config.MAX_EMPLOYEE_PER_TYPE_ON_DAY
+
+        # Limits
+        Hmax_daily_per_person = (
+            self.config.MAX_HOUR_PER_PERSON_PER_DAY
+        )  # per person per day
+        Hmax_s = self.config.MAX_HOUR_PER_SHIFT_PER_PERSON  # per-shift hour caps
+        # Per-line unit/hour capacity (physical)
+        Cap = self.config.CAP_PER_LINE_PER_HOUR
+
+        # Fixed regular hours for type x on shift 1
+        # Choose either PER-DAY values or a single PER-WEEK total.
+        # Common in practice: per-day fixed hours (regulars show up daily).
+        F_x1_day = {
+            t: 8 * N_day["x"][t] for t in D
+        }  # EDIT if different from "all regulars do full usual shift"
+        F_x1_week = None  # e.g., sum(F_x1_day.values()) if you want weekly instead (then set F_x1_day=None)
+
+        # Optional skill/compatibility: allow[(e,p,ell)] = 1/0 (1=allowed; 0=forbid)
+        allow = {}
+        for e in E:
+            for p in P_all:
+                for ell in L:
+                    allow[(e, p, ell)] = 1  # EDIT as needed
+
+        # -----------------------------
+        # 3) SOLVER
+        # -----------------------------
+        solver = pywraplp.Solver.CreateSolver("CBC")  # or 'SCIP' if available
+        if not solver:
+            raise RuntimeError("Failed to create solver. Check OR-Tools installation.")
+        INF = solver.infinity()
+
+        # -----------------------------
+        # 4) DECISION VARIABLES
+        # -----------------------------
+        # h[e,s,p,ell,t] = worker-hours of type e on shift s for product p on line ell on day t (integer)
+        h = {}
+        for e in E:
+            for s in S:
+                for p in P_all:
+                    for ell in L:
+                        for t in D:
+                            # Upper bound per (e,s,t): shift cap * available headcount that day
+                            ub = Hmax_s[s] * N_day[e][t]
+                            h[e, s, p, ell, t] = solver.IntVar(
+                                0, ub, f"h_{e}_{s}_{p}_{ell[0]}{ell[1]}_d{t}"
+                            )
+
+        # u[p,ell,s,t] = units of product p produced on line ell during shift s on day t
+        u = {}
+        for p in P_all:
+            for ell in L:
+                for s in S:
+                    for t in D:
+                        u[p, ell, s, t] = solver.NumVar(
+                            0, INF, f"u_{p}_{ell[0]}{ell[1]}_{s}_d{t}"
+                        )
+
+        # tline[ell,s,t] = operating hours of line ell during shift s on day t
+        tline = {}
+        for ell in L:
+            for s in S:
+                for t in D:
+                    tline[ell, s, t] = solver.NumVar(
+                        0, Hmax_s[s], f"t_{ell[0]}{ell[1]}_{s}_d{t}"
+                    )
+
+        # ybin[e,s,t] = shift usage binaries per type/day (to gate OT after usual)
+        ybin = {}
+        for e in E:
+            for s in S:
+                for t in D:
+                    ybin[e, s, t] = solver.BoolVar(f"y_{e}_{s}_d{t}")
+
+        # -----------------------------
+        # 5) OBJECTIVE: Minimize total labor cost over the week
+        # -----------------------------
+        solver.Minimize(
+            solver.Sum(
+                c[e][s] * h[e, s, p, ell, t]
+                for e in E
+                for s in S
+                for p in P_all
+                for ell in L
+                for t in D
+            )
+        )
+
+        # -----------------------------
+        # 6) CONSTRAINTS
+        # -----------------------------
+
+        # 6.1 Weekly demand (no daily demand)
+        for p in P_all:
+            solver.Add(
+                solver.Sum(u[p, ell, s, t] for ell in L for s in S for t in D)
+                >= d_week.get(p, 0)
+            )
+
+        # 6.2 If a product is inactive on a day, force zero production and hours for that day
+        # This makes "varying products per day" explicit.
+        BIG_H = max(Hmax_s.values()) * sum(N_day[e][t] for e in E for t in D)
+        for p in P_all:
+            for t in D:
+                if active[t][p] == 0:
+                    for ell in L:
+                        for s in S:
+                            solver.Add(u[p, ell, s, t] == 0)
+                            for e in E:
+                                solver.Add(h[e, s, p, ell, t] == 0)
+
+        # 6.3 Labor -> units (per line/shift/day)
+        # If productivity depends on line, swap q[e][s][p] with q_line[(e,s,p,ell)] here.
+        for p in P_all:
+            for ell in L:
+                for s in S:
+                    for t in D:
+                        # Gate by activity (if inactive, both sides are already 0 from 6.2)
+                        solver.Add(
+                            u[p, ell, s, t]
+                            <= solver.Sum(q[e][s][p] * h[e, s, p, ell, t] for e in E)
+                        )
+
+        # 6.4 Per-line throughput cap (units/hour × line-hours)
+        for ell in L:
+            for s in S:
+                for t in D:
+                    solver.Add(
+                        solver.Sum(u[p, ell, s, t] for p in P_all)
+                        <= Cap[ell] * tline[ell, s, t]
+                    )
+
+        # 6.5 Couple line hours & worker-hours (single-operator lines → tight equality)
+        for ell in L:
+            for s in S:
+                for t in D:
+                    solver.Add(
+                        tline[ell, s, t]
+                        == solver.Sum(h[e, s, p, ell, t] for e in E for p in P_all)
+                    )
+        # If multi-operator lines (up to Wmax[ell] concurrent workers), replace above with:
+        # Wmax = {ell: 2, ...}
+        # for ell in L:
+        #     for s in S:
+        #         for t in D:
+        #             solver.Add(
+        #                 solver.Sum(h[e, s, p, ell, t] for e in E for p in P_all) <= Wmax[ell] * tline[ell, s, t]
+        #             )
+
+        # 6.6 Fixed regular hours for type x on shift 1
+        if F_x1_day is not None:
+            # Per-day fixed hours
+            for t in D:
+                solver.Add(
+                    solver.Sum(h["x", 1, p, ell, t] for p in P_all for ell in L)
+                    == F_x1_day[t]
+                )
+        elif F_x1_week is not None:
+            # Per-week fixed hours
+            solver.Add(
+                solver.Sum(h["x", 1, p, ell, t] for p in P_all for ell in L for t in D)
+                == F_x1_week
+            )
+        else:
+            raise ValueError(
+                "Specify either F_x1_day (dict by day) or F_x1_week (scalar)."
+            )
+
+        # 6.7 Daily hours cap per employee type (14h per person per day)
+        for e in E:
+            for t in D:
+                solver.Add(
+                    solver.Sum(
+                        h[e, s, p, ell, t] for s in S for p in P_all for ell in L
+                    )
+                    <= Hmax_daily_per_person * N_day[e][t]
+                )
+
+        # 6.8 Link hours to shift-usage binaries (per type/day)
+        # Use a type/day-specific Big-M: M_e_s_t = Hmax_s[s] * N_day[e][t]
+        for e in E:
+            for s in S:
+                for t in D:
+                    M_e_s_t = Hmax_s[s] * N_day[e][t]
+                    solver.Add(
+                        solver.Sum(h[e, s, p, ell, t] for p in P_all for ell in L)
+                        <= M_e_s_t * ybin[e, s, t]
+                    )
+
+        # 6.9 Overtime only after usual (per day). Also bound OT hours <= usual hours
+        for e in E:
+            for t in D:
+                solver.Add(ybin[e, 2, t] <= ybin[e, 1, t])
+                solver.Add(
+                    solver.Sum(h[e, 2, p, ell, t] for p in P_all for ell in L)
+                    <= solver.Sum(h[e, 1, p, ell, t] for p in P_all for ell in L)
+                )
+        # (Optional) evening only after usual:
+        # for e in E:
+        #     for t in D:
+        #         solver.Add(ybin[e, 3, t] <= ybin[e, 1, t])
+
+        # 6.10 Skill/compatibility mask
+        for e in E:
+            for p in P_all:
+                for ell in L:
+                    if allow[(e, p, ell)] == 0:
+                        for s in S:
+                            for t in D:
+                                solver.Add(h[e, s, p, ell, t] == 0)
+
+        # -----------------------------
+        # 7) SOLVE
+        # -----------------------------
+        status = solver.Solve()
+        if status != pywraplp.Solver.OPTIMAL:
+            print("No optimal solution. Status:", status)
+            return
+
+        # -----------------------------
+        # 8) REPORT
+        # -----------------------------
+        print("Objective (min cost):", solver.Objective().Value())
+
+        print("\n--- Weekly production by product ---")
+        for p in P_all:
+            produced = sum(
+                u[p, ell, s, t].solution_value() for ell in L for s in S for t in D
+            )
+            print(f"{p}: {produced:.1f} (weekly demand {d_week.get(p, 0)})")
+
+        print("\n--- Line operating hours by shift/day ---")
+        for ell in L:
+            for s in S:
+                hours = [tline[ell, s, t].solution_value() for t in D]
+                if sum(hours) > 1e-6:
+                    print(
+                        f"Line {ell} Shift {s}: "
+                        + ", ".join([f"D{t}={hours[t-1]:.2f}h" for t in D])
+                    )
+
+        print("\n--- Hours by employee type / shift / day ---")
+        for e in E:
+            for s in S:
+                day_hours = [
+                    sum(h[e, s, p, ell, t].solution_value() for p in P_all for ell in L)
+                    for t in D
+                ]
+                if sum(day_hours) > 1e-6:
+                    print(
+                        f"e={e}, s={s}: "
+                        + ", ".join([f"D{t}={day_hours[t-1]:.2f}h" for t in D])
+                    )
+
+        print("\n--- Implied headcount by type / shift / day ---")
+        for e in E:
+            for s in S:
+                row = []
+                for t in D:
+                    hours = sum(
+                        h[e, s, p, ell, t].solution_value() for p in P_all for ell in L
+                    )
+                    need = int((hours + Hmax_s[s] - 1) // Hmax_s[s])  # ceil
+                    row.append(f"D{t}={need}")
+                if any("=0" not in x for x in row):
+                    print(f"e={e}, s={s}: " + ", ".join(row))
+
+
+if __name__ == "__main__":
+    optimizer = OptimizerReal()
+    optimizer.solve_option_A_multi_day_generalized()
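A note on the solve step in section 7: with a time or gap limit, CBC may also stop at pywraplp.Solver.FEASIBLE, which the script above would discard. A hedged variant (a sketch, not the commit's behavior) that keeps such solutions:

```python
from ortools.linear_solver import pywraplp

def report(solver):
    """Accept proven-optimal and feasible-but-unproven solutions alike (sketch)."""
    status = solver.Solve()
    if status == pywraplp.Solver.OPTIMAL:
        print("Optimal objective:", solver.Objective().Value())
    elif status == pywraplp.Solver.FEASIBLE:
        print("Feasible (not proven optimal):", solver.Objective().Value())
    else:
        print("No solution found. Status:", status)
```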
src/utils/__init__.py
ADDED
@@ -0,0 +1 @@
+"""Utility functions and classes for the supply roster tool."""
src/utils/excel_to_csv_converter.py
ADDED
@@ -0,0 +1,126 @@
+import pandas as pd
+import os
+from pathlib import Path
+
+def analyze_excel_structure(excel_path):
+    """
+    Analyze the structure of an Excel file and return sheet information.
+
+    Args:
+        excel_path (str): Path to the Excel file
+
+    Returns:
+        dict: Dictionary with sheet names and their basic info
+    """
+    try:
+        # Read Excel file to get all sheet names
+        excel_file = pd.ExcelFile(excel_path)
+        sheet_info = {}
+
+        print(f"🔍 Analyzing Excel file: {excel_path}")
+        print(f"📊 Found {len(excel_file.sheet_names)} sheets:")
+        print("-" * 50)
+
+        for i, sheet_name in enumerate(excel_file.sheet_names, 1):
+            # Read each sheet to get basic information
+            df = pd.read_excel(excel_path, sheet_name=sheet_name)
+
+            sheet_info[sheet_name] = {
+                'rows': len(df),
+                'columns': len(df.columns),
+                'column_names': list(df.columns)
+            }
+
+            print(f"{i}. Sheet: '{sheet_name}'")
+            print(f"   - Rows: {len(df)}")
+            print(f"   - Columns: {len(df.columns)}")
+            print(f"   - Column names: {list(df.columns)}")
+            print()
+
+        return sheet_info
+
+    except Exception as e:
+        print(f"❌ Error analyzing Excel file: {e}")
+        return None
+
+def convert_excel_to_csv(excel_path, output_dir=None):
+    """
+    Convert each sheet of an Excel file to a separate CSV file.
+
+    Args:
+        excel_path (str): Path to the Excel file
+        output_dir (str): Output directory for CSV files. If None, uses same directory as Excel file
+    """
+    try:
+        # Set up output directory
+        if output_dir is None:
+            output_dir = os.path.dirname(excel_path)
+
+        # Create output directory if it doesn't exist
+        Path(output_dir).mkdir(parents=True, exist_ok=True)
+
+        # Read Excel file
+        excel_file = pd.ExcelFile(excel_path)
+
+        print(f"🔄 Converting Excel sheets to CSV...")
+        print(f"📁 Output directory: {output_dir}")
+        print("-" * 50)
+
+        converted_files = []
+
+        for i, sheet_name in enumerate(excel_file.sheet_names, 1):
+            # Read the sheet
+            df = pd.read_excel(excel_path, sheet_name=sheet_name)
+
+            # Create a safe filename for the CSV
+            safe_filename = "".join(c for c in sheet_name if c.isalnum() or c in (' ', '-', '_')).rstrip()
+            safe_filename = safe_filename.replace(' ', '_')
+            csv_filename = f"{safe_filename}.csv"
+            csv_path = os.path.join(output_dir, csv_filename)
+
+            # Save as CSV
+            df.to_csv(csv_path, index=False, encoding='utf-8')
+            converted_files.append(csv_path)
+
+            print(f"✅ {i}. '{sheet_name}' → {csv_filename}")
+            print(f"   - Saved {len(df)} rows, {len(df.columns)} columns")
+
+        print(f"\n🎉 Successfully converted {len(converted_files)} sheets to CSV files!")
+        return converted_files
+
+    except Exception as e:
+        print(f"❌ Error converting Excel to CSV: {e}")
+        return None
+
+def main():
+    """Main function to analyze and convert Excel file"""
+
+    # Define paths
+    excel_path = "data/real_data_excel/AI Project document.xlsx"
+    output_dir = "data/converted_csv"
+
+    # Check if Excel file exists
+    if not os.path.exists(excel_path):
+        print(f"❌ Excel file not found: {excel_path}")
+        return
+
+    print("=" * 60)
+    print("📄 EXCEL TO CSV CONVERTER")
+    print("=" * 60)
+
+    # Step 1: Analyze Excel structure
+    sheet_info = analyze_excel_structure(excel_path)
+
+    if sheet_info is None:
+        return
+
+    # Step 2: Convert to CSV
+    converted_files = convert_excel_to_csv(excel_path, output_dir)
+
+    if converted_files:
+        print("\n📋 Converted files:")
+        for file_path in converted_files:
+            print(f"  - {file_path}")
+
+if __name__ == "__main__":
+    main()
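For use outside the hard-coded main() paths, the two functions can be imported directly; a usage sketch with placeholder paths:

```python
# Hypothetical usage; the workbook path is a placeholder, not a file in this repo.
from src.utils.excel_to_csv_converter import analyze_excel_structure, convert_excel_to_csv

info = analyze_excel_structure("data/some_workbook.xlsx")
if info:
    convert_excel_to_csv("data/some_workbook.xlsx", output_dir="data/converted_csv")
```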
src/utils/file_utils.py
ADDED
@@ -0,0 +1,21 @@
+import os
+
+
+def save_dataframes(dataframes, target, save_path):
+    """
+    Save pandas DataFrames to CSV files.
+
+    Args:
+        dataframes (dict): Dictionary mapping dataset names to DataFrames
+        target (str): Target dataset name to save, or 'all' to save all
+        save_path (str): Path to save the CSV files
+    """
+    os.makedirs(save_path, exist_ok=True)
+
+    if target == "all":
+        for name, df in dataframes.items():
+            df.to_csv(os.path.join(save_path, f"{name}.csv"), index=False)
+    elif target in dataframes:
+        dataframes[target].to_csv(os.path.join(save_path, f"{target}.csv"), index=False)
+    else:
+        raise ValueError(f"Unknown target: {target}")
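A usage sketch for save_dataframes; the DataFrames here are illustrative:

```python
import pandas as pd
from src.utils.file_utils import save_dataframes

frames = {
    "orders": pd.DataFrame({"id": [1, 2]}),
    "employees": pd.DataFrame({"id": [10]}),
}
save_dataframes(frames, target="all", save_path="data/out")     # writes orders.csv and employees.csv
save_dataframes(frames, target="orders", save_path="data/out")  # writes only orders.csv
```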
src/visualization/Home.py
ADDED
@@ -0,0 +1,73 @@
+import streamlit as st
+
+# Page configuration
+st.set_page_config(
+    page_title="Supply Roster Tool",
+    page_icon="🏠",
+    layout="wide",
+    initial_sidebar_state="expanded"
+)
+
+# Initialize session state for shared variables
+if 'data_path' not in st.session_state:
+    st.session_state.data_path = "data/my_roster_data"
+if 'target_date' not in st.session_state:
+    st.session_state.target_date = ""
+
+# Main page content
+st.title("📅 Supply Roster Optimization Tool")
+st.markdown("---")
+
+# Welcome section
+
+
+col1, col2 = st.columns([1, 1])
+
+with col1:
+    st.markdown("""
+    ## 👋 Welcome to Supply Roster Tool
+
+    """)
+
+with col2:
+    st.image("images/POC_page/POC_SupplyRoster_image.png",
+             caption="Supply Roster Tool Overview",
+             use_container_width=True)
+
+# Global settings in sidebar
+with st.sidebar:
+    st.markdown("## ⚙️ Global Settings")
+    st.markdown("The setting will be shared across all pages")
+
+    # Data path setting
+    new_data_path = st.text_input(
+        "📁 Data Path",
+        value=st.session_state.data_path,
+        help="The data path will be shared across all pages"
+    )
+
+    if new_data_path != st.session_state.data_path:
+        st.session_state.data_path = new_data_path
+        st.success("✅ Data path updated!")
+
+    st.markdown(f"**Current data path:** `{st.session_state.data_path}`")
+
+    # Quick navigation
+    st.markdown("## 🧭 Quick Navigation")
+    if st.button("🎯 Go to Optimization", use_container_width=True):
+        st.switch_page("pages/1_optimize_viz.py")
+
+    if st.button("📊 Go to Dataset Overview", use_container_width=True):
+        st.switch_page("pages/2_metadata.py")
+
+# Main content area
+st.markdown("---")
+
+
+# Footer
+st.markdown("---")
+st.markdown("""
+<div style='text-align: center; color: gray;'>
+<small>Supply Roster Optimization Tool | Built with Streamlit</small>
+</div>
+""", unsafe_allow_html=True)
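The session-state keys seeded here (data_path, target_date) are what the pages under pages/ read back; a minimal sketch of the consuming side (hypothetical page code, not from this commit):

```python
import streamlit as st

# Any page in pages/ sees the values Home.py stored in st.session_state
# during the same browser session.
data_path = st.session_state.get("data_path", "data/my_roster_data")
st.write(f"Using data from: {data_path}")
```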
src/visualization/pages/1_optimize_viz.py
ADDED
@@ -0,0 +1,424 @@
| 1 |
+
import sys
|
| 2 |
+
import os
|
| 3 |
+
import pandas as pd
|
| 4 |
+
import streamlit as st
|
| 5 |
+
import plotly.express as px
|
| 6 |
+
from datetime import datetime
|
| 7 |
+
|
| 8 |
+
# Add parent directory to path to import LaborOptimizer
|
| 9 |
+
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
|
| 10 |
+
from optimization.labor_optimizer import LaborOptimizer
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def get_available_dates(data_path):
|
| 16 |
+
"""Load the orders data and extract unique dates"""
|
| 17 |
+
try:
|
| 18 |
+
orders_file = os.path.join(data_path, "orders.csv")
|
| 19 |
+
if os.path.exists(orders_file):
|
| 20 |
+
orders_df = pd.read_csv(orders_file)
|
| 21 |
+
if "due_date" in orders_df.columns:
|
| 22 |
+
# Convert to datetime and extract unique dates
|
| 23 |
+
dates = pd.to_datetime(orders_df["due_date"]).dt.date.unique()
|
| 24 |
+
# Sort dates in descending order (most recent first)
|
| 25 |
+
dates = sorted(dates, reverse=True)
|
| 26 |
+
return dates
|
| 27 |
+
except Exception as e:
|
| 28 |
+
st.error(f"Error loading dates: {str(e)}")
|
| 29 |
+
return []
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def get_metadata_stats(optimizer, target_date=None):
|
| 33 |
+
"""
|
| 34 |
+
Aggregate metadata statistics about employee costs and availability
|
| 35 |
+
|
| 36 |
+
Args:
|
| 37 |
+
optimizer: LaborOptimizer instance
|
| 38 |
+
target_date: Target date for availability analysis
|
| 39 |
+
|
| 40 |
+
Returns:
|
| 41 |
+
dict: Dictionary containing various statistics
|
| 42 |
+
"""
|
| 43 |
+
try:
|
| 44 |
+
# Employee type costs
|
| 45 |
+
employee_types_df = optimizer.employee_types_df
|
| 46 |
+
costs_data = []
|
| 47 |
+
for _, row in employee_types_df.iterrows():
|
| 48 |
+
costs_data.append({
|
| 49 |
+
'Employee Type': row['type_name'].title(),
|
| 50 |
+
'Usual Cost ($/hr)': f"${row['usual_cost']:.2f}",
|
| 51 |
+
'Overtime Cost ($/hr)': f"${row['overtime_cost']:.2f}",
|
| 52 |
+
'Evening Shift Cost ($/hr)': f"${row['evening_shift_cost']:.2f}",
|
| 53 |
+
'Max Hours': row['max_hours'],
|
| 54 |
+
'Unit Manpower/Hr': row['unit_productivity_per_hour']
|
| 55 |
+
})
|
| 56 |
+
|
| 57 |
+
# Shift hours information
|
| 58 |
+
shift_hours = optimizer._get_shift_hours()
|
| 59 |
+
shift_data = []
|
| 60 |
+
for shift_type, hours in shift_hours.items():
|
| 61 |
+
shift_data.append({
|
| 62 |
+
'Shift Type': shift_type.replace('_', ' ').title(),
|
| 63 |
+
'Duration (hours)': f"{hours:.1f}"
|
| 64 |
+
})
|
| 65 |
+
|
| 66 |
+
# Employee availability for target date
|
| 67 |
+
availability_data = []
|
| 68 |
+
if target_date:
|
| 69 |
+
target_date_str = pd.to_datetime(target_date).strftime("%Y-%m-%d")
|
| 70 |
+
else:
|
| 71 |
+
# Use most recent date if no target date specified, but show warning
|
| 72 |
+
target_date_str = pd.to_datetime(optimizer.orders_df["due_date"]).max().strftime("%Y-%m-%d")
|
| 73 |
+
st.warning("β οΈ No target date specified. Using the most recent order date for analysis. Please select a specific target date for accurate availability data.")
|
| 74 |
+
|
| 75 |
+
availability_target_date = optimizer.employee_availability_df[
|
| 76 |
+
optimizer.employee_availability_df["date"] == target_date_str
|
| 77 |
+
]
|
| 78 |
+
|
| 79 |
+
employee_availability = optimizer.employees_df.merge(
|
| 80 |
+
availability_target_date, left_on="id", right_on="employee_id", how="left"
|
| 81 |
+
)
|
| 82 |
+
|
| 83 |
+
for emp_type in optimizer.employee_types_df["type_name"]:
|
| 84 |
+
emp_type_data = employee_availability[
|
| 85 |
+
employee_availability["type_name"] == emp_type
|
| 86 |
+
]
|
| 87 |
+
|
| 88 |
+
if not emp_type_data.empty:
|
| 89 |
+
first_shift_available = emp_type_data["first_shift_available"].sum()
|
| 90 |
+
second_shift_available = emp_type_data["second_shift_available"].sum()
|
| 91 |
+
overtime_available = emp_type_data["overtime_available"].sum()
|
| 92 |
+
total_employees = len(emp_type_data)
|
| 93 |
+
else:
|
| 94 |
+
first_shift_available = second_shift_available = overtime_available = total_employees = 0
|
| 95 |
+
|
| 96 |
+
availability_data.append({
|
| 97 |
+
'Employee Type': emp_type.title(),
|
| 98 |
+
'Total Employees': total_employees,
|
| 99 |
+
'Usual Time Available': first_shift_available,
|
| 100 |
+
'Evening Shift Available': second_shift_available,
|
| 101 |
+
'Overtime Available': overtime_available
|
| 102 |
+
})
|
| 103 |
+
|
| 104 |
+
# Overall statistics
|
| 105 |
+
total_employees = len(optimizer.employees_df)
|
| 106 |
+
total_employee_types = len(optimizer.employee_types_df)
|
| 107 |
+
total_orders = len(optimizer.orders_df)
|
| 108 |
+
|
| 109 |
+
return {
|
| 110 |
+
'costs_data': costs_data,
|
| 111 |
+
'shift_data': shift_data,
|
| 112 |
+
'availability_data': availability_data,
|
| 113 |
+
'overall_stats': {
|
| 114 |
+
'Total Employees': total_employees,
|
| 115 |
+
'Employee Types': total_employee_types,
|
| 116 |
+
'Total Orders': total_orders,
|
| 117 |
+
'Analysis Date': target_date_str,
|
| 118 |
+
'is_default_date': not bool(target_date)
|
| 119 |
+
}
|
| 120 |
+
}
|
| 121 |
+
|
| 122 |
+
except Exception as e:
|
| 123 |
+
st.error(f"Error generating metadata: {str(e)}")
|
| 124 |
+
return None
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
def display_metadata_section(metadata):
|
| 128 |
+
"""Display metadata in organized sections"""
|
| 129 |
+
if not metadata:
|
| 130 |
+
return
|
| 131 |
+
|
| 132 |
+
# Make the entire Dataset Overview section collapsible
|
| 133 |
+
with st.expander("π Dataset Overview", expanded=False):
|
| 134 |
+
# Overall statistics
|
| 135 |
+
st.write("Information on the date chosen - not an optimization report") # df, err, func, keras!
|
| 136 |
+
col1, col2, col3, col4 = st.columns(4)
|
| 137 |
+
with col1:
|
| 138 |
+
st.metric("Total Employees Available", metadata['overall_stats']['Total Employees'])
|
| 139 |
+
with col2:
|
| 140 |
+
st.metric("Employee Types Available", metadata['overall_stats']['Employee Types'])
|
| 141 |
+
with col3:
|
| 142 |
+
st.metric("Total Orders", metadata['overall_stats']['Total Orders'])
|
| 143 |
+
with col4:
|
| 144 |
+
analysis_date = metadata['overall_stats']['Analysis Date']
|
| 145 |
+
if metadata['overall_stats'].get('is_default_date', False):
|
| 146 |
+
st.metric("Analysis Date", f"{analysis_date} β οΈ", help="Using most recent order date - select specific date for accurate analysis")
|
| 147 |
+
else:
|
| 148 |
+
st.metric("Analysis Date", analysis_date)
|
| 149 |
+
|
| 150 |
+
# Create tabs for different metadata sections
|
| 151 |
+
tab1, tab2, tab3 = st.tabs(["π° Employee Costs", "π Shift Information", "π₯ Availability"])
|
| 152 |
+
|
| 153 |
+
with tab1:
|
| 154 |
+
st.subheader("Employee Type Costs")
|
| 155 |
+
costs_df = pd.DataFrame(metadata['costs_data'])
|
| 156 |
+
st.dataframe(costs_df, use_container_width=True)
|
| 157 |
+
|
| 158 |
+
# Cost comparison chart
|
| 159 |
+
costs_for_chart = []
|
| 160 |
+
for item in metadata['costs_data']:
|
| 161 |
+
emp_type = item['Employee Type']
|
| 162 |
+
costs_for_chart.extend([
|
| 163 |
+
{'Employee Type': emp_type, 'Cost Type': 'Usual', 'Cost': float(item['Usual Cost ($/hr)'].replace('$', ''))},
|
| 164 |
+
{'Employee Type': emp_type, 'Cost Type': 'Overtime', 'Cost': float(item['Overtime Cost ($/hr)'].replace('$', ''))},
|
| 165 |
+
{'Employee Type': emp_type, 'Cost Type': 'Evening', 'Cost': float(item['Evening Shift Cost ($/hr)'].replace('$', ''))}
|
| 166 |
+
])
|
| 167 |
+
|
| 168 |
+
chart_df = pd.DataFrame(costs_for_chart)
|
| 169 |
+
fig = px.bar(chart_df, x='Employee Type', y='Cost', color='Cost Type',
|
| 170 |
+
title='Hourly Costs by Employee Type and Shift',
|
| 171 |
+
barmode='group')
|
| 172 |
+
st.plotly_chart(fig, use_container_width=True)
|
| 173 |
+
|
| 174 |
+
with tab2:
|
| 175 |
+
st.subheader("Shift Duration Information")
|
| 176 |
+
shift_df = pd.DataFrame(metadata['shift_data'])
|
| 177 |
+
st.dataframe(shift_df, use_container_width=True)
|
| 178 |
+
|
| 179 |
+
# Shift duration chart
|
| 180 |
+
fig2 = px.bar(shift_df, x='Shift Type', y='Duration (hours)',
|
| 181 |
+
title='Shift Duration by Type')
|
| 182 |
+
st.plotly_chart(fig2, use_container_width=True)
|
| 183 |
+
|
| 184 |
+
with tab3:
|
| 185 |
+
st.subheader("Employee Availability")
|
| 186 |
+
availability_df = pd.DataFrame(metadata['availability_data'])
|
| 187 |
+
st.dataframe(availability_df, use_container_width=True)
|
| 188 |
+
|
| 189 |
+
# # Availability chart
|
| 190 |
+
# availability_chart_data = []
|
| 191 |
+
# for item in metadata['availability_data']:
|
| 192 |
+
# emp_type = item['Employee Type']
|
| 193 |
+
# availability_chart_data.extend([
|
| 194 |
+
# {'Employee Type': emp_type, 'Shift Type': 'Usual Time', 'Available': item['Usual Time Available']},
|
| 195 |
+
# {'Employee Type': emp_type, 'Shift Type': 'Evening Shift', 'Available': item['Evening Shift Available']},
|
| 196 |
+
# {'Employee Type': emp_type, 'Shift Type': 'Overtime', 'Available': item['Overtime Available']}
|
| 197 |
+
# ])
|
| 198 |
+
|
| 199 |
+
# chart_df2 = pd.DataFrame(availability_chart_data)
|
| 200 |
+
# fig3 = px.bar(chart_df2, x='Employee Type', y='Available', color='Shift Type',
|
| 201 |
+
# title='Available Workers by Employee Type and Shift',
|
| 202 |
+
# barmode='group')
|
| 203 |
+
# st.plotly_chart(fig3, use_container_width=True)
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
def main():
    st.set_page_config(page_title="Labor Optimization Tool", layout="wide")
    st.title("Labor Optimization Visualization Tool")

    # Initialize session state
    if 'data_path' not in st.session_state:
        st.session_state.data_path = "data/my_roster_data"

    # Sidebar for inputs
    with st.sidebar:
        st.header("Optimization Parameters")
        data_path = st.text_input("Data Path", value=st.session_state.data_path)
        # Update session state when the user changes data_path
        st.session_state.data_path = data_path

        # Load available dates from the dataset
        available_dates = get_available_dates(data_path)

        if available_dates:
            date_options = [""] + [str(date) for date in available_dates]
            target_date = st.selectbox(
                "Target Date (select empty for latest date)",
                options=date_options,
                index=0,
            )
            st.session_state.target_date = target_date
        else:
            target_date = st.text_input(
                "Target Date (YYYY-MM-DD, leave empty for latest)"
            )
            st.warning("No order dates found in dataset. Check the data path.")

        st.header("Advanced Options")
        st.caption("Set to 0 to use all available workers")
        max_workers_permanent = st.number_input(
            "Max Permanent Workers", min_value=0, value=0
        )
        max_workers_contract = st.number_input(
            "Max Contract Workers", min_value=0, value=0
        )
        max_workers_temporary = st.number_input(
            "Max Temporary Workers", min_value=0, value=0
        )

        # Checkbox to show the dataset overview below the results
        show_metadata = st.checkbox("Show Dataset Overview", value=True)
        optimize_btn = st.button("Run Optimization")

    # Main area for optimization results
    if optimize_btn:
        try:
            with st.spinner("Running optimization..."):
                optimizer = LaborOptimizer(data_path)

                # Build the override dict only from caps the user actually set
                max_workers_override = {}
                if max_workers_permanent > 0:
                    max_workers_override["permanent"] = max_workers_permanent
                if max_workers_contract > 0:
                    max_workers_override["contract"] = max_workers_contract
                if max_workers_temporary > 0:
                    max_workers_override["temporary"] = max_workers_temporary

                # If no overrides were provided, pass None instead of an empty dict
                if not max_workers_override:
                    max_workers_override = None

                results = optimizer.optimize(target_date, max_workers_override)

            if isinstance(results, str):
                # The optimizer returns a message string on failure
                st.error(results)
            else:
                # Wrap optimization results in an expander
                with st.expander("🎯 Optimization Results", expanded=True):
                    # Split the page into sections
                    summary_col, allocation_col = st.columns([1, 1])

                    with summary_col:
                        st.subheader("Optimization Summary")
                        st.write(f"**Target Date:** {results['target_date']}")
                        st.write(
                            f"**Total Labor Hours:** {results['total_labor_hours_needed']:.2f}"
                        )
                        st.write(f"**Total Cost:** ${results['total_cost']:.2f}")

                    with allocation_col:
                        st.subheader("Employee Allocation")
                        allocation_data = results["allocation"]

                        # Create a DataFrame for easier visualization
                        allocation_df = pd.DataFrame.from_dict(
                            {
                                emp_type: {
                                    shift: int(val) for shift, val in shifts.items()
                                }
                                for emp_type, shifts in allocation_data.items()
                            },
                            orient="index",
                        )
                        allocation_df.index.name = "Employee Type"
                        allocation_df.columns = [
                            col.replace("_", " ").title()
                            for col in allocation_df.columns
                        ]

                        st.dataframe(allocation_df)

                    # Cost visualization
                    st.subheader("Cost Visualization")

                    # Cost per cell = workers * shift duration (hours) * hourly rate
                    cost_data = []
                    shift_hours = results["shift_hours"]
                    costs = optimizer.employee_types_df.set_index("type_name")
                    shift_cost_mapping = {
                        "usual_time": "usual_cost",
                        "overtime": "overtime_cost",
                        "evening_shift": "evening_shift_cost",
                    }
                    for emp_type, shifts in allocation_data.items():
                        for shift in shifts:
                            cost = (
                                shifts[shift]
                                * shift_hours[shift]
                                * costs.loc[emp_type, shift_cost_mapping[shift]]
                            )
                            if cost > 0:  # Only add non-zero costs
                                cost_data.append(
                                    {
                                        "Employee Type": emp_type.title(),
                                        "Shift": shift.replace("_", " ").title(),
                                        "Cost": cost,
                                        "Workers": int(shifts[shift]),
                                    }
                                )

                    cost_df = pd.DataFrame(cost_data)

                    col1, col2 = st.columns([3, 2])

                    with col1:
                        # Bar chart for costs
                        if not cost_df.empty:
                            fig = px.bar(
                                cost_df,
                                x="Shift",
                                y="Cost",
                                color="Employee Type",
                                title="Labor Cost by Employee Type and Shift",
                                labels={"Cost": "Cost ($)"},
                            )
                            st.plotly_chart(fig, use_container_width=True)

                    with col2:
                        # Pie chart for total cost by employee type
                        if not cost_df.empty:
                            total_by_type = (
                                cost_df.groupby("Employee Type")["Cost"]
                                .sum()
                                .reset_index()
                            )
                            fig2 = px.pie(
                                total_by_type,
                                values="Cost",
                                names="Employee Type",
                                title="Total Cost by Employee Type",
                            )
                            st.plotly_chart(fig2, use_container_width=True)

                    # Worker allocation visualization
                    st.subheader("Worker Allocation")
                    worker_data = []
                    for emp_type, shifts in allocation_data.items():
                        for shift, count in shifts.items():
                            if count > 0:  # Only add non-zero allocations
                                worker_data.append(
                                    {
                                        "Employee Type": emp_type.title(),
                                        "Shift": shift.replace("_", " ").title(),
                                        "Workers": int(count),
                                    }
                                )

                    worker_df = pd.DataFrame(worker_data)

                    if not worker_df.empty:
                        fig3 = px.bar(
                            worker_df,
                            x="Shift",
                            y="Workers",
                            color="Employee Type",
                            title="Worker Allocation by Shift and Employee Type",
                            barmode="group",
                        )
                        st.plotly_chart(fig3, use_container_width=True)

        except Exception as e:
            st.error(f"Error: {str(e)}")
            st.exception(e)

    # Display the metadata section if requested, below the optimization results
    if show_metadata:
        try:
            optimizer = LaborOptimizer(data_path)

            # Nudge the user towards an explicit target date
            if not target_date:
                st.info("💡 **Tip**: Select a specific target date from the sidebar to see accurate availability data for that date. Currently showing data for the most recent order date.")

        except Exception as e:
            st.error(f"Error loading metadata: {str(e)}")


if __name__ == "__main__":
    main()
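For quick experimentation, the same optimizer can be driven without the Streamlit front end. The sketch below assumes only what this page already relies on: the `LaborOptimizer(data_path)` constructor, `optimize(target_date, max_workers_override)`, a message string on failure, and the `total_cost`/`allocation` result keys. The import path and the sample date are placeholders to adapt to your checkout; the full UI itself is presumably launched with `streamlit run src/visualization/Home.py`.

```python
# Headless usage sketch (assumed: import path, sample date, CSVs under data/)
from optimization.labor_optimizer import LaborOptimizer

optimizer = LaborOptimizer("data/my_roster_data")

# Per-type head-count caps; pass None to use all available workers
max_workers_override = {"permanent": 10, "contract": 5}

results = optimizer.optimize("2024-01-15", max_workers_override)

if isinstance(results, str):
    # The optimizer signals failure by returning a message string
    print(f"Optimization failed: {results}")
else:
    print(f"Total cost: ${results['total_cost']:.2f}")
    for emp_type, shifts in results["allocation"].items():
        for shift, count in shifts.items():
            if count > 0:
                print(f"  {emp_type} / {shift}: {int(count)} worker(s)")
```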
src/visualization/pages/2_metadata.py
ADDED
@@ -0,0 +1,300 @@
import sys
import os
import pandas as pd
import streamlit as st
import plotly.express as px
from datetime import datetime

# Add the parent directory to the path so LaborOptimizer can be imported
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from optimization.labor_optimizer import LaborOptimizer


def get_available_dates(data_path):
    """Load the orders data and extract its unique due dates"""
    try:
        orders_file = os.path.join(data_path, "orders.csv")
        if os.path.exists(orders_file):
            orders_df = pd.read_csv(orders_file)
            if "due_date" in orders_df.columns:
                # Convert to datetime and extract unique dates
                dates = pd.to_datetime(orders_df["due_date"]).dt.date.unique()
                # Sort dates in descending order (most recent first)
                dates = sorted(dates, reverse=True)
                return dates
    except Exception as e:
        st.error(f"Error loading dates: {str(e)}")
    # Fall through to an empty list when the file or column is missing
    return []

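`get_available_dates` assumes nothing beyond an `orders.csv` with a `due_date` column inside the data directory. A toy round-trip (made-up rows, temporary directory) illustrates the expected layout; the `order_amount` column name is the one `display_demand` below reads:

```python
# Toy example of the orders.csv layout get_available_dates expects
import os
import tempfile
import pandas as pd

with tempfile.TemporaryDirectory() as data_path:
    pd.DataFrame({
        "due_date": ["2024-01-03", "2024-01-05", "2024-01-03"],  # made-up dates
        "order_amount": [120, 80, 40],
    }).to_csv(os.path.join(data_path, "orders.csv"), index=False)

    dates = get_available_dates(data_path)
    print(dates)  # [datetime.date(2024, 1, 5), datetime.date(2024, 1, 3)]
```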
def get_metadata_stats(optimizer, target_date=None):
    """
    Aggregate metadata statistics about employee costs and availability

    Args:
        optimizer: LaborOptimizer instance
        target_date: Target date for availability analysis

    Returns:
        dict: Dictionary containing various statistics
    """
    try:
        # Employee type costs
        employee_types_df = optimizer.employee_types_df
        costs_data = []
        for _, row in employee_types_df.iterrows():
            costs_data.append({
                'Employee Type': row['type_name'].title(),
                'Usual Cost ($/hr)': f"${row['usual_cost']:.2f}",
                'Overtime Cost ($/hr)': f"${row['overtime_cost']:.2f}",
                'Evening Shift Cost ($/hr)': f"${row['evening_shift_cost']:.2f}",
                'Max Hours': row['max_hours'],
                'Unit Manpower/Hr': row['unit_productivity_per_hour']
            })

        # Shift hours information
        shift_hours = optimizer._get_shift_hours()
        shift_data = []
        for shift_type, hours in shift_hours.items():
            shift_data.append({
                'Shift Type': shift_type.replace('_', ' ').title(),
                'Duration (hours)': f"{hours:.1f}"
            })

        # Employee availability for the target date
        availability_data = []
        if target_date:
            target_date_str = pd.to_datetime(target_date).strftime("%Y-%m-%d")
        else:
            # Fall back to the most recent order date and warn the user
            target_date_str = pd.to_datetime(optimizer.orders_df["due_date"]).max().strftime("%Y-%m-%d")
            st.warning("⚠️ No target date specified. Using the most recent order date for analysis. Please select a specific target date for accurate availability data.")

        availability_target_date = optimizer.employee_availability_df[
            optimizer.employee_availability_df["date"] == target_date_str
        ]

        employee_availability = optimizer.employees_df.merge(
            availability_target_date, left_on="id", right_on="employee_id", how="left"
        )

        for emp_type in optimizer.employee_types_df["type_name"]:
            emp_type_data = employee_availability[
                employee_availability["type_name"] == emp_type
            ]

            if not emp_type_data.empty:
                first_shift_available = emp_type_data["first_shift_available"].sum()
                second_shift_available = emp_type_data["second_shift_available"].sum()
                overtime_available = emp_type_data["overtime_available"].sum()
                total_employees = len(emp_type_data)
            else:
                first_shift_available = second_shift_available = overtime_available = total_employees = 0

            availability_data.append({
                'Employee Type': emp_type.title(),
                'Total Employees': total_employees,
                'Usual Time Available': first_shift_available,
                'Evening Shift Available': second_shift_available,
                'Overtime Available': overtime_available
            })

        # Overall statistics
        total_employees = len(optimizer.employees_df)
        total_employee_types = len(optimizer.employee_types_df)
        total_orders = len(optimizer.orders_df)

        return {
            'costs_data': costs_data,
            'shift_data': shift_data,
            'availability_data': availability_data,
            'overall_stats': {
                'Total Employees': total_employees,
                'Employee Types': total_employee_types,
                'Total Orders': total_orders,
                'Analysis Date': target_date_str,
                'is_default_date': not bool(target_date)
            }
        }

    except Exception as e:
        st.error(f"Error generating metadata: {str(e)}")
        return None

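The availability block above is a merge-then-aggregate pattern: left-join the employee roster onto the availability rows for one date, then sum the per-shift flags by employee type. Here is the same pattern as a self-contained pandas sketch, with invented values but the column names the code actually reads:

```python
# Minimal stand-in for the merge + per-type aggregation above (toy data)
import pandas as pd

employees = pd.DataFrame({
    "id": [1, 2, 3],
    "type_name": ["permanent", "permanent", "contract"],
})
availability = pd.DataFrame({
    "employee_id": [1, 2, 3],
    "date": ["2024-01-05"] * 3,
    "first_shift_available": [1, 0, 1],
    "second_shift_available": [0, 1, 1],
    "overtime_available": [1, 1, 0],
})

# Join the roster to the availability rows for one date, then sum flags per type
merged = employees.merge(
    availability[availability["date"] == "2024-01-05"],
    left_on="id", right_on="employee_id", how="left",
)
per_type = merged.groupby("type_name")[
    ["first_shift_available", "second_shift_available", "overtime_available"]
].sum()
print(per_type)
# contract:  1 usual, 1 evening, 0 overtime; permanent: 1 usual, 1 evening, 2 overtime
```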
def display_metadata_section(metadata):
    """Display metadata in organized sections"""
    if not metadata:
        return

    # The entire Dataset Overview section is collapsible
    with st.expander("📊 Dataset Overview", expanded=False):
        st.write(f"Data path: {st.session_state.data_path}")
        # Overall statistics
        st.write("Information on the chosen date - this is not an optimization report")
        col1, col2, col3, col4 = st.columns(4)
        with col1:
            st.metric("Total Employees Available", metadata['overall_stats']['Total Employees'])
        with col2:
            st.metric("Employee Types Available", metadata['overall_stats']['Employee Types'])
        with col3:
            st.metric("Total Orders", metadata['overall_stats']['Total Orders'])
        with col4:
            analysis_date = metadata['overall_stats']['Analysis Date']
            if metadata['overall_stats'].get('is_default_date', False):
                st.metric("Analysis Date", f"{analysis_date} ⚠️", help="Using most recent order date - select a specific date for accurate analysis")
            else:
                st.metric("Analysis Date", analysis_date)

        # Create tabs for the different metadata sections
        tab1, tab2, tab3 = st.tabs(["💰 Employee Costs", "📅 Shift Information", "👥 Availability"])

        with tab1:
            st.subheader("Employee Type Costs")
            costs_df = pd.DataFrame(metadata['costs_data'])
            st.dataframe(costs_df, use_container_width=True)

            # Cost comparison chart (parse the "$x.xx" display strings back to floats)
            costs_for_chart = []
            for item in metadata['costs_data']:
                emp_type = item['Employee Type']
                costs_for_chart.extend([
                    {'Employee Type': emp_type, 'Cost Type': 'Usual', 'Cost': float(item['Usual Cost ($/hr)'].replace('$', ''))},
                    {'Employee Type': emp_type, 'Cost Type': 'Overtime', 'Cost': float(item['Overtime Cost ($/hr)'].replace('$', ''))},
                    {'Employee Type': emp_type, 'Cost Type': 'Evening', 'Cost': float(item['Evening Shift Cost ($/hr)'].replace('$', ''))}
                ])

            chart_df = pd.DataFrame(costs_for_chart)
            fig = px.bar(chart_df, x='Employee Type', y='Cost', color='Cost Type',
                         title='Hourly Costs by Employee Type and Shift',
                         barmode='group')
            st.plotly_chart(fig, use_container_width=True)

        with tab2:
            st.subheader("Shift Duration Information")
            shift_df = pd.DataFrame(metadata['shift_data'])
            st.dataframe(shift_df, use_container_width=True)

            # Shift duration chart
            fig2 = px.bar(shift_df, x='Shift Type', y='Duration (hours)',
                          title='Shift Duration by Type')
            st.plotly_chart(fig2, use_container_width=True)

        with tab3:
            st.subheader("Employee Availability")
            availability_df = pd.DataFrame(metadata['availability_data'])
            st.dataframe(availability_df, use_container_width=True)

            # # Availability chart
            # availability_chart_data = []
            # for item in metadata['availability_data']:
            #     emp_type = item['Employee Type']
            #     availability_chart_data.extend([
            #         {'Employee Type': emp_type, 'Shift Type': 'Usual Time', 'Available': item['Usual Time Available']},
            #         {'Employee Type': emp_type, 'Shift Type': 'Evening Shift', 'Available': item['Evening Shift Available']},
            #         {'Employee Type': emp_type, 'Shift Type': 'Overtime', 'Available': item['Overtime Available']}
            #     ])

            # chart_df2 = pd.DataFrame(availability_chart_data)
            # fig3 = px.bar(chart_df2, x='Employee Type', y='Available', color='Shift Type',
            #               title='Available Workers by Employee Type and Shift',
            #               barmode='group')
            # st.plotly_chart(fig3, use_container_width=True)

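A design note on the cost chart in tab1: costs are formatted into `$12.50`-style strings for the table and then parsed back to floats for plotting. An alternative, sketched below under the assumption of Streamlit ≥ 1.23 (which introduced `st.column_config.NumberColumn`), keeps the column numeric and formats only at display time, so no re-parsing is needed:

```python
# Hedged alternative: keep costs numeric and format only in the rendered table
import pandas as pd
import streamlit as st

costs_df = pd.DataFrame({
    "Employee Type": ["Permanent", "Contract"],   # toy rows
    "Usual Cost ($/hr)": [15.0, 18.5],            # plain floats, not "$15.00" strings
})
st.dataframe(
    costs_df,
    column_config={
        # printf-style format string handles the dollar display
        "Usual Cost ($/hr)": st.column_config.NumberColumn(format="$%.2f"),
    },
    use_container_width=True,
)
```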
def display_demand(optimizer):
    with st.expander("📊 Demand", expanded=False):
        demand_df = optimizer.orders_df
        st.header("Demand")
        daily_demand = demand_df.groupby('date_of_order')['order_amount'].sum().reset_index()
        st.plotly_chart(px.bar(daily_demand, x='date_of_order', y='order_amount', title='Demand by Date'), use_container_width=True)
        st.markdown("### Demand for the selected date")
        st.dataframe(demand_df[demand_df['date_of_order'] == st.session_state.target_date], use_container_width=True)


def display_employee_availability(optimizer):
    with st.expander("👥 Employee Availability", expanded=False):
        st.header("Employee Availability")
        employee_availability_df = optimizer.employee_availability_df
        employee_availability_df['date'] = pd.to_datetime(employee_availability_df['date'])
        employee_availability_target_date = employee_availability_df[employee_availability_df['date'] == st.session_state.target_date]
        employee_availability_target_date = pd.merge(employee_availability_target_date, optimizer.employees_df, left_on='employee_id', right_on='id', how='left')
        st.dataframe(employee_availability_target_date[['name', 'employee_id', 'type_name', 'first_shift_available', 'second_shift_available', 'overtime_available']], use_container_width=True)

        # Group by type_name and sum the availability columns
        available_employee_grouped = employee_availability_target_date.groupby('type_name')[
            ['first_shift_available', 'second_shift_available', 'overtime_available']
        ].sum().reset_index()

        st.markdown("### Employee Availability for the selected date")
        # Create a non-stacked (grouped) bar chart with plotly
        fig = px.bar(
            available_employee_grouped.melt(id_vars=['type_name'], var_name='shift_type', value_name='count'),
            x='type_name',
            y='count',
            color='shift_type',
            barmode='group',  # grouped rather than stacked bars
            title='Available Employee Count by Type and Shift',
            labels={'type_name': 'Employee Type', 'count': 'Available Count', 'shift_type': 'Shift Type'}
        )
        st.plotly_chart(fig, use_container_width=True)

        # st.dataframe(employee_availability_target_date, use_container_width=True)
        # st.plotly_chart(px.bar(employee_availability_target_date, x='employee_id', y='availability', title='Employee Availability by Date'), use_container_width=True)
        # st.dataframe(employee_availability_df[employee_availability_df['date'] == st.session_state.target_date], use_container_width=True)

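The wide-to-long reshape feeding that grouped chart is the standard `melt` → `px.bar(barmode='group')` idiom. A standalone sketch with toy numbers shows the long-format frame plotly receives:

```python
# Standalone illustration of the melt -> grouped-bar idiom used above (toy data)
import pandas as pd
import plotly.express as px

wide = pd.DataFrame({
    "type_name": ["permanent", "contract"],
    "first_shift_available": [4, 2],
    "second_shift_available": [3, 1],
    "overtime_available": [2, 2],
})
# One row per (type_name, shift_type) pair, e.g. permanent / first_shift_available / 4
long = wide.melt(id_vars=["type_name"], var_name="shift_type", value_name="count")
fig = px.bar(long, x="type_name", y="count", color="shift_type", barmode="group")
fig.show()
```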
def main():
    """Main function for the metadata page"""
    st.set_page_config(page_title="Dataset Metadata", layout="wide")
    st.title("📊 Dataset Metadata Overview")

    # Reuse the data_path shared via session state by 1_optimize_viz.py
    if 'data_path' in st.session_state:
        data_path = st.session_state.data_path
        st.sidebar.info(f"📁 Using shared data path: `{data_path}`")
    else:
        st.error("No data path found. Please select a data path in the sidebar.")
        st.stop()  # data_path would be undefined below, so stop the script here

    if 'target_date' in st.session_state:
        target_date = st.session_state.target_date
        st.sidebar.info(f"📅 Using shared target date: `{target_date}`")
    else:
        st.error("No target date found. Please select a target date in the sidebar.")
        st.stop()  # likewise, target_date is required below

    # If the date selection needs to be individualized per page, uncomment the following code
    # with st.sidebar:
    #     # Date selection
    #     available_dates = get_available_dates(data_path)
    #     if available_dates:
    #         date_options = [""] + [str(date) for date in available_dates]
    #         target_date = st.selectbox(
    #             "Target Date (select empty for latest date)",
    #             options=date_options,
    #             index=0,
    #         )
    #     else:
    #         target_date = st.text_input(
    #             "Target Date (YYYY-MM-DD, leave empty for latest)"
    #         )

    try:
        optimizer = LaborOptimizer(data_path)

        # Nudge the user towards an explicit target date
        if not target_date:
            st.info("💡 **Tip**: Select a specific target date from the sidebar to see accurate availability data for that date. Currently showing data for the most recent order date.")

        metadata = get_metadata_stats(optimizer, target_date if target_date else None)
        display_metadata_section(metadata)
        display_demand(optimizer)
        display_employee_availability(optimizer)
    except Exception as e:
        st.error(f"Error loading metadata: {str(e)}")


if __name__ == "__main__":
    main()