#!/usr/bin/env bash
# Neural Lab Model Workshop CLI
# Usage: workshop <command> [args]

# Hardcoded install locations: the lab virtualenv and the workshop package dir.
readonly VENV="/home/pmello/.openclaw/neural-lab-env"
readonly WORKSHOP="/home/pmello/.openclaw/workspace-main/tools/neural-lab"
# Prepend the workshop dir so `import model_workshop` resolves. The
# ${PYTHONPATH:+:...} guard avoids a trailing ":" when PYTHONPATH is unset or
# empty — an empty PYTHONPATH entry puts the current directory on sys.path.
export PYTHONPATH="$WORKSHOP${PYTHONPATH:+:$PYTHONPATH}"

#######################################
# Run python3 inside the lab virtualenv.
# Globals:   VENV (read)
# Arguments: forwarded verbatim to python3
# Outputs:   whatever python3 produces; error to stderr if venv missing
# Returns:   exits 1 if the venv cannot be activated, else python3's status
#######################################
run() {
  # Fail fast instead of silently falling back to the system python when the
  # virtualenv is missing (the original behavior after a failed `source`).
  if [ ! -f "$VENV/bin/activate" ]; then
    echo "error: virtualenv not found at $VENV" >&2
    exit 1
  fi
  # shellcheck disable=SC1091 — activate lives outside the repo
  source "$VENV/bin/activate"
  python3 "$@"
}

case "${1:-help}" in
  scan|ls)
    # List every discovered model in a table, sorted smallest-first.
    # FIX: the header row previously reused the outer single quote inside the
    # f-string ({'Name':<35}), which is a SyntaxError on Python < 3.12; it now
    # uses \"-escaped double quotes like every other snippet in this script.
    run -c "
from model_workshop import scan_models
models = scan_models()
print(f'\n  {\"Name\":<35} {\"Arch\":<15} {\"Size\":>10}  Format')
print('  ' + '-'*75)
for m in sorted(models, key=lambda x: x['size_mb']):
    sz = f'{m[\"size_mb\"]/1024:.1f}GB' if m['size_mb'] >= 1024 else f'{m[\"size_mb\"]:.0f}MB'
    print(f'  {m[\"name\"]:<35} {m.get(\"architecture\",\"\"):<15} {sz:>10}  {m[\"format\"]}')
print(f'\n  Total: {len(models)} models')
"
    ;;
    
  inspect|info)
    # Resolve a model by path or fuzzy name, then print format/size/param
    # summary plus a per-layer table with optional weight statistics.
    if [ -z "$2" ]; then echo "Usage: workshop inspect <path-or-name>"; exit 1; fi
    run -c "
import sys
from model_workshop import inspect_model, scan_models

target = sys.argv[1]

# If not a file path, search by name
if '/' not in target:
    models = scan_models()
    matches = [m for m in models if target.lower() in m['name'].lower()]
    if not matches:
        print(f'No model matching \"{target}\"'); sys.exit(1)
    target = matches[0]['path']
    print(f'Found: {matches[0][\"name\"]} → {target}')

info = inspect_model(target)
if 'error' in info:
    print(f'Error: {info[\"error\"]}'); sys.exit(1)

print(f'\n  Format:     {info[\"format\"]}')
print(f'  Size:       {info[\"size_mb\"]}MB')
print(f'  Parameters: {info[\"total_params\"]:,}')
print(f'  Layers:     {len(info[\"layers\"])}')
if info.get('config'):
    print(f'  Config:     {info[\"config\"]}')

print(f'\n  {\"Layer\":<45} {\"Type\":<10} {\"Shape\":<20} {\"Params\":>12}')
print('  ' + '-'*90)
for l in info['layers']:
    stats = ''
    if l.get('stats'):
        stats = f'μ={l[\"stats\"][\"mean\"]:.4f} σ={l[\"stats\"][\"std\"]:.4f}'
    print(f'  {l[\"name\"]:<45} {l[\"type\"]:<10} {str(l[\"shape\"]):<20} {l[\"params\"]:>12,}  {stats}')

print(f'\n  Total: {info[\"total_params\"]:,} parameters')
" "$2"
    ;;
    
  duplicate|dup|copy)
    # Make a safe working copy of a model, optionally under a new name.
    if [ -z "$2" ]; then echo "Usage: workshop duplicate <path-or-name> [new-name]"; exit 1; fi
    run -c "
import sys
from model_workshop import duplicate_model, scan_models

target = sys.argv[1]
new_name = sys.argv[2] if len(sys.argv) > 2 else ''

# Bare names (no slash) are fuzzy-matched against the scanned model list.
if '/' not in target:
    hits = [entry for entry in scan_models() if target.lower() in entry['name'].lower()]
    if not hits:
        print(f'No model matching \"{target}\"'); sys.exit(1)
    target = hits[0]['path']

outcome = duplicate_model(target, new_name)
if not outcome.get('ok'):
    print(f'❌ {outcome.get(\"error\",\"failed\")}')
else:
    print(f'✅ Created: {outcome[\"name\"]}')
    print(f'   Path: {outcome[\"path\"]}')
    print(f'   Files: {outcome[\"files\"]}')
" "$2" "${3:-}"
    ;;
    
  explain)
    # Print an AI-generated explanation of a model's architecture.
    if [ -z "$2" ]; then echo "Usage: workshop explain <path-or-name>"; exit 1; fi
    run -c "
import sys
from model_workshop import inspect_model, generate_architecture_explanation, scan_models

target = sys.argv[1]
# Bare names (no slash) are fuzzy-matched against the scanned model list.
if '/' not in target:
    candidates = [entry for entry in scan_models() if target.lower() in entry['name'].lower()]
    if not candidates:
        print(f'No model matching \"{target}\"'); sys.exit(1)
    target = candidates[0]['path']

# Inspect first, announce, then explain — order of side effects preserved.
details = inspect_model(target)
print('Analyzing architecture...\n')
print(generate_architecture_explanation(details))
" "$2"
    ;;
    
  layers)
    # One-line-per-layer listing, optionally filtered by substring.
    if [ -z "$2" ]; then echo "Usage: workshop layers <path-or-name> [filter]"; exit 1; fi
    run -c "
import sys
from model_workshop import inspect_model, scan_models

target = sys.argv[1]
needle = sys.argv[2] if len(sys.argv) > 2 else ''

# Bare names (no slash) are fuzzy-matched against the scanned model list.
if '/' not in target:
    hits = [entry for entry in scan_models() if target.lower() in entry['name'].lower()]
    if not hits:
        print(f'No model matching \"{target}\"'); sys.exit(1)
    target = hits[0]['path']

report = inspect_model(target)
for layer in report['layers']:
    if needle and needle.lower() not in layer['name'].lower():
        continue
    st = layer.get('stats', {})
    detail = ''
    if st:
        detail = f'μ={st[\"mean\"]:.4f} σ={st[\"std\"]:.4f} [{st[\"min\"]:.4f}, {st[\"max\"]:.4f}]'
    print(f'{layer[\"name\"]}  {layer[\"type\"]}  {layer[\"shape\"]}  {layer[\"params\"]:,}p  ' + detail)
" "$2" "${3:-}"
    ;;

  python|py|shell)
    # Interactive REPL: python3 -i runs the -c banner code first, then drops
    # into the prompt with the workshop helpers, torch, and the safetensors
    # load/save functions already imported.
    echo "Dropping into Python with workshop tools loaded..."
    run -i -c "
from model_workshop import *
import torch
from safetensors.torch import load_file, save_file
print('Workshop tools loaded:')
print('  inspect_model(path) — full layer inspection')
print('  scan_models()       — find all models')
print('  duplicate_model(path, name) — safe copy')
print('  load_file(path)     — load safetensors as dict')
print('  save_file(dict, path) — save modified weights')
print()
"
    ;;

  help|*)
    # Fallback arm: shown for "help" and for any unrecognized command.
    # A quoted here-doc replaces the echo chain; output is byte-identical.
    cat <<'USAGE'
Neural Lab Model Workshop CLI

Commands:
  workshop scan              List all models
  workshop inspect <name>    Inspect model layers & stats
  workshop layers <name> [filter]  List layers (optional filter)
  workshop explain <name>    AI architecture explanation
  workshop duplicate <name> [new-name]  Safe copy for editing
  workshop python            Interactive Python with tools

Examples:
  workshop scan
  workshop inspect gpt2
  workshop layers gpt2 attn
  workshop duplicate gpt2 my-gpt2-experiment
  workshop explain whisper
  workshop python
USAGE
    ;;
esac