"""
Store Sales V6 - KILLER MODEL
=============================
Obiettivo: TOP 10% per medaglia bronzo

Tecniche vincenti combinate:
1. Multi-lag: 7, 14, 28, 63, 365 giorni
2. Ensemble: LightGBM + XGBoost + CatBoost
3. Zero forecasting (21 giorni)
4. Target encoding
5. Cyclic features
6. Advanced rolling statistics
"""

import numpy as np
import pandas as pd
import lightgbm as lgb
import xgboost as xgb
from catboost import CatBoostRegressor
from sklearn.model_selection import TimeSeriesSplit
from sklearn.metrics import mean_squared_log_error
import warnings
warnings.filterwarnings('ignore')

print("="*70)
print("   🏆 STORE SALES V6 - KILLER MODEL 🏆")
print("="*70)

# =============================================================================
# LOAD DATA
# =============================================================================
print("\n[1/8] Loading data...")
PATH = "/kaggle/input/store-sales-time-series-forecasting"

train = pd.read_csv(f"{PATH}/train.csv", parse_dates=['date'])
test = pd.read_csv(f"{PATH}/test.csv", parse_dates=['date'])
stores = pd.read_csv(f"{PATH}/stores.csv")
oil = pd.read_csv(f"{PATH}/oil.csv", parse_dates=['date'])
holidays = pd.read_csv(f"{PATH}/holidays_events.csv", parse_dates=['date'])
transactions = pd.read_csv(f"{PATH}/transactions.csv", parse_dates=['date'])

print(f"  Train: {train.shape}, Test: {test.shape}")

# =============================================================================
# OIL PROCESSING
# =============================================================================
print("\n[2/8] Processing oil...")
oil = oil.rename(columns={'dcoilwtico': 'oil_price'})
oil['oil_price'] = oil['oil_price'].interpolate(method='linear').ffill().bfill()

# Oil moving averages
oil['oil_ma7'] = oil['oil_price'].rolling(7, min_periods=1).mean()
oil['oil_ma30'] = oil['oil_price'].rolling(30, min_periods=1).mean()
oil['oil_std7'] = oil['oil_price'].rolling(7, min_periods=1).std().fillna(0)
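
# Ecuador's economy is highly oil-dependent (per the competition description),
# so the oil price level, its trend, and its volatility are plausible drivers
# of consumer demand.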

# =============================================================================
# HOLIDAY PROCESSING
# =============================================================================
print("\n[3/8] Processing holidays...")
holidays_nat = holidays[(holidays['locale'] == 'National') &
                        (holidays['transferred'] == False)].copy()

# Key holidays
holidays_nat['is_christmas'] = holidays_nat['description'].str.contains('Navidad', case=False).astype(int)
holidays_nat['is_newyear'] = holidays_nat['description'].str.contains('Año Nuevo|Primer', case=False).astype(int)
holidays_nat['is_carnival'] = holidays_nat['description'].str.contains('Carnaval', case=False).astype(int)

holiday_agg = holidays_nat.groupby('date').agg({
    'is_christmas': 'max',
    'is_newyear': 'max',
    'is_carnival': 'max'
}).reset_index()
holiday_agg['is_holiday'] = 1
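
# Aggregating with 'max' collapses dates that carry several national events
# into a single row per date, keeping the later merge one-to-many instead of
# many-to-many (which would duplicate train/test rows).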

# =============================================================================
# FEATURE ENGINEERING
# =============================================================================
print("\n[4/8] Feature engineering...")

def create_features(df, is_train=True):
    df = df.copy()

    # Date features
    df['year'] = df['date'].dt.year
    df['month'] = df['date'].dt.month
    df['day'] = df['date'].dt.day
    df['dayofweek'] = df['date'].dt.dayofweek
    df['dayofyear'] = df['date'].dt.dayofyear
    df['weekofyear'] = df['date'].dt.isocalendar().week.astype(int)
    df['quarter'] = df['date'].dt.quarter

    # Boolean features
    df['is_weekend'] = (df['dayofweek'] >= 5).astype(int)
    df['is_month_start'] = df['date'].dt.is_month_start.astype(int)
    df['is_month_end'] = df['date'].dt.is_month_end.astype(int)
    df['is_quarter_end'] = df['date'].dt.is_quarter_end.astype(int)
    df['is_payday'] = ((df['day'] == 15) | (df['day'] >= 28)).astype(int)
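    # Public-sector wages in Ecuador are paid on the 15th and on the last day
    # of the month (competition description); day >= 28 approximates month-end.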

    # Cyclic encoding (important for capturing seasonality)
    df['month_sin'] = np.sin(2 * np.pi * df['month'] / 12)
    df['month_cos'] = np.cos(2 * np.pi * df['month'] / 12)
    df['day_sin'] = np.sin(2 * np.pi * df['dayofweek'] / 7)
    df['day_cos'] = np.cos(2 * np.pi * df['dayofweek'] / 7)
    df['dayofyear_sin'] = np.sin(2 * np.pi * df['dayofyear'] / 365)
    df['dayofyear_cos'] = np.cos(2 * np.pi * df['dayofyear'] / 365)
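    # Why this helps: in raw integers Dec (12) and Jan (1) differ by 11, but on
    # the (sin, cos) circle their distance is ~0.52, versus ~1.93 for Jun vs
    # Jan, matching true seasonal proximity.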

    return df

train = create_features(train)
test = create_features(test)

# Merge stores
train = train.merge(stores, on='store_nbr', how='left')
test = test.merge(stores, on='store_nbr', how='left')

# Merge oil
train = train.merge(oil, on='date', how='left')
test = test.merge(oil, on='date', how='left')
train[['oil_price', 'oil_ma7', 'oil_ma30', 'oil_std7']] = train[['oil_price', 'oil_ma7', 'oil_ma30', 'oil_std7']].ffill().bfill()
test[['oil_price', 'oil_ma7', 'oil_ma30', 'oil_std7']] = test[['oil_price', 'oil_ma7', 'oil_ma30', 'oil_std7']].ffill().bfill()

# Merge holidays
train = train.merge(holiday_agg, on='date', how='left')
test = test.merge(holiday_agg, on='date', how='left')
for col in ['is_holiday', 'is_christmas', 'is_newyear', 'is_carnival']:
    train[col] = train[col].fillna(0)
    test[col] = test[col].fillna(0)

# Merge transactions. NOTE: transactions.csv only covers the training period,
# so every test row comes back NaN here; filling with 0 would skew the feature,
# so the test side falls back to each store's historical mean instead.
train = train.merge(transactions, on=['date', 'store_nbr'], how='left')
test = test.merge(transactions, on=['date', 'store_nbr'], how='left')
train['transactions'] = train['transactions'].fillna(0)
store_txn = train.groupby('store_nbr')['transactions'].mean().rename('store_txn').reset_index()
test = test.merge(store_txn, on='store_nbr', how='left')
test['transactions'] = test['transactions'].fillna(test['store_txn'])
test = test.drop(columns='store_txn')

# Encode categoricals
for col in ['family', 'city', 'state', 'type']:
    categories = train[col].astype('category').cat.categories
    mapping = {cat: code for code, cat in enumerate(categories)}
    train[f'{col}_encoded'] = train[col].map(mapping)
    test[f'{col}_encoded'] = test[col].map(mapping).fillna(-1).astype(int)
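
# Plain label codes impose an arbitrary ordering; that is harmless for the tree
# models used here (they split on thresholds, not magnitudes) but would be a
# poor fit for linear models.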

print(f"  Features created: {len(train.columns)} columns")

# =============================================================================
# LAG FEATURES (CRITICAL)
# =============================================================================
print("\n[5/8] Creating lag features...")

train = train.sort_values(['store_nbr', 'family', 'date']).reset_index(drop=True)

# Multi-lag: 7, 14, 28, 63 days (the 365-day lag is added separately below)
lags = [7, 14, 28, 63]
for lag in lags:
    train[f'sales_lag_{lag}'] = train.groupby(['store_nbr', 'family'])['sales'].shift(lag)

# Rolling statistics
for window in [7, 14, 28, 63]:
    train[f'sales_roll_mean_{window}'] = train.groupby(['store_nbr', 'family'])['sales'].transform(
        lambda x: x.shift(1).rolling(window, min_periods=1).mean()
    )
    train[f'sales_roll_std_{window}'] = train.groupby(['store_nbr', 'family'])['sales'].transform(
        lambda x: x.shift(1).rolling(window, min_periods=1).std()
    )

# Exponential moving averages
for span in [7, 14, 28]:
    train[f'sales_ewm_{span}'] = train.groupby(['store_nbr', 'family'])['sales'].transform(
        lambda x: x.shift(1).ewm(span=span, min_periods=1).mean()
    )

# Zero detection (21-day window)
train['zero_21'] = train.groupby(['store_nbr', 'family'])['sales'].transform(
    lambda x: x.shift(1).rolling(21, min_periods=21).apply(lambda y: float((y == 0).all()), raw=True)
).fillna(0)
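
# zero_21 == 1 flags store/family pairs with no sales for 21 straight days,
# i.e. likely discontinued or unstocked items; at prediction time these are
# forced to 0 instead of being left to the regressors.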

# Yearly lag (365 days) - only populated for rows with at least a year of history
train['sales_lag_365'] = train.groupby(['store_nbr', 'family'])['sales'].shift(365)

# For test: use last known values
last_stats = train.groupby(['store_nbr', 'family']).agg({
    'sales': ['last', 'mean', 'std', lambda x: (x.tail(21) == 0).all()],
    'zero_21': 'last'
}).reset_index()
last_stats.columns = ['store_nbr', 'family', 'last_sales', 'avg_sales', 'std_sales', 'all_zero', 'zero_21']

test = test.merge(last_stats, on=['store_nbr', 'family'], how='left')

# Fill test lag features
for lag in lags:
    test[f'sales_lag_{lag}'] = test['last_sales']
for window in [7, 14, 28, 63]:
    test[f'sales_roll_mean_{window}'] = test['avg_sales']
    test[f'sales_roll_std_{window}'] = test['std_sales'].fillna(0)
for span in [7, 14, 28]:
    test[f'sales_ewm_{span}'] = test['avg_sales']
test['sales_lag_365'] = test['avg_sales']
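
# NOTE: these test-side lag features are static (one value per store/family
# pair repeated across the 16-day test horizon), so within-horizon dynamics
# come only from calendar, oil and holiday features. A recursive or direct
# multi-step setup would let the lags evolve day by day.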

print(f"  Lag features added")

# =============================================================================
# TARGET ENCODING
# =============================================================================
print("\n[6/8] Target encoding...")

# Store-family mean
store_family_mean = train.groupby(['store_nbr', 'family'])['sales'].mean().reset_index()
store_family_mean.columns = ['store_nbr', 'family', 'store_family_mean']
train = train.merge(store_family_mean, on=['store_nbr', 'family'], how='left')
test = test.merge(store_family_mean, on=['store_nbr', 'family'], how='left')

# Dayofweek-family mean
dow_family_mean = train.groupby(['dayofweek', 'family'])['sales'].mean().reset_index()
dow_family_mean.columns = ['dayofweek', 'family', 'dow_family_mean']
train = train.merge(dow_family_mean, on=['dayofweek', 'family'], how='left')
test = test.merge(dow_family_mean, on=['dayofweek', 'family'], how='left')

# Fill missing
test['store_family_mean'] = test['store_family_mean'].fillna(test['avg_sales'])
test['dow_family_mean'] = test['dow_family_mean'].fillna(test['avg_sales'])
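
# The means above are computed on the full training frame, so every row's own
# target leaks into its encoding and across the CV folds below. A leakage-safer
# variant computes the encoding out-of-fold. The sketch below is illustrative
# and not wired into this pipeline; the helper name and smoothing constant are
# assumptions (usage would be e.g.
# train['store_family_te'] = oof_target_encode(train, ['store_nbr', 'family'])).
from sklearn.model_selection import KFold

def oof_target_encode(df, keys, target='sales', n_splits=5, smoothing=10.0, seed=42):
    """Out-of-fold smoothed mean of `target` grouped by `keys` (illustrative)."""
    encoded = np.full(len(df), np.nan)
    global_mean = df[target].mean()
    for fit_idx, enc_idx in KFold(n_splits, shuffle=True, random_state=seed).split(df):
        stats = df.iloc[fit_idx].groupby(keys)[target].agg(['mean', 'count'])
        # shrink small-count groups toward the global mean
        smooth = ((stats['mean'] * stats['count'] + global_mean * smoothing)
                  / (stats['count'] + smoothing))
        enc = df.iloc[enc_idx][keys].merge(smooth.rename('te').reset_index(),
                                           on=keys, how='left')['te']
        encoded[enc_idx] = enc.values
    return pd.Series(encoded, index=df.index).fillna(global_mean)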

# =============================================================================
# PREPARE TRAINING DATA
# =============================================================================
print("\n[7/8] Preparing training data...")

# Use data from 2015 onwards; re-sort chronologically so TimeSeriesSplit below
# actually separates past from future (the frame is currently sorted by
# store/family for the lag computations)
train_recent = (train[train['date'] >= '2015-01-01']
                .dropna()
                .sort_values('date')
                .reset_index(drop=True))

# Feature columns
feature_cols = [
    'store_nbr', 'onpromotion', 'cluster',
    'year', 'month', 'day', 'dayofweek', 'dayofyear', 'weekofyear', 'quarter',
    'is_weekend', 'is_month_start', 'is_month_end', 'is_quarter_end', 'is_payday',
    'month_sin', 'month_cos', 'day_sin', 'day_cos', 'dayofyear_sin', 'dayofyear_cos',
    'oil_price', 'oil_ma7', 'oil_ma30', 'oil_std7',
    'is_holiday', 'is_christmas', 'is_newyear', 'is_carnival',
    'transactions',
    'family_encoded', 'city_encoded', 'state_encoded', 'type_encoded',
    'sales_lag_7', 'sales_lag_14', 'sales_lag_28', 'sales_lag_63', 'sales_lag_365',
    'sales_roll_mean_7', 'sales_roll_mean_14', 'sales_roll_mean_28', 'sales_roll_mean_63',
    'sales_roll_std_7', 'sales_roll_std_14', 'sales_roll_std_28', 'sales_roll_std_63',
    'sales_ewm_7', 'sales_ewm_14', 'sales_ewm_28',
    'store_family_mean', 'dow_family_mean'
]

# Filter features that exist
feature_cols = [c for c in feature_cols if c in train_recent.columns and c in test.columns]

X_train = train_recent[feature_cols].fillna(0)
y_train = train_recent['sales']
y_train_log = np.log1p(y_train)
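
# The competition metric is RMSLE:
#   RMSLE = sqrt(mean((log(1 + y_hat) - log(1 + y))**2))
# so fitting log1p(sales) with an RMSE objective optimizes the metric directly;
# predictions are mapped back with expm1.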

X_test = test[feature_cols].fillna(0)

print(f"  Training samples: {len(X_train)}, Features: {len(feature_cols)}")

# =============================================================================
# TRAIN ENSEMBLE
# =============================================================================
print("\n[8/8] Training KILLER ensemble...")

tscv = TimeSeriesSplit(n_splits=3)
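
# TimeSeriesSplit yields expanding-window folds: each fold trains on everything
# before its validation block, so validation rows are always chronologically
# later (this relies on train_recent being sorted by date above).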

# Storage
lgb_models, xgb_models, cat_models = [], [], []
lgb_scores, xgb_scores, cat_scores = [], [], []

for fold, (train_idx, val_idx) in enumerate(tscv.split(X_train), 1):
    print(f"\n  --- Fold {fold} ---")
    X_tr, X_val = X_train.iloc[train_idx], X_train.iloc[val_idx]
    y_tr, y_val = y_train_log.iloc[train_idx], y_train_log.iloc[val_idx]

    # === LightGBM ===
    lgb_params = {
        'objective': 'regression', 'metric': 'rmse', 'boosting_type': 'gbdt',
        'learning_rate': 0.02, 'num_leaves': 255, 'max_depth': 12,
        'min_child_samples': 20, 'feature_fraction': 0.8, 'bagging_fraction': 0.8,
        'bagging_freq': 5, 'lambda_l1': 0.1, 'lambda_l2': 0.1,
        'verbose': -1, 'n_jobs': -1, 'seed': 42
    }

    lgb_train = lgb.Dataset(X_tr, label=y_tr)
    lgb_val = lgb.Dataset(X_val, label=y_val, reference=lgb_train)

    lgb_model = lgb.train(lgb_params, lgb_train, num_boost_round=3000,
                          valid_sets=[lgb_val], callbacks=[lgb.early_stopping(100), lgb.log_evaluation(500)])

    lgb_pred = np.expm1(lgb_model.predict(X_val))
    lgb_pred = np.maximum(lgb_pred, 0)
    # mean_squared_log_error applies log1p internally, so pass the raw values
    lgb_rmsle = np.sqrt(mean_squared_log_error(np.expm1(y_val), lgb_pred))
    lgb_scores.append(lgb_rmsle)
    lgb_models.append(lgb_model)
    print(f"  LGB RMSLE: {lgb_rmsle:.5f}")

    # === XGBoost ===
    xgb_params = {
        'objective': 'reg:squarederror', 'eval_metric': 'rmse',
        'learning_rate': 0.02, 'max_depth': 10, 'subsample': 0.8,
        'colsample_bytree': 0.8, 'tree_method': 'hist', 'seed': 42
    }

    dtrain = xgb.DMatrix(X_tr, label=y_tr)
    dval = xgb.DMatrix(X_val, label=y_val)

    xgb_model = xgb.train(xgb_params, dtrain, num_boost_round=3000,
                          evals=[(dval, 'val')], early_stopping_rounds=100, verbose_eval=500)

    # predict with the early-stopped best iteration (xgb.train keeps all rounds)
    xgb_pred = np.expm1(xgb_model.predict(dval, iteration_range=(0, xgb_model.best_iteration + 1)))
    xgb_pred = np.maximum(xgb_pred, 0)
    xgb_rmsle = np.sqrt(mean_squared_log_error(np.expm1(y_val), xgb_pred))
    xgb_scores.append(xgb_rmsle)
    xgb_models.append(xgb_model)
    print(f"  XGB RMSLE: {xgb_rmsle:.5f}")

    # === CatBoost ===
    cat_model = CatBoostRegressor(
        iterations=2000, learning_rate=0.03, depth=8,
        early_stopping_rounds=100, verbose=500, random_seed=42
    )
    cat_model.fit(X_tr, y_tr, eval_set=(X_val, y_val))

    cat_pred = np.expm1(cat_model.predict(X_val))
    cat_pred = np.maximum(cat_pred, 0)
    cat_rmsle = np.sqrt(mean_squared_log_error(np.expm1(y_val), cat_pred))
    cat_scores.append(cat_rmsle)
    cat_models.append(cat_model)
    print(f"  CAT RMSLE: {cat_rmsle:.5f}")

print(f"\n  === CV Summary ===")
print(f"  LGB Mean: {np.mean(lgb_scores):.5f}")
print(f"  XGB Mean: {np.mean(xgb_scores):.5f}")
print(f"  CAT Mean: {np.mean(cat_scores):.5f}")

# =============================================================================
# ENSEMBLE PREDICTIONS
# =============================================================================
print("\n[9/9] Making ensemble predictions...")

# Weighted average based on CV scores (lower is better)
total_inv = 1/np.mean(lgb_scores) + 1/np.mean(xgb_scores) + 1/np.mean(cat_scores)
w_lgb = (1/np.mean(lgb_scores)) / total_inv
w_xgb = (1/np.mean(xgb_scores)) / total_inv
w_cat = (1/np.mean(cat_scores)) / total_inv
print(f"  Weights - LGB: {w_lgb:.3f}, XGB: {w_xgb:.3f}, CAT: {w_cat:.3f}")

# Predictions
lgb_preds = np.zeros(len(X_test))
xgb_preds = np.zeros(len(X_test))
cat_preds = np.zeros(len(X_test))

for m in lgb_models:
    lgb_preds += np.expm1(m.predict(X_test)) / len(lgb_models)

dtest = xgb.DMatrix(X_test)
for m in xgb_models:
    xgb_preds += np.expm1(m.predict(dtest, iteration_range=(0, m.best_iteration + 1))) / len(xgb_models)

for m in cat_models:
    cat_preds += np.expm1(m.predict(X_test)) / len(cat_models)

# Ensemble
final_preds = w_lgb * lgb_preds + w_xgb * xgb_preds + w_cat * cat_preds
final_preds = np.maximum(final_preds, 0)

# Zero forecasting
zero_mask = test['all_zero'].fillna(False).astype(bool)
final_preds[zero_mask.values] = 0
print(f"  Zero-forecasted: {zero_mask.sum()}")

# Submission
submission = pd.DataFrame({'id': test['id'].astype(int), 'sales': final_preds})
submission.to_csv('submission.csv', index=False)
print(f"\n  Submission: {len(submission)} rows")
print(f"  Sales range: {final_preds.min():.2f} - {final_preds.max():.2f}")
print(f"  Sales mean: {final_preds.mean():.2f}")

# CV summary. NOTE: the blended ensemble itself is never cross-validated here;
# the second figure is just the average of the three single-model CV scores.
best_cv = min(np.mean(lgb_scores), np.mean(xgb_scores), np.mean(cat_scores))
avg_single_cv = (np.mean(lgb_scores) + np.mean(xgb_scores) + np.mean(cat_scores)) / 3

print("\n" + "="*70)
print("   🏆 KILLER MODEL COMPLETE!")
print(f"   Best Single CV: {best_cv:.5f}")
print(f"   Avg Single-Model CV: {avg_single_cv:.5f}")
print("="*70)
