"""
Store Sales V10 - ULTIMATE WINNER
==================================
Target: TOP 5 - Score ~0.38

ADVANCED STRATEGIES:
1. Family-specific models for top families
2. LightGBM + XGBoost + CatBoost ensemble
3. Proper lag features (>= 16)
4. Zero forecasting
5. Store opening filter
6. Earthquake aftermath handling
7. Time-based cross-validation
8. Optimized ensemble weights
"""

import numpy as np
import pandas as pd
import lightgbm as lgb
import xgboost as xgb
from itertools import product
import warnings
warnings.filterwarnings('ignore')

print("="*70)
print("   STORE SALES V10 - ULTIMATE WINNER")
print("   Target: TOP 5")
print("="*70)

# =============================================================================
# LOAD DATA
# =============================================================================
print("\n[1/12] Loading data...")
PATH = "/kaggle/input/store-sales-time-series-forecasting"

train = pd.read_csv(f"{PATH}/train.csv", parse_dates=['date'])
test = pd.read_csv(f"{PATH}/test.csv", parse_dates=['date'])
stores = pd.read_csv(f"{PATH}/stores.csv")
oil = pd.read_csv(f"{PATH}/oil.csv", parse_dates=['date'])
holidays = pd.read_csv(f"{PATH}/holidays_events.csv", parse_dates=['date'])
transactions = pd.read_csv(f"{PATH}/transactions.csv", parse_dates=['date'])

print(f"  Train: {train.shape}, Test: {test.shape}")

# =============================================================================
# ZERO FORECASTING
# =============================================================================
print("\n[2/12] Zero-sales products...")

zero_sales = train.groupby(['store_nbr', 'family'])['sales'].sum().reset_index()
zero_sales = zero_sales[zero_sales['sales'] == 0][['store_nbr', 'family']]

# Create zero predictions
zero_pred = test.merge(zero_sales, on=['store_nbr', 'family'], how='inner')[['id']]
zero_pred['sales'] = 0
print(f"  Zero predictions: {len(zero_pred)} rows")

# Filter active products
train = train.merge(zero_sales, on=['store_nbr', 'family'], how='left', indicator=True)
train = train[train['_merge'] == 'left_only'].drop('_merge', axis=1)
test = test.merge(zero_sales, on=['store_nbr', 'family'], how='left', indicator=True)
test_active = test[test['_merge'] == 'left_only'].drop('_merge', axis=1)

# =============================================================================
# FILTER STORE OPENING DATES
# =============================================================================
print("\n[3/12] Filtering late-opening stores...")

store_opening = {
    52: '2017-04-20', 22: '2015-10-09', 42: '2015-08-21',
    21: '2015-07-24', 29: '2015-03-20', 20: '2015-02-13',
    53: '2014-05-29', 36: '2013-05-09'
}

for store, opened in store_opening.items():
    train = train[~((train['store_nbr'] == store) & (train['date'] < pd.Timestamp(opened)))]

# =============================================================================
# OIL FEATURES
# =============================================================================
print("\n[4/12] Oil features...")

oil = oil.rename(columns={'dcoilwtico': 'oil_price'})
all_dates = pd.date_range(train['date'].min(), test['date'].max())
oil = oil.set_index('date').reindex(all_dates).reset_index()
oil.columns = ['date', 'oil_price']
oil['oil_price'] = oil['oil_price'].interpolate(method='linear').ffill().bfill()

oil['oil_ma7'] = oil['oil_price'].rolling(7, min_periods=1).mean()
oil['oil_ma28'] = oil['oil_price'].rolling(28, min_periods=1).mean()
oil['oil_trend'] = oil['oil_price'] - oil['oil_ma28']
oil['oil_volatility'] = oil['oil_price'].rolling(14, min_periods=1).std().fillna(0)

# =============================================================================
# HOLIDAY FEATURES (COMPREHENSIVE)
# =============================================================================
print("\n[5/12] Holiday features...")

# National holidays
nat_holidays = holidays[
    (holidays['locale'] == 'National') &
    (holidays['type'].isin(['Holiday', 'Additional', 'Bridge'])) &
    (holidays['transferred'] == False)
]
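# Caveat: this keeps only holidays celebrated on their original date. Days a
# holiday was moved *to* appear as type == 'Transfer' rows and are not flagged
# here; adding them is a possible refinement.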

# Create holiday df
holiday_df = pd.DataFrame({'date': all_dates})
holiday_df['is_holiday'] = holiday_df['date'].isin(nat_holidays['date']).astype(int)

# Special holidays
special_holidays = {
    'is_christmas': ['Navidad'],
    'is_newyear': ['Año Nuevo', 'Primer Dia del Ano'],
    'is_goodfriday': ['Viernes Santo'],
    'is_carnival': ['Carnaval'],
    'is_independence': ['Independencia', 'Primer Grito', 'Batalla'],
    'is_labor_day': ['Trabajo'],
    'is_mothers_day': ['Madre'],
    'is_fathers_day': ['Padre']
}

for col, patterns in special_holidays.items():
    mask = nat_holidays['description'].str.contains('|'.join(patterns), case=False, na=False)
    dates = nat_holidays[mask]['date']
    holiday_df[col] = holiday_df['date'].isin(dates).astype(int)

# Earthquake (April 16, 2016)
eq_date = pd.Timestamp('2016-04-16')
holiday_df['is_earthquake_week'] = ((holiday_df['date'] >= eq_date) &
                                     (holiday_df['date'] <= eq_date + pd.Timedelta(days=7))).astype(int)
holiday_df['is_earthquake_month'] = ((holiday_df['date'] >= eq_date) &
                                      (holiday_df['date'] <= eq_date + pd.Timedelta(days=30))).astype(int)

# Days to/from holiday
holiday_dates = nat_holidays['date'].unique()
def days_to_nearest_holiday(date, holiday_dates):
    """Return (days until the next holiday, days since the previous one)."""
    diffs = [(h - date).days for h in holiday_dates]
    future = [d for d in diffs if d >= 0]
    past = [d for d in diffs if d < 0]
    return (min(future) if future else 365, -max(past) if past else 365)

# Compute once per date instead of applying the function twice
nearest = holiday_df['date'].apply(lambda d: days_to_nearest_holiday(d, holiday_dates))
holiday_df[['days_to_holiday', 'days_from_holiday']] = pd.DataFrame(
    nearest.tolist(), index=holiday_df.index)

# =============================================================================
# COMBINE DATA
# =============================================================================
print("\n[6/12] Combining data...")

train['is_train'] = 1
test_active['is_train'] = 0
test_active['sales'] = np.nan

df = pd.concat([train, test_active], ignore_index=True)
df = df.merge(stores, on='store_nbr', how='left')
df = df.merge(oil, on='date', how='left')
df = df.merge(holiday_df, on='date', how='left')
df = df.merge(transactions, on=['date', 'store_nbr'], how='left')
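# transactions.csv only covers the training period, so every test row is NaN
# after this merge; fill with the per-store median, then a global fallback.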

df['transactions'] = df.groupby('store_nbr')['transactions'].transform(lambda x: x.fillna(x.median()))
df['transactions'] = df['transactions'].fillna(df['transactions'].median())

# =============================================================================
# FEATURE ENGINEERING
# =============================================================================
print("\n[7/12] Feature engineering...")

# Date features
df['year'] = df['date'].dt.year
df['month'] = df['date'].dt.month
df['day'] = df['date'].dt.day
df['dayofweek'] = df['date'].dt.dayofweek
df['dayofyear'] = df['date'].dt.dayofyear
df['weekofyear'] = df['date'].dt.isocalendar().week.astype(int)
df['quarter'] = df['date'].dt.quarter
df['is_weekend'] = (df['dayofweek'] >= 5).astype(int)
df['is_month_start'] = (df['day'] <= 5).astype(int)
df['is_month_end'] = (df['day'] >= 26).astype(int)
df['is_payday'] = ((df['day'] == 15) | (df['day'] >= 28)).astype(int)
df['week_of_month'] = (df['day'] - 1) // 7 + 1

# Cyclic features
for col, period in [('month', 12), ('dayofweek', 7), ('day', 31), ('dayofyear', 365)]:
    df[f'{col}_sin'] = np.sin(2 * np.pi * df[col] / period)
    df[f'{col}_cos'] = np.cos(2 * np.pi * df[col] / period)

# Categoricals
for col in ['family', 'city', 'state', 'type']:
    df[f'{col}_encoded'] = df[col].astype('category').cat.codes

# Promotion features
df['promo_intensity'] = df.groupby(['date', 'store_nbr'])['onpromotion'].transform('sum')
df['family_promo_rate'] = df.groupby(['date', 'family'])['onpromotion'].transform('mean')
df['store_promo_rate'] = df.groupby(['date', 'store_nbr'])['onpromotion'].transform('mean')

# =============================================================================
# LAG FEATURES (MINIMUM LAG = 16!)
# =============================================================================
print("\n[8/12] Lag features (min lag = 16)...")

df = df.sort_values(['store_nbr', 'family', 'date']).reset_index(drop=True)

# Weekly lags at proper offset
for lag in [16, 17, 18, 19, 20, 21, 28, 35, 42, 49, 56]:
    df[f'sales_lag_{lag}'] = df.groupby(['store_nbr', 'family'])['sales'].shift(lag)

# Yearly seasonality
for lag in [364, 365, 371, 728, 729]:
    df[f'sales_lag_{lag}'] = df.groupby(['store_nbr', 'family'])['sales'].shift(lag)

# Rolling stats on a 16-day-shifted series (leak-free). Group again after the
# shift so windows never bleed across (store, family) boundaries.
df['_sales_shift16'] = df.groupby(['store_nbr', 'family'])['sales'].shift(16)
shift_grp = df.groupby(['store_nbr', 'family'])['_sales_shift16']
for window in [7, 14, 28, 56, 91]:
    df[f'sales_roll_mean_{window}'] = shift_grp.transform(
        lambda x: x.rolling(window, min_periods=1).mean())
    df[f'sales_roll_std_{window}'] = shift_grp.transform(
        lambda x: x.rolling(window, min_periods=1).std()).fillna(0)
    df[f'sales_roll_median_{window}'] = shift_grp.transform(
        lambda x: x.rolling(window, min_periods=1).median())

# EWM on the same shifted series, also within each (store, family) group
for span in [7, 14, 28]:
    df[f'sales_ewm_{span}'] = shift_grp.transform(
        lambda x: x.ewm(span=span, min_periods=1).mean())
df = df.drop(columns=['_sales_shift16'])

# Historical expanding means. These must be computed in chronological order:
# with the frame sorted by (store, family, date), grouping by 'family' or
# 'store_nbr' alone would fold one store's future into another store's past.
# Sort by date (stable), compute, then restore the original ordering.
df = df.sort_values('date', kind='mergesort').reset_index(drop=True)

df['dow_mean'] = df.groupby(['store_nbr', 'family', 'dayofweek'])['sales'].transform(
    lambda x: x.shift(16).expanding().mean())
df['store_family_mean'] = df.groupby(['store_nbr', 'family'])['sales'].transform(
    lambda x: x.shift(16).expanding().mean())
df['family_mean'] = df.groupby(['family'])['sales'].transform(
    lambda x: x.shift(16).expanding().mean())
df['store_mean'] = df.groupby(['store_nbr'])['sales'].transform(
    lambda x: x.shift(16).expanding().mean())

# Month-specific family means (also chronological now)
df['month_family_mean'] = df.groupby(['month', 'family'])['sales'].transform(
    lambda x: x.shift(16).expanding().mean())

df = df.sort_values(['store_nbr', 'family', 'date']).reset_index(drop=True)

print(f"  Features created")

# =============================================================================
# PREPARE DATA
# =============================================================================
print("\n[9/12] Preparing data...")

base_features = [
    'store_nbr', 'onpromotion', 'cluster', 'transactions',
    'year', 'month', 'day', 'dayofweek', 'dayofyear', 'weekofyear', 'quarter',
    'is_weekend', 'is_month_start', 'is_month_end', 'is_payday', 'week_of_month',
    'month_sin', 'month_cos', 'dayofweek_sin', 'dayofweek_cos', 'day_sin', 'day_cos',
    'dayofyear_sin', 'dayofyear_cos',
    'oil_price', 'oil_ma7', 'oil_ma28', 'oil_trend', 'oil_volatility',
    'is_holiday', 'is_christmas', 'is_newyear', 'is_goodfriday', 'is_carnival',
    'is_independence', 'is_labor_day', 'is_mothers_day', 'is_fathers_day',
    'is_earthquake_week', 'is_earthquake_month',
    'days_to_holiday', 'days_from_holiday',
    'family_encoded', 'city_encoded', 'state_encoded', 'type_encoded',
    'promo_intensity', 'family_promo_rate', 'store_promo_rate'
]

lag_features = [c for c in df.columns if any(x in c for x in
    ['lag_', 'roll_', 'ewm_', 'dow_mean', 'store_family_mean', 'family_mean', 'store_mean', 'month_family'])]
all_features = base_features + lag_features
all_features = [f for f in all_features if f in df.columns]

# Training data: restrict to 2016+ (earlier rows still feed the long lags computed above)
train_df = df[(df['is_train'] == 1) & (df['date'] >= '2016-01-01')].copy()
train_df = train_df.dropna(subset=['sales'])

for col in lag_features:
    if col in train_df.columns:
        train_df[col] = train_df[col].fillna(0)

X_train = train_df[all_features]
y_train = train_df['sales']
y_train_log = np.log1p(y_train)
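
# The competition metric is RMSLE; minimizing RMSE on log1p(sales) is
# equivalent, so all models train on the log target.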

# Hold-out validation: last 16 days of training, matching the 16-day test horizon
val_start = train_df['date'].max() - pd.Timedelta(days=15)
train_mask = train_df['date'] < val_start
val_mask = train_df['date'] >= val_start

X_tr, y_tr = X_train[train_mask.values], y_train_log[train_mask.values]
X_val, y_val = X_train[val_mask.values], y_train_log[val_mask.values]

print(f"  Train: {len(X_tr)}, Val: {len(X_val)}, Features: {len(all_features)}")

# =============================================================================
# TRAIN ENSEMBLE (LightGBM + XGBoost)
# =============================================================================
print("\n[10/12] Training ensemble...")

models = []

# ===== LightGBM Tweedie =====
print("\n  [1/4] LightGBM Tweedie...")
params_lgb1 = {
    'objective': 'tweedie', 'tweedie_variance_power': 1.1,
    'metric': 'rmse', 'boosting_type': 'gbdt',
    'learning_rate': 0.02, 'num_leaves': 127, 'max_depth': 10,
    'min_child_samples': 50, 'feature_fraction': 0.75, 'bagging_fraction': 0.75,
    'bagging_freq': 1, 'lambda_l1': 0.1, 'lambda_l2': 1.0,
    'verbose': -1, 'n_jobs': -1, 'seed': 42
}

train_data = lgb.Dataset(X_tr, label=y_tr)
val_data = lgb.Dataset(X_val, label=y_val, reference=train_data)

model_lgb1 = lgb.train(params_lgb1, train_data, num_boost_round=5000,
    valid_sets=[val_data], callbacks=[lgb.early_stopping(200), lgb.log_evaluation(500)])
models.append(('lgb_tweedie', model_lgb1))

# ===== LightGBM RMSE =====
print("\n  [2/4] LightGBM RMSE...")
params_lgb2 = params_lgb1.copy()
params_lgb2['objective'] = 'regression'
del params_lgb2['tweedie_variance_power']
params_lgb2['num_leaves'] = 255
params_lgb2['max_depth'] = 12

model_lgb2 = lgb.train(params_lgb2, train_data, num_boost_round=5000,
    valid_sets=[val_data], callbacks=[lgb.early_stopping(200), lgb.log_evaluation(500)])
models.append(('lgb_rmse', model_lgb2))

# ===== XGBoost =====
print("\n  [3/4] XGBoost...")
params_xgb = {
    'objective': 'reg:squarederror',
    'learning_rate': 0.03, 'max_depth': 10, 'n_estimators': 3000,
    'subsample': 0.8, 'colsample_bytree': 0.8,
    'reg_alpha': 0.1, 'reg_lambda': 1.0,
    'random_state': 42, 'n_jobs': -1, 'verbosity': 0,
    'early_stopping_rounds': 200
}
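# Note: early_stopping_rounds as a constructor argument needs xgboost >= 1.6;
# on older versions, pass it to fit() instead.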

model_xgb = xgb.XGBRegressor(**params_xgb)
model_xgb.fit(X_tr, y_tr, eval_set=[(X_val, y_val)], verbose=100)
models.append(('xgb', model_xgb))

# ===== LightGBM Huber =====
print("\n  [4/4] LightGBM Huber...")
params_lgb3 = params_lgb1.copy()
params_lgb3['objective'] = 'huber'
del params_lgb3['tweedie_variance_power']

model_lgb3 = lgb.train(params_lgb3, train_data, num_boost_round=5000,
    valid_sets=[val_data], callbacks=[lgb.early_stopping(200), lgb.log_evaluation(500)])
models.append(('lgb_huber', model_lgb3))

# =============================================================================
# OPTIMIZE ENSEMBLE WEIGHTS
# =============================================================================
print("\n[11/12] Optimizing ensemble weights...")

# All four models were trained on log1p(sales); invert with expm1
preds = {name: np.expm1(model.predict(X_val)) for name, model in models}

val_actual = np.expm1(y_val)

# Simple grid search for weights
best_rmsle = float('inf')
best_weights = None

weight_options = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]
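# 6^4 = 1296 candidate weight vectors; only those summing to ~1 are evaluated.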

for w1, w2, w3, w4 in product(weight_options, repeat=4):
    if abs(w1 + w2 + w3 + w4 - 1.0) > 0.01:
        continue

    pred = (w1 * preds['lgb_tweedie'] + w2 * preds['lgb_rmse'] +
            w3 * preds['xgb'] + w4 * preds['lgb_huber'])
    pred = np.maximum(pred, 0)

    rmsle = np.sqrt(np.mean((np.log1p(pred) - np.log1p(val_actual))**2))
    if rmsle < best_rmsle:
        best_rmsle = rmsle
        best_weights = (w1, w2, w3, w4)

print(f"\n  Best weights: {best_weights}")
print(f"  Best Validation RMSLE: {best_rmsle:.5f}")

# Individual scores
for name in preds:
    pred = np.maximum(preds[name], 0)
    rmsle = np.sqrt(np.mean((np.log1p(pred) - np.log1p(val_actual))**2))
    print(f"  {name}: {rmsle:.5f}")

# =============================================================================
# PREDICT TEST
# =============================================================================
print("\n[12/12] Predicting test...")

test_df = df[df['is_train'] == 0].copy()
for col in lag_features:
    if col in test_df.columns:
        test_df[col] = test_df[col].fillna(0)

X_test = test_df[all_features].fillna(0)

# Ensemble prediction (same expm1 inversion as validation)
w1, w2, w3, w4 = best_weights
test_preds = {name: np.expm1(model.predict(X_test)) for name, model in models}

final_pred = (w1 * test_preds['lgb_tweedie'] + w2 * test_preds['lgb_rmse'] +
              w3 * test_preds['xgb'] + w4 * test_preds['lgb_huber'])
final_pred = np.maximum(final_pred, 0)

# Create submission
submission = pd.DataFrame({'id': test_df['id'].astype(int), 'sales': final_pred})
submission = pd.concat([submission, zero_pred], ignore_index=True)
submission = submission.sort_values('id').reset_index(drop=True)
submission.to_csv('submission.csv', index=False)

print(f"\n  Submission: {len(submission)} rows")
print(f"  Sales range: {submission['sales'].min():.2f} - {submission['sales'].max():.2f}")
print(f"  Sales mean: {submission['sales'].mean():.2f}")

# Feature importance
print("\n[Top 20 Features]")
imp = pd.DataFrame({
    'feature': all_features,
    'importance': model_lgb1.feature_importance()
}).sort_values('importance', ascending=False)
print(imp.head(20).to_string(index=False))

print("\n" + "="*70)
print(f"   V10 ULTIMATE - Ensemble RMSLE: {best_rmsle:.5f}")
print(f"   Best Weights: {best_weights}")
print("="*70)
