
"""Callbacks library."""

from collections import OrderedDict
from dataclasses import dataclass
from functools import partial
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union

from .basic import (
    Booster,
    _ConfigAliases,
    _LGBM_BoosterEvalMethodResultType,
    _LGBM_BoosterEvalMethodResultWithStandardDeviationType,
    _log_info,
    _log_warning,
)

if TYPE_CHECKING:
    from .engine import CVBooster

__all__ = [
    "EarlyStopException",
    "early_stopping",
    "log_evaluation",
    "record_evaluation",
    "reset_parameter",
]

_EvalResultDict = Dict[str, Dict[str, List[Any]]]
_EvalResultTuple = Union[
    _LGBM_BoosterEvalMethodResultType,
    _LGBM_BoosterEvalMethodResultWithStandardDeviationType,
]
_ListOfEvalResultTuples = Union[
    List[_LGBM_BoosterEvalMethodResultType],
    List[_LGBM_BoosterEvalMethodResultWithStandardDeviationType],
]


class EarlyStopException(Exception):
    """Exception of early stopping.

    Raise this from a callback passed in via keyword argument ``callbacks``
    in ``cv()`` or ``train()`` to trigger early stopping.
    """

    def __init__(self, best_iteration: int, best_score: _ListOfEvalResultTuples) -> None:
        """Create early stopping exception.

        Parameters
        ----------
        best_iteration : int
            The best iteration stopped.
            0-based... pass ``best_iteration=2`` to indicate that the third iteration was the best one.
        best_score : list of (eval_name, metric_name, eval_result, is_higher_better) tuple or (eval_name, metric_name, eval_result, is_higher_better, stdv) tuple
            Scores for each metric, on each validation set, as of the best iteration.
        N)super__init__r   r   )selfr   r   	__class__s      q/var/www/html/bestrading.cuttalo.com/services/ml-inference/venv/lib/python3.11/site-packages/lightgbm/callback.pyr!   zEarlyStopException.__init__/   s.     	,$    )__name__
__module____qualname____doc__int_ListOfEvalResultTuplesr!   __classcell__)r#   s   @r$   r   r   (   s^         %s %8O %TX % % % % % % % % % %r%   r   c                   x    e Zd ZU eedf         ed<   eeef         ed<   e	ed<   e	ed<   e	ed<   e
e         ed<   dS )	CallbackEnvr   modelparams	iterationbegin_iterationend_iterationevaluation_result_listN)r&   r'   r(   r   r   __annotations__r	   strr   r*   r   r+    r%   r$   r.   r.   @   so         +%&&&&cNNNN$%<======r%   r.   envr   c                 8    ddl m} t          | j        |          S )z.Check if model in callback env is a CVBooster.r   r   )enginer   
isinstancer/   )r8   r   s     r$   _is_using_cvr<   J   s(     "!!!!!ci+++r%   value	show_stdvc                 p    | ^}}}}| d| d|d}|r"t          |           dk    r|d| d         dz  }|S )zFormat metric string.z's z: g   z +    )len)r=   r>   dataset_namemetric_namemetric_value_outs          r$   _format_eval_resultrI   R   se    27/L+|a
=
=k
=
=\
=
=
=C  "SZZ1__!U1X!!!!Jr%   c                   8    e Zd ZdZddededdfdZd	eddfd
ZdS )_LogEvaluationCallbackz'Internal log evaluation callable class.r   Tperiodr>   r   Nc                 >    d| _         d| _        || _        || _        d S )N
   F)orderbefore_iterationrL   r>   )r"   rL   r>   s      r$   r!   z_LogEvaluationCallback.__init__`   s#    
 %"r%   r8   c                       j         dk    rb|j        r]|j        dz    j         z  dk    rId                     fd|j        D                       }t	          d|j        dz    d|            d S d S d S d S )Nr   r   	c                 :    g | ]}t          |j                  S r7   )rI   r>   ).0xr"   s     r$   
<listcomp>z3_LogEvaluationCallback.__call__.<locals>.<listcomp>i   s&    kkk1 3At~ F Fkkkr%   []	)rL   r4   r1   joinr   )r"   r8   results   `  r$   __call__z_LogEvaluationCallback.__call__g   s    ;??s9?s}q?PTXT_>_cd>d>dYYkkkkPSPjkkkllF8#-!+888899999 ???>d>dr%   r   T)	r&   r'   r(   r)   r*   boolr!   r.   r[   r7   r%   r$   rK   rK   ]   sj        11# #s #4 #4 # # # #:K :D : : : : : :r%   rK   TrL   c                 $    t          | |          S )a  Create a callback that logs the evaluation results.

    By default, messages are written to standard output.
    Use the ``register_logger()`` function to register a custom logger.

    Note
    ----
    Requires at least one validation data.

    Parameters
    ----------
    period : int, optional (default=1)
        The period to log the evaluation results.
        The last boosting stage or the boosting stage found by using ``early_stopping`` callback is also logged.
    show_stdv : bool, optional (default=True)
        Whether to log stdv (if provided).

    Returns
    -------
    callback : _LogEvaluationCallback
        The callback that logs the evaluation results every ``period`` boosting iteration(s).
    rL   r>   )rK   r_   s     r$   r   r   m   s    . "9EEEEr%   c                   B    e Zd ZdZdeddfdZdeddfdZdeddfdZdS )	_RecordEvaluationCallbackz*Internal record evaluation callable class.eval_resultr   Nc                 x    d| _         d| _        t          |t                    st	          d          || _        d S )N   Fz"eval_result should be a dictionary)rO   rP   r;   dict	TypeErrorrb   )r"   rb   s     r$   r!   z"_RecordEvaluationCallback.__init__   sA    
 %+t,, 	B@AAA&r%   r8   c                    |j         t          d          | j                                         |j         D ]}|^}}}| j                            |t                                 t          |          dk    r"| j        |                             |g            c| j        |                             | dg            | j        |                             | dg            d S Nzrecord_evaluation() callback enabled but no evaluation results found. This is a probably bug in LightGBM. Please report it at https://github.com/microsoft/LightGBM/issuesrB   z-meanz-stdv)r4   RuntimeErrorrb   clear
setdefaultr   rC   )r"   r8   itemrD   rE   rG   s         r$   _initz_RecordEvaluationCallback._init   s   %-S   	   . 	U 	UD,0)L+''kmmDDD4yyA~~ .99+rJJJJ .99[:O:O:OQSTTT .99[:O:O:OQSTTTT	U 	Ur%   c                    |j         |j        k    r|                     |           |j        t	          d          |j        D ]}|^}}}}t          |          dk    r'| j        |         |                             |           B|d         }| j        |         | d                             |           | j        |         | d                             |           d S rh   )r1   r2   rm   r4   ri   rC   rb   append)r"   r8   rl   rD   rE   rF   rG   metric_std_devs           r$   r[   z"_RecordEvaluationCallback.__call__   s   =C///JJsOOO%-S   . 
	] 
	]D:>7L+|a4yyA~~ .{;BB<PPPP "&a .+/D/D/DELL\ZZZ .+/D/D/DELL^\\\\
	] 
	]r%   )	r&   r'   r(   r)   _EvalResultDictr!   r.   rm   r[   r7   r%   r$   ra   ra      s        44'O ' ' ' ' 'U U U U U U ]K ]D ] ] ] ] ] ]r%   ra   rb   c                 "    t          |           S )a  Create a callback that records the evaluation history into ``eval_result``.

    Parameters
    ----------
    eval_result : dict
        Dictionary used to store all evaluation results of all validation sets.
        This should be initialized outside of your call to ``record_evaluation()`` and should be empty.
        Any initial contents of the dictionary will be deleted.

        .. rubric:: Example

        With two validation sets named 'eval' and 'train', and one evaluation metric named 'logloss'
        this dictionary after finishing a model training process will have the following structure:

        .. code-block::

            {
             'train':
                 {
                  'logloss': [0.48253, 0.35953, ...]
                 },
             'eval':
                 {
                  'logloss': [0.480385, 0.357756, ...]
                 }
            }

    Returns
    -------
    callback : _RecordEvaluationCallback
        The callback that records the evaluation history into the passed dictionary.
    rb   )ra   rs   s    r$   r   r      s    B %====r%   c                   B    e Zd ZdZdeeef         ddfdZdeddfdZ	dS )_ResetParameterCallbackz(Internal reset parameter callable class.kwargsr   Nc                 0    d| _         d| _        || _        d S )NrN   T)rO   rP   rv   )r"   rv   s     r$   r!   z _ResetParameterCallback.__init__   s    


class _ResetParameterCallback:
    """Internal reset parameter callable class."""

    def __init__(self, **kwargs: Union[list, Callable]) -> None:
        self.order = 10
        self.before_iteration = True

        self.kwargs = kwargs

    def __call__(self, env: CallbackEnv) -> None:
        new_parameters = {}
        for key, value in self.kwargs.items():
            if isinstance(value, list):
                if len(value) != env.end_iteration - env.begin_iteration:
                    raise ValueError(f"Length of list {key} has to be equal to 'num_boost_round'.")
                new_param = value[env.iteration - env.begin_iteration]
            elif callable(value):
                new_param = value(env.iteration - env.begin_iteration)
            else:
                raise ValueError(
                    "Only list and callable values are supported "
                    "as a mapping from boosting round index to new parameter value."
                )
            if new_param != env.params.get(key, None):
                new_parameters[key] = new_param
        if new_parameters:
            if isinstance(env.model, Booster):
                env.model.reset_parameter(new_parameters)
            else:
                # env.model is a CVBooster, which holds one Booster per fold
                for booster in env.model.boosters:
                    booster.reset_parameter(new_parameters)
            env.params.update(new_parameters)


def reset_parameter(**kwargs: Union[list, Callable]) -> _ResetParameterCallback:
    """Create a callback that resets the parameter after the first iteration.

    .. note::

        The initial parameter will still take effect on the first iteration.

    Parameters
    ----------
    **kwargs : value should be list or callable
        List of parameters for each boosting round,
        or a callable that calculates the parameter from the
        current round number (e.g. producing learning-rate decay).
        If list lst, parameter = lst[current_round].
        If callable func, parameter = func(current_round).

    Returns
    -------
    callback : _ResetParameterCallback
        The callback that resets the parameter after the first iteration.
    r7   )ru   )rv   s    r$   r   r      s    * #,,V,,,r%   c                       e Zd ZdZ	 	 	 ddedededeeee         f         d	d


class _EarlyStoppingCallback:
    """Internal early stopping callable class."""

    def __init__(
        self,
        stopping_rounds: int,
        first_metric_only: bool = False,
        verbose: bool = True,
        min_delta: Union[float, List[float]] = 0.0,
    ) -> None:
        self.enabled = _should_enable_early_stopping(stopping_rounds)

        self.order = 30
        self.before_iteration = False

        self.stopping_rounds = stopping_rounds
        self.first_metric_only = first_metric_only
        self.verbose = verbose
        self.min_delta = min_delta

        self._reset_storages()

    def _reset_storages(self) -> None:
        self.best_score: List[float] = []
        self.best_iter: List[int] = []
        self.best_score_list: List[_ListOfEvalResultTuples] = []
        self.cmp_op: List[Callable[[float, float], bool]] = []
        self.first_metric = ""

    def _gt_delta(self, curr_score: float, best_score: float, delta: float) -> bool:
        return curr_score > best_score + delta

    def _lt_delta(self, curr_score: float, best_score: float, delta: float) -> bool:
        return curr_score < best_score - delta

    def _is_train_set(self, dataset_name: str, env: CallbackEnv) -> bool:
        """Check, by name, if a given Dataset is the training data."""
        # for cv(), the training data is always named "train"
        if _is_using_cv(env) and dataset_name == "train":
            return True

        # for train(), the training data can be passed to valid_sets under any name
        if isinstance(env.model, Booster) and dataset_name == env.model._train_data_name:
            return True

        return False

    def _init(self, env: CallbackEnv) -> None:
        if env.evaluation_result_list is None or env.evaluation_result_list == []:
            raise ValueError("For early stopping, at least one dataset and eval metric is required for evaluation")

        is_dart = any(env.params.get(alias, "") == "dart" for alias in _ConfigAliases.get("boosting"))
        if is_dart:
            self.enabled = False
            _log_warning("Early stopping is not available in dart mode")
            return

        # get details of the first dataset
        first_dataset_name, first_metric_name, *_ = env.evaluation_result_list[0]

        # disable early stopping when only the training set is being evaluated
        if isinstance(env.model, Booster):
            only_train_set = len(env.evaluation_result_list) == 1 and self._is_train_set(
                dataset_name=first_dataset_name,
                env=env,
            )
            if only_train_set:
                self.enabled = False
                _log_warning("Only training set found, disabling early stopping.")
                return

        if self.verbose:
            _log_info(f"Training until validation scores don't improve for {self.stopping_rounds} rounds")

        self._reset_storages()

        n_metrics = len({m[1] for m in env.evaluation_result_list})
        n_datasets = len(env.evaluation_result_list) // n_metrics
        if isinstance(self.min_delta, list):
            if not all(t >= 0 for t in self.min_delta):
                raise ValueError("Values for early stopping min_delta must be non-negative.")
            if len(self.min_delta) == 0:
                if self.verbose:
                    _log_info("Disabling min_delta for early stopping.")
                deltas = [0.0] * n_datasets * n_metrics
            elif len(self.min_delta) == 1:
                if self.verbose:
                    _log_info(f"Using {self.min_delta[0]} as min_delta for all metrics.")
                deltas = self.min_delta * n_datasets * n_metrics
            else:
                if len(self.min_delta) != n_metrics:
                    raise ValueError("Must provide a single value for min_delta or as many as metrics.")
                if self.first_metric_only and self.verbose:
                    _log_info(f"Using only {self.min_delta[0]} as early stopping min_delta.")
                deltas = self.min_delta * n_datasets
        else:
            if self.min_delta < 0:
                raise ValueError("Early stopping min_delta must be non-negative.")
            if self.min_delta > 0 and n_metrics > 1 and not self.first_metric_only and self.verbose:
                _log_info(f"Using {self.min_delta} as min_delta for all metrics.")
            deltas = [self.min_delta] * n_datasets * n_metrics

        self.first_metric = first_metric_name
        for eval_ret, delta in zip(env.evaluation_result_list, deltas):
            self.best_iter.append(0)
            if eval_ret[3]:  # greater is better
                self.best_score.append(float("-inf"))
                self.cmp_op.append(partial(self._gt_delta, delta=delta))
            else:
                self.best_score.append(float("inf"))
                self.cmp_op.append(partial(self._lt_delta, delta=delta))

    def _final_iteration_check(self, *, env: CallbackEnv, metric_name: str, i: int) -> None:
        if env.iteration == env.end_iteration - 1:
            if self.verbose:
                best_score_str = "\t".join([_format_eval_result(x, show_stdv=True) for x in self.best_score_list[i]])
                _log_info(f"Did not meet early stopping. Best iteration is:\n[{self.best_iter[i] + 1}]\t{best_score_str}")
                if self.first_metric_only:
                    _log_info(f"Evaluated only: {metric_name}")
            raise EarlyStopException(self.best_iter[i], self.best_score_list[i])

    def __call__(self, env: CallbackEnv) -> None:
        if env.iteration == env.begin_iteration:
            self._init(env)
        if not self.enabled:
            return
        if env.evaluation_result_list is None:
            raise RuntimeError(
                "early_stopping() callback enabled but no evaluation results found. "
                "This is probably a bug in LightGBM. "
                "Please report it at https://github.com/microsoft/LightGBM/issues"
            )
        # self.best_score_list is still empty the first time this is called
        first_time_updating_best_score_list = self.best_score_list == []
        for i in range(len(env.evaluation_result_list)):
            dataset_name, metric_name, metric_value, *_ = env.evaluation_result_list[i]
            if first_time_updating_best_score_list or self.cmp_op[i](metric_value, self.best_score[i]):
                self.best_score[i] = metric_value
                self.best_iter[i] = env.iteration
                if first_time_updating_best_score_list:
                    self.best_score_list.append(env.evaluation_result_list)
                else:
                    self.best_score_list[i] = env.evaluation_result_list
            if self.first_metric_only and self.first_metric != metric_name:
                continue  # use only the first metric for early stopping
            if self._is_train_set(dataset_name=dataset_name, env=env):
                continue  # train metric is not used for early stopping
            if env.iteration - self.best_iter[i] >= self.stopping_rounds:
                if self.verbose:
                    eval_result_str = "\t".join(
                        [_format_eval_result(x, show_stdv=True) for x in self.best_score_list[i]]
                    )
                    _log_info(f"Early stopping, best iteration is:\n[{self.best_iter[i] + 1}]\t{eval_result_str}")
                    if self.first_metric_only:
                        _log_info(f"Evaluated only: {metric_name}")
                raise EarlyStopException(self.best_iter[i], self.best_score_list[i])
            self._final_iteration_check(env=env, metric_name=metric_name, i=i)


def _should_enable_early_stopping(stopping_rounds: Any) -> bool:
    """Check if early stopping should be activated.

    This function will evaluate to True if the early stopping callback should be
    activated (i.e. stopping_rounds > 0).  It also provides an informative error if the
    type is not int.
    """
    if not isinstance(stopping_rounds, int):
        raise TypeError(f"early_stopping_round should be an integer. Got '{type(stopping_rounds).__name__}'")
    return stopping_rounds > 0


def early_stopping(
    stopping_rounds: int,
    first_metric_only: bool = False,
    verbose: bool = True,
    min_delta: Union[float, List[float]] = 0.0,
) -> _EarlyStoppingCallback:
    """Create a callback that activates early stopping.

    Activates early stopping.
    The model will train until the validation score doesn't improve by at least ``min_delta``.
    Validation score needs to improve at least every ``stopping_rounds`` round(s)
    to continue training.
    Requires at least one validation data and one metric.
    If there's more than one, all of them will be checked; the training data is ignored.
    To check only the first metric, set ``first_metric_only`` to True.
    The index of iteration that has the best performance will be saved in the ``best_iteration`` attribute of a model.

    Parameters
    ----------
    stopping_rounds : int
        The number of rounds without improvement after which training stops.
    first_metric_only : bool, optional (default=False)
        Whether to use only the first metric for early stopping.
    verbose : bool, optional (default=True)
        Whether to log messages with early stopping information.
        By default, messages are written to standard output.
        Use the ``register_logger()`` function to register a custom logger.
    min_delta : float or list of float, optional (default=0.0)
        Minimum improvement in score to keep training.
        If float, this single value is used for all metrics.
        If list, its length should match the total number of metrics.

        .. versionadded:: 4.0.0

    Returns
    -------
    callback : _EarlyStoppingCallback
        The callback that activates early stopping.
    r   r   r   r   )r   r   s       r$   r   r     s(    N "'+	   r%   Nr\   r   )/r)   collectionsr   dataclassesr   	functoolsr   typingr   r   r   r	   r
   r   r   basicr   r   r   r   r   r   r:   r   __all__r6   rq   _EvalResultTupler+   	Exceptionr   r.   r]   r<   rI   rK   r*   r   ra   r   ru   rz   r   r   r   r   r   r7   r%   r$   <module>r      s     # # # # # # ! ! ! ! ! !       L L L L L L L L L L L L L L L L L L                 "!!!!!!   sDd3i001%:<   	*+	?@B % % % % % % % %0 > > > > > > > >,k ,d , , , ,/ D S    : : : : : : : : F F3 Ft F?U F F F F4-] -] -] -] -] -] -] -]`!>4T#tCy.-A(A#B !>x !> !> !> !>H .  .  .  .  .  .  .  .F-eD(N3 - - - - -0aO aO aO aO aO aO aO aOH	3 	4 	 	 	 	 $+.	, ,,, , UDK'(	,
 , , , , , ,r%   