Optimizer factory class.
Classes
class OptimizerFactory
: Optimizer factory class.
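
A minimal usage sketch of the factory, assuming the config-driven workflow of the Model Garden optimization package; the 'sgd', 'stepwise', and 'linear' keys are taken from the member tables below, and exact config fields may differ across library versions:

from official.modeling import optimization
from official.modeling.optimization import optimizer_factory

# Describe the optimizer, learning-rate schedule, and warmup in one config.
params = {
    'optimizer': {
        'type': 'sgd',
        'sgd': {'momentum': 0.9}
    },
    'learning_rate': {
        'type': 'stepwise',
        'stepwise': {'boundaries': [10000, 20000],
                     'values': [0.1, 0.01, 0.001]}
    },
    'warmup': {
        'type': 'linear',
        'linear': {'warmup_steps': 500, 'warmup_learning_rate': 0.01}
    }
}
opt_config = optimization.OptimizationConfig(params)

# The factory builds the LR schedule first, then wraps it in the optimizer.
opt_factory = optimizer_factory.OptimizerFactory(opt_config)
lr = opt_factory.build_learning_rate()
optimizer = opt_factory.build_optimizer(lr)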
Functions
register_optimizer_cls(...)
: Register a custom optimizer class under a new key.
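
A hedged sketch of registering a custom optimizer. MyOptimizer is a hypothetical subclass used only for illustration; which optimizer base classes are accepted depends on the library version:

import tensorflow as tf
from official.modeling.optimization import optimizer_factory

class MyOptimizer(tf.keras.optimizers.legacy.SGD):
    """Hypothetical custom optimizer, shown only to illustrate registration."""
    pass

# Register under a key not already present in the tables below;
# re-registering an existing key is expected to raise an error.
optimizer_factory.register_optimizer_cls('my_sgd', MyOptimizer)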
Other Members

LEGACY_OPTIMIZERS_CLS
{
'adafactor': 'Unimplemented',
'adagrad': <class 'keras.src.optimizers.legacy.adagrad.Adagrad'>,
'adam': <class 'keras.src.optimizers.legacy.adam.Adam'>,
'adam_experimental': <class 'keras.src.optimizers.adam.Adam'>,
'adamw': <class 'official.modeling.optimization.legacy_adamw.AdamWeightDecay'>,
'adamw_experimental': <class 'keras.src.optimizers.adamw.AdamW'>,
'lamb': <class 'official.modeling.optimization.lamb.LAMB'>,
'lars': <class 'official.modeling.optimization.lars.LARS'>,
'rmsprop': <class 'keras.src.optimizers.legacy.rmsprop.RMSprop'>,
'sgd': <class 'keras.src.optimizers.legacy.gradient_descent.SGD'>,
'sgd_experimental': <class 'keras.src.optimizers.sgd.SGD'>,
'slide': 'Unimplemented'
}

LR_CLS
{
'cosine': <class 'official.modeling.optimization.lr_schedule.CosineDecayWithOffset'>,
'exponential': <class 'official.modeling.optimization.lr_schedule.ExponentialDecayWithOffset'>,
'polynomial': <class 'official.modeling.optimization.lr_schedule.PolynomialDecayWithOffset'>,
'power': <class 'official.modeling.optimization.lr_schedule.DirectPowerDecay'>,
'power_linear': <class 'official.modeling.optimization.lr_schedule.PowerAndLinearDecay'>,
'power_with_offset': <class 'official.modeling.optimization.lr_schedule.PowerDecayWithOffset'>,
'step_cosine_with_offset': <class 'official.modeling.optimization.lr_schedule.StepCosineDecayWithOffset'>,
'stepwise': <class 'official.modeling.optimization.lr_schedule.PiecewiseConstantDecayWithOffset'>
}

NEW_OPTIMIZERS_CLS
{
'adafactor': 'Unimplemented',
'adagrad': <class 'keras.src.optimizers.adagrad.Adagrad'>,
'adam': <class 'keras.src.optimizers.adam.Adam'>,
'adam_experimental': <class 'keras.src.optimizers.adam.Adam'>,
'adamw': <class 'official.modeling.optimization.legacy_adamw.AdamWeightDecay'>,
'adamw_experimental': <class 'keras.src.optimizers.adamw.AdamW'>,
'lamb': <class 'official.modeling.optimization.lamb.LAMB'>,
'lars': <class 'official.modeling.optimization.lars.LARS'>,
'rmsprop': <class 'keras.src.optimizers.rmsprop.RMSprop'>,
'sgd': <class 'keras.src.optimizers.sgd.SGD'>,
'sgd_experimental': <class 'keras.src.optimizers.sgd.SGD'>,
'slide': 'Unimplemented'
}

SHARED_OPTIMIZERS
{
'adafactor': 'Unimplemented',
'adam_experimental': <class 'keras.src.optimizers.adam.Adam'>,
'adamw': <class 'official.modeling.optimization.legacy_adamw.AdamWeightDecay'>,
'adamw_experimental': <class 'keras.src.optimizers.adamw.AdamW'>,
'lamb': <class 'official.modeling.optimization.lamb.LAMB'>,
'lars': <class 'official.modeling.optimization.lars.LARS'>,
'sgd_experimental': <class 'keras.src.optimizers.sgd.SGD'>,
'slide': 'Unimplemented'
}

WARMUP_CLS
{
'linear': <class 'official.modeling.optimization.lr_schedule.LinearWarmup'>,
'polynomial': <class 'official.modeling.optimization.lr_schedule.PolynomialWarmUp'>
}
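
The keys in these tables are the strings used as the 'type' field in the optimizer, learning_rate, and warmup sections of an OptimizationConfig. A quick way to inspect the mapping, assuming the members are importable from the module as listed above:

from official.modeling.optimization import optimizer_factory

# Look up which class backs a given config 'type' string.
print(optimizer_factory.LR_CLS['cosine'])
print(optimizer_factory.WARMUP_CLS['linear'])
print(optimizer_factory.NEW_OPTIMIZERS_CLS['adamw_experimental'])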