From 75fb86d8b8cf0031a18d3e43ba70208bbc16e08d Mon Sep 17 00:00:00 2001
From: PaddlePaddle-Gardener
Date: Thu, 13 Jan 2022 14:20:59 +0800
Subject: [PATCH] mirgate_38782

---
 python/paddle/optimizer/lr.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/python/paddle/optimizer/lr.py b/python/paddle/optimizer/lr.py
index d4fafba922..90117f99ab 100644
--- a/python/paddle/optimizer/lr.py
+++ b/python/paddle/optimizer/lr.py
@@ -398,7 +398,7 @@ class NaturalExpDecay(LRScheduler):
 
     Args:
         learning_rate (float): The initial learning rate. It is a python float number.
-        gamma (float, optional): A Ratio to update the learning rate. Default: 0.1.
+        gamma (float, optional): A Ratio to update the learning rate, should be greater than 0.0 to make the learning rate decay. Default: 0.1.
         last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
         verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
 
@@ -456,6 +456,7 @@ class NaturalExpDecay(LRScheduler):
     """
 
     def __init__(self, learning_rate, gamma, last_epoch=-1, verbose=False):
+        assert gamma > 0.0, " 'gamma' must be a positive number so that the learning rate will decay."
         self.gamma = gamma
         super(NaturalExpDecay, self).__init__(learning_rate, last_epoch,
                                               verbose)
@@ -573,7 +574,7 @@ class PolynomialDecay(LRScheduler):
         learning_rate (float): The initial learning rate. It is a python float number.
         decay_steps(int): The decay step size. It determines the decay cycle. It must be a positive integer.
         end_lr(float, optional): The minimum final learning rate. Default: 0.0001.
-        power(float, optional): Power of polynomial. Default: 1.0.
+        power(float, optional): Power of polynomial, should be greater than 0.0 to make the learning rate decay. Default: 1.0.
         cycle(bool, optional): Whether the learning rate rises again. If True, then the learning rate will rise when it decrease
             to ``end_lr`` . If False, the learning rate is monotone decreasing. Default: False.
         last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
@@ -644,6 +645,7 @@ class PolynomialDecay(LRScheduler):
             decay_steps, int), " 'decay_steps' must be a positive integer."
         self.decay_steps = decay_steps
         self.end_lr = end_lr
+        assert power > 0.0, " 'power' must be greater than 0.0 so that the learning rate will decay."
         self.power = power
         self.cycle = cycle
         super(PolynomialDecay, self).__init__(learning_rate, last_epoch,
@@ -820,7 +822,7 @@ class ExponentialDecay(LRScheduler):
     Args:
         learning_rate (float): The initial learning rate. It is a python float number.
         gamma (float): The Ratio that the learning rate will be reduced. ``new_lr = origin_lr * gamma`` .
-            It should be less than 1.0.
+            It should be in interval (0.0, 1.0).
         last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
         verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
 
@@ -878,6 +880,7 @@ class ExponentialDecay(LRScheduler):
     """
 
     def __init__(self, learning_rate, gamma, last_epoch=-1, verbose=False):
+        assert gamma > 0.0 and gamma < 1.0, " 'gamma' must be in interval (0.0, 1.0) so that the learning rate will decay."
         self.gamma = gamma
         super(ExponentialDecay, self).__init__(learning_rate, last_epoch,
                                                verbose)
--
Gitee
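
Below is a minimal usage sketch (not part of the patch itself) showing how the added argument checks behave, assuming the paddle.optimizer.lr schedulers touched in the diff; the exact hyperparameter values are illustrative only:

    import paddle

    # Valid hyperparameters: the schedulers construct normally and the
    # learning rate decays as epochs advance.
    natural = paddle.optimizer.lr.NaturalExpDecay(learning_rate=0.5, gamma=0.1)
    poly = paddle.optimizer.lr.PolynomialDecay(learning_rate=0.5, decay_steps=20, power=1.0)
    expo = paddle.optimizer.lr.ExponentialDecay(learning_rate=0.5, gamma=0.9)
    for scheduler in (natural, poly, expo):
        scheduler.step()
        print(type(scheduler).__name__, scheduler.get_lr())

    # With this patch applied, an out-of-range value fails fast at
    # construction time instead of silently producing a learning rate
    # that never decays.
    try:
        paddle.optimizer.lr.ExponentialDecay(learning_rate=0.5, gamma=1.5)
    except AssertionError as err:
        print(err)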