Commit 721e65ae authored by Yann SOULLARD's avatar Yann SOULLARD

Update ("MAJ") of the evaluate methods: add a `metrics` argument selecting which of 'loss', 'ler', 'ser' to compute

parent 3c0f97f4
......@@ -347,7 +347,7 @@ class CTCModel:
return out
def evaluate(self, x=None, batch_size=None, verbose=1, steps=None, metrics=('loss', 'ler', 'ser')):
    """Evaluate the model on a dataset.

    :param x: list [x_input, y, x_len, y_len] where
        x_input = input data (batch_size, max_input_len, dim_features) -- assumed 3D, TODO confirm
        y       = label data as a 2D tensor (batch_size, max_label_len)
        x_len   = 1D array with the length of each input sequence
        y_len   = 1D array with the length of each labeling
    :param batch_size, verbose, steps: see keras.engine.Model.predict()
    :param metrics: iterable of metric names to compute, among:
        'loss' : the CTC loss on x
        'ler'  : the label error rate (one value per sample)
        'ser'  : the sequence error rate

    :return: a list containing the requested metrics, always in the order
        ['loss', 'ler', 'ser'] restricted to those present in `metrics`.
    """
    x_input, y, x_len, y_len = x[0], x[1], x[2], x[3]
    nb_data = x_input.shape[0]

    eval_batch = None
    seq_error = 0.  # initialized here: the original relied on an unseen prior definition
    # The per-sample LER comes from the evaluation sub-model; it is needed
    # for both the 'ler' and the 'ser' metrics, so compute it once.
    if 'ler' in metrics or 'ser' in metrics:
        eval_batch = self.model_eval.predict([x_input, y, x_len, y_len],
                                             batch_size=batch_size,
                                             verbose=verbose, steps=steps)
    if 'ser' in metrics:
        # A sequence counts as an error as soon as its label error rate is non-zero.
        nb_seq_errors = np.sum([1 for ler_data in eval_batch if ler_data != 0])
        seq_error = nb_seq_errors / nb_data if nb_data > 0 else -1.

    outmetrics = []
    if 'loss' in metrics:
        outmetrics.append(self.get_loss(x))
    if 'ler' in metrics:
        outmetrics.append(eval_batch)
    if 'ser' in metrics:
        outmetrics.append(seq_error)
    return outmetrics
def test_on_batch(self, x=None, metrics=('loss', 'ler', 'ser')):
    """Keras-style alias for evaluate_on_batch.

    Bug fix: `metrics` is now forwarded to evaluate_on_batch instead of
    being silently dropped (the original always evaluated the callee's
    default metric set regardless of the argument).

    :param x: list [x_input, y, x_len, y_len], see evaluate_on_batch()
    :param metrics: iterable of metric names among 'loss', 'ler', 'ser'
    :return: list of the requested metrics, see evaluate_on_batch()
    """
    return self.evaluate_on_batch(x, metrics=metrics)
def evaluate_on_batch(self, x=None, metrics=('loss', 'ler', 'ser')):
    """Evaluate the model on a single batch.

    :param x: list [x_input, y, x_len, y_len] where
        x_input = input data (batch_size, max_input_len, dim_features) -- assumed 3D, TODO confirm
        y       = label data as a 2D tensor (batch_size, max_label_len)
        x_len   = 1D array with the length of each input sequence
        y_len   = 1D array with the length of each labeling
    :param metrics: iterable of metric names to compute, among:
        'loss' : the CTC loss on x
        'ler'  : the label error rate (one value per sample)
        'ser'  : the sequence error rate

    :return: a list containing the requested metrics, always in the order
        ['loss', 'ler', 'ser'] restricted to those present in `metrics`.
    """
    x_input, y, x_len, y_len = x[0], x[1], x[2], x[3]
    nb_data = x_input.shape[0]

    eval_batch = None
    seq_error = 0.  # initialized here: the original relied on an unseen prior definition
    # Per-sample LER is required by both 'ler' and 'ser'; compute it once.
    if 'ler' in metrics or 'ser' in metrics:
        eval_batch = self.model_eval.predict_on_batch([x_input, y, x_len, y_len])
    if 'ser' in metrics:
        # A sequence is in error as soon as its label error rate is non-zero.
        nb_seq_errors = np.sum([1 for ler_data in eval_batch if ler_data != 0])
        seq_error = nb_seq_errors / nb_data if nb_data > 0 else -1.

    outmetrics = []
    if 'loss' in metrics:
        outmetrics.append(self.get_loss(x))
    if 'ler' in metrics:
        outmetrics.append(eval_batch)
    if 'ser' in metrics:
        outmetrics.append(seq_error)
    return outmetrics
def evaluate_generator(self, generator, steps=None, max_queue_size=10, workers=1,
                       use_multiprocessing=False, verbose=0, metrics=('ler', 'ser')):
    """Evaluate the model on a data generator.

    :param generator: yields [x_input, y, x_len, y_len] batches where
        x_input = input data (batch_size, max_input_len, dim_features)
        y       = label data as a 2D tensor (batch_size, max_label_len)
        x_len   = 1D array with the length of each input sequence
        y_len   = 1D array with the length of each labeling
    :param steps, max_queue_size, workers, use_multiprocessing, verbose:
        see keras.engine.Model.predict_generator()
    :param metrics: iterable of metric names to compute, among:
        'loss' : the CTC loss
        'ler'  : the label error rate (one value per sample)
        'ser'  : the sequence error rate

    Warning: if 'loss' is requested together with another metric, the
    generator is consumed twice (once by predict_generator, once by
    get_loss_generator); make sure `steps` covers the whole dataset, or
    compute 'ler'/'ser' here, re-initialize the generator, and call
    get_loss_generator separately.

    :return: a list containing the requested metrics, always in the order
        ['loss', 'ler', 'ser'] restricted to those present in `metrics`.
    """
    ler_dataset = None
    seq_error = 0.
    # Per-sample LER is required by both 'ler' and 'ser'; compute it once.
    if 'ler' in metrics or 'ser' in metrics:
        ler_dataset = self.model_eval.predict_generator(generator, steps,
                                                        max_queue_size=max_queue_size,
                                                        workers=workers,
                                                        use_multiprocessing=use_multiprocessing,
                                                        verbose=verbose)
    if 'ser' in metrics:
        # A sequence is in error as soon as its label error rate is non-zero.
        nb_seq_errors = float(np.sum([1 for ler_data in ler_dataset if ler_data != 0]))
        seq_error = nb_seq_errors / len(ler_dataset) if len(ler_dataset) > 0 else 0.

    outmetrics = []
    if 'loss' in metrics:
        outmetrics.append(self.get_loss_generator(generator, steps))
    if 'ler' in metrics:
        # Bug fix: the original appended the undefined name `eval_batch`
        # here, raising NameError whenever 'ler' was requested.
        outmetrics.append(ler_dataset)
    if 'ser' in metrics:
        outmetrics.append(seq_error)
    return outmetrics
def predict_on_batch(self, x):
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment