Skip to content
GitLab
Projects
Groups
Snippets
Help
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
C
CTCModel
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Service Desk
Milestones
Operations
Operations
Incidents
Analytics
Analytics
Repository
Value Stream
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Commits
Issue Boards
Open sidebar
TextRecognition
CTCModel
Commits
c2524a00
Commit
c2524a00
authored
Jan 12, 2018
by
Yann SOULLARD
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
MAJ CTCModel correction eval
parent
70363da4
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
1 addition
and
76 deletions
+1
-76
CTCModel.py
CTCModel.py
+1
-76
No files found.
CTCModel.py
View file @
c2524a00
...
...
@@ -2,6 +2,7 @@ import keras.backend as K
import
tensorflow
as
tf
import
numpy
as
np
import
os
from
keras
import
Input
from
keras.engine
import
Model
from
keras.layers
import
Lambda
...
...
@@ -537,82 +538,6 @@ class CTCModel:
return
self
.
predict
(
x
,
batch_size
=
batch_size
)
def predict_epoch(self, generator, nb_batchs, pred=False, eval=True, verbose=0,
                  batch_size=1, files_info=None):
    """ DEPRECATED : use predict_generator, get_loss_generator or evaluate_generator

    CTC prediction on data yielded batch-by-batch by a Python generator.

    Inputs:
        generator = DataGenerator class that returns:
                    x = Input data as a 3D Tensor (batch_size, max_input_len, dim_features)
                    y = Input data as a 2D Tensor (batch_size, max_label_len)
                    x_len = 1D array with the length of each data in batch_size
                    y_len = 1D array with the length of each labeling
        pred = return predictions from the ctc (from model_pred)
        eval = return an analysis of ctc prediction (from model_eval)

    Outputs: a list containing:
        out_pred = output of model_pred
        out_eval = output of model_eval
    """
    # NOTE(review): the `eval` parameter shadows the builtin; kept as-is because
    # callers may pass it by keyword.
    lik = []  # kept empty; preserved only so the historical return shape stays intact
    ed = 0.
    ed_norm = 0.
    list_ler = []
    total_data = 0
    seq_error = 0
    out_pred = []

    for batch_idx in range(nb_batchs):
        batch = next(generator)
        inputs = batch[0]
        # Input layout produced by the generator: [x, y, x_len, y_len].
        x = inputs[0]
        y = inputs[1]
        x_len = inputs[2]
        y_len = inputs[3]
        n_in_batch = x.shape[0]

        if pred:
            # Run label prediction, then map network outputs back to charset labels.
            raw_out = self.model_pred.predict([x, x_len], batch_size=n_in_batch,
                                              verbose=verbose)
            decoded = decoding_with_reference(raw_out, self.charset)

            if files_info is not None:
                labels = decoding_with_reference(y, self.charset)
                for idx in range(len(decoded)):
                    # With batch_size == 1 every item of the batch maps to the
                    # same files_info slot (historical behavior, preserved).
                    if batch_size == 1:
                        info = files_info[batch_idx * batch_size]
                    else:
                        info = files_info[batch_idx * batch_size + idx]
                    out_pred.append((info, decoded[idx], labels[idx]))
            else:
                for idx in range(len(decoded)):
                    out_pred.append((None, decoded[idx],
                                     decoding_with_reference([y[idx]], self.charset)))

        if eval:
            total_data += n_in_batch
            out_eval = self.model_eval.predict([x, y, x_len, y_len],
                                               batch_size=n_in_batch, verbose=verbose)
            ed_norm += np.sum(out_eval)
            # A non-zero label error rate means the whole sequence was wrong.
            seq_error += sum(1 for ler in out_eval if ler != 0)

    if total_data > 0:
        out_ed = (list_ler, ed_norm / total_data, 0, 0, 0, seq_error / total_data)
    else:
        out_ed = (ed, ed_norm, 0., 0., 0., 0.)
    return out_pred, lik, out_ed
def
predict_generator
(
self
,
generator
,
steps
,
max_queue_size
=
10
,
workers
=
1
,
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment