fix

charmatch.py  +23 -26
@@ -88,33 +88,30 @@ class charmatch(evaluate.Metric):
         pass
 
     def _compute(self, inputs, expected, outputs):
+        def calculate_metric(t, dl_sh, dl_sg):
+            precision = sum(t) / sum(dl_sh)
+            recall = sum(t) / sum(dl_sg)
+            f_05 = (1 + 0.5**2) * ((precision * recall) / (0.5**2 * precision + recall))
+            return f_05
+
         def get_score(input, expected, output):
-            …
-            precision = true_positives / distance_to_input
-            recall = true_positives / expected_corrections
-            f_05 = (1 + 0.5**2) * (precision * recall) / (0.5**2 * precision + recall)
-            print(f'P: {precision}\nR: {recall}')
-
-            return f_05
-
-        avg = sum([get_score(*row) for row in zip(inputs, expected, outputs)]) / len(inputs) * 100
+            expected_corrections = lev(input, expected)
+            distance_to_input = lev(input, output)
+            distance_to_expected = lev(output, expected)
+            #true_positives = abs((expected_corrections + distance_to_input - distance_to_expected) / 2)
+            true_positives = min(expected_corrections, max(0, (expected_corrections + distance_to_input - distance_to_expected)) / 2)
+            return true_positives, distance_to_input, expected_corrections
+
+        t_list = []
+        dl_sh_list = []
+        dl_sg_list = []
+        for row in zip(inputs, expected, outputs):
+            score = get_score(*row)
+            t_list.append(score[0])
+            dl_sh_list.append(score[1])
+            dl_sg_list.append(score[2])
+
+        avg = calculate_metric(t_list, dl_sh_list, dl_sg_list)
 
         return {
             "fscore": avg
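
The change, in short: the old `_compute` computed an F0.5 per sentence and macro-averaged (mean over sentences, scaled by 100), which divides by zero whenever a hypothesis is identical to its input (`distance_to_input == 0`) or a source needs no correction (`expected_corrections == 0`). The new version micro-averages instead: per-sentence true-positive estimates and Levenshtein distances are summed over the whole corpus, and precision and recall are computed once from the sums. The true-positive count is estimated from three edit distances: lev(src, gold) ~ TP + FN, lev(src, hyp) ~ TP + FP, and lev(hyp, gold) ~ FP + FN, so (lev(src, gold) + lev(src, hyp) - lev(hyp, gold)) / 2 estimates TP, clamped to [0, lev(src, gold)].

Below is a minimal, self-contained sketch of the committed logic, assuming `lev` is a plain Levenshtein distance (the hunk does not show where charmatch.py gets its `lev`); the zero-division guards are added here and are not part of the commit:

def lev(a, b):
    # Textbook dynamic-programming Levenshtein distance, kept to two rows.
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        curr = [i]
        for j, cb in enumerate(b, 1):
            curr.append(min(prev[j] + 1,                # delete ca
                            curr[j - 1] + 1,            # insert cb
                            prev[j - 1] + (ca != cb)))  # substitute ca -> cb
        prev = curr
    return prev[-1]

def corpus_f05(inputs, expected, outputs, beta=0.5):
    tp_sum = d_sh_sum = d_sg_sum = 0.0
    for src, gold, hyp in zip(inputs, expected, outputs):
        d_sg = lev(src, gold)   # edits needed to reach the gold correction
        d_sh = lev(src, hyp)    # edits the system actually made
        d_hg = lev(hyp, gold)   # edits still separating hypothesis from gold
        # Shared-edit estimate, clamped to [0, d_sg] as in the commit.
        tp_sum += min(d_sg, max(0, d_sg + d_sh - d_hg) / 2)
        d_sh_sum += d_sh
        d_sg_sum += d_sg
    # Micro-averaged precision and recall, with guards the commit omits.
    precision = tp_sum / d_sh_sum if d_sh_sum else 0.0
    recall = tp_sum / d_sg_sum if d_sg_sum else 0.0
    if precision + recall == 0.0:
        return 0.0
    return (1 + beta**2) * precision * recall / (beta**2 * precision + recall)

# Example: first typo fixed, second left uncorrected ->
# P = 2/2 = 1.0, R = 2/4 = 0.5, F0.5 = 1.25 * 0.5 / 0.75 ~ 0.833
print(corpus_f05(["teh cat", "a dgo"],
                 ["the cat", "a dog"],
                 ["the cat", "a dgo"]))

Note that the committed `calculate_metric` still divides by zero when `sum(dl_sh)` or `sum(dl_sg)` is zero, or when precision and recall both come out zero; the guards above are the one place this sketch deviates from the diff.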