Work on evaluator, and some model stats.
@@ -12,8 +12,8 @@ from .util import ffmpeg_description, parse_ucs
 @click.group()
-@click.option('--verbose', flag_value='verbose', help='Verbose output')
-def ucsinfer(verbose: bool):
+# @click.option('--verbose', flag_value='verbose', help='Verbose output')
+def ucsinfer():
     pass


@@ -68,14 +68,17 @@ def finetune():
 @ucsinfer.command('evaluate')
 @click.option('--offset', type=int, default=0)
 @click.option('--limit', type=int, default=-1)
+@click.option('--no-foley', type=bool, default=False)
+@click.option('--model', type=str,
+              default="paraphrase-multilingual-mpnet-base-v2")
 @click.argument('dataset', type=click.File('r', encoding='utf8'),
                 default='dataset.csv')
-def evaluate(dataset, offset, limit):
+def evaluate(dataset, offset, limit, model, no_foley):
     """
     Use datasets to evauluate model performance
     """
-    model = SentenceTransformer("paraphrase-multilingual-mpnet-base-v2")
-    ctx = InferenceContext(model)
+    m = SentenceTransformer(model)
+    ctx = InferenceContext(m, model)
     reader = csv.reader(dataset)

     results = []
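Note on this hunk: the checkpoint name is no longer hard-coded; whatever is passed via --model is loaded as a SentenceTransformer and handed to InferenceContext together with its name, presumably so the results can report which model was evaluated. The sketch below shows the generic sentence-embedding ranking such an evaluator typically performs; the category texts, the ucs_categories mapping, and the scoring loop are illustrative assumptions, not the project's actual InferenceContext API.

# Hypothetical sketch of embedding-based UCS category ranking for one
# description. This is NOT the project's InferenceContext, only the standard
# sentence-transformers pattern the diff appears to rely on.
from sentence_transformers import SentenceTransformer, util

model_name = "paraphrase-multilingual-mpnet-base-v2"
model = SentenceTransformer(model_name)

# Illustrative category texts; the real tool derives these from the UCS list.
ucs_categories = {
    "DOORWood": "wooden door opening, closing, creaking",
    "FOLYProp": "foley props, object handling",
    "AMBForest": "forest ambience, birds, wind in trees",
}

cat_ids = list(ucs_categories)
cat_emb = model.encode([ucs_categories[c] for c in cat_ids],
                       convert_to_tensor=True)

description = "old oak door slams shut"
desc_emb = model.encode(description, convert_to_tensor=True)

# Rank categories by cosine similarity; a "hit" would be the true category
# appearing at (or near) the top of this ranking.
scores = util.cos_sim(desc_emb, cat_emb)[0]
ranked = sorted(zip(cat_ids, scores.tolist()), key=lambda x: x[1], reverse=True)
for cat_id, score in ranked:
    print(f"{cat_id}\t{score:.3f}")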
@@ -113,7 +116,8 @@ def evaluate(dataset, offset, limit):

     miss_counts = sorted(miss_counts, key=lambda x: x[1])

-    print(f" === RESULTS === ")
+    print(f"Results for Model {model}")
+    print("=====\n")

     table = [
         ["Total records in sample:", f"{total}"],
@@ -132,7 +136,7 @@ def evaluate(dataset, offset, limit):
            f"{float(miss_counts[-1][1])/float(total):.2%}"]
     ]

-    print(tabulate(table, headers=['', 'n', 'pct']))
+    print(tabulate(table, headers=['', 'n', 'pct'], tablefmt='rst'))


 if __name__ == '__main__':
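Note on the last hunk: without a tablefmt argument, tabulate falls back to its default 'simple' format, while 'rst' emits a reStructuredText table. A quick comparison with placeholder rows (the numbers below are made up for illustration, not evaluator output):

# Placeholder rows only, to compare tabulate's default output with tablefmt='rst'.
from tabulate import tabulate

table = [
    ["Total records in sample:", 100, "100.00%"],
    ["Top-1 hits:", 62, "62.00%"],
]

print(tabulate(table, headers=['', 'n', 'pct']))                  # default 'simple' format
print()
print(tabulate(table, headers=['', 'n', 'pct'], tablefmt='rst'))  # reStructuredText table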