tokens sequence | tags sequence | evaluation_predictions sequence |
|---|---|---|
[
"Woolmer",
",",
"48",
",",
"played",
"19",
"tests",
"for",
"England",
"between",
"1975",
"and",
"1981",
"."
] | [
3,
8,
8,
8,
8,
8,
8,
8,
0,
8,
8,
8,
8,
8
] | [
[
2.9375,
-0.1787109375,
-0.485595703125,
-0.269775390625,
-0.207275390625,
0.306396484375,
-0.79833984375,
0.06646728515625,
-1.0498046875
],
[
0.87646484375,
8.109375,
-1.87890625,
-0.42041015625,
-3.26171875,
-0.391845703125,
-3.0859375,
-0.... |
[
"Russian",
"President",
"Boris",
"Yeltsin",
",",
"who",
"had",
"heart",
"bypass",
"surgery",
"a",
"month",
"ago",
",",
"plans",
"to",
"return",
"to",
"work",
"on",
"December",
"25",
",",
"the",
"head",
"of",
"the",
"upper",
"chamber",
"of",
"parliament",
... | [
1,
8,
3,
7,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
2,
8,
8,
8,
8,
8
] | [
[
3.673828125,
-0.2734375,
-0.499267578125,
-0.39111328125,
-0.1787109375,
-0.0379638671875,
-0.95849609375,
0.056427001953125,
-1.1552734375
],
[
-0.330810546875,
-0.5048828125,
-2.119140625,
-0.377197265625,
-2.310546875,
-1.0263671875,
-2.769531... |
[
"Beauty",
"ideals",
"and",
"cultures",
"are",
"different",
"in",
"every",
"country",
".",
"\""
] | [
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8
] | [
[
2.599609375,
-0.173583984375,
-0.353515625,
-0.262939453125,
-0.1416015625,
0.109375,
-0.73388671875,
0.1568603515625,
-0.97509765625
],
[
8.9609375,
0.2607421875,
-2.884765625,
0.053955078125,
-1.4306640625,
-0.10662841796875,
-2.0234375,
-0... |
[
"W",
"L",
"T",
"GF",
"GA",
"PTS"
] | [
8,
8,
8,
8,
8,
8
] | [[2.845703125,-0.2376708984375,-0.488525390625,-0.142822265625,-0.06866455078125,0.09417724609375,-0(...TRUNCATED) |
[
"UAE",
"2",
"1",
"1",
"0",
"4",
"3",
"4"
] | [
0,
8,
8,
8,
8,
8,
8,
8
] | [[2.701171875,-0.2327880859375,-0.418212890625,-0.135498046875,-0.0772705078125,0.08184814453125,-0.(...TRUNCATED) |
[
"Sutton",
"(",
"Blackburn",
")"
] | [
3,
8,
2,
8
] | [[1.912109375,-0.1920166015625,-0.282958984375,-0.08111572265625,-0.0235137939453125,0.1612548828125(...TRUNCATED) |
[
"LONDON",
"1996-12-07"
] | [
0,
8
] | [[2.412109375,-0.2108154296875,-0.39990234375,-0.19091796875,-0.1441650390625,0.2247314453125,-0.650(...TRUNCATED) |
[
"Livshits",
"'s",
"words",
"are",
"an",
"attempt",
"to",
"put",
"pressure",
"on",
"the",
"company",
".",
"\""
] | [
3,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8
] | [[2.986328125,-0.1337890625,-0.49755859375,-0.124267578125,-0.09014892578125,0.0872802734375,-0.8857(...TRUNCATED) |
[
"These",
"people",
"do",
"n't",
"understand",
"what",
"they",
"'re",
"doing",
".",
"\""
] | [
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8
] | [[2.765625,-0.245361328125,-0.384521484375,-0.253662109375,-0.11822509765625,0.0716552734375,-0.7451(...TRUNCATED) |
[
"SOCCER",
"-",
"FIFA",
"BOSS",
"HAVELANGE",
"STANDS",
"BY",
"WEAH",
"."
] | [
8,
8,
2,
8,
3,
8,
8,
3,
8
] | [[2.294921875,-0.032684326171875,-0.39794921875,-0.1527099609375,-0.1112060546875,0.1578369140625,-0(...TRUNCATED) |
End of preview. Expand in Data Studio.
Dataset Card for AutoTrain Evaluator
This repository contains model predictions generated by AutoTrain for the following task and dataset:
- Task: Token Classification
- Model: ArBert/roberta-base-finetuned-ner-kmeans
- Dataset: conll2003
To run new evaluation jobs, visit Hugging Face's automatic model evaluator.
Contributions
Thanks to @test for evaluating this model.
- Downloads last month: 13