Scores are nDCG@10 and R@100 for each corpus under three configurations: BM25 over a single-field ("flat") index, BM25 over a two-field ("multifield") index, and SPLADE-distil CoCodenser-medium. The command blocks after each table correspond to the table rows, top to bottom; the index name in each command identifies the configuration.

| trec-covid                      | nDCG@10 |  R@100 |
|:--------------------------------|--------:|-------:|
| BM25 (flat)                     |  0.5947 | 0.1091 |
| BM25 (multifield)               |  0.6559 | 0.1141 |
| SPLADE-distil CoCodenser-medium |  0.7109 | 0.1308 |
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-trec-covid-flat \
--topics beir-v1.0.0-trec-covid-test \
--output run.beir-flat.trec-covid.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query
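For readers new to Pyserini, the flags in these search commands break down as follows (annotations added here; they describe standard pyserini.search.lucene behavior):
# --index / --topics       prebuilt BEIR index and packaged test queries; both
#                          are downloaded automatically on first use
# --output-format trec     write the ranked lists in standard TREC run format
# --batch 36 --threads 12  query batching and parallelism; affects speed only,
#                          not the retrieved results
# --hits 1000              retrieve the top 1000 documents per query
# --bm25                   rank with BM25 using Pyserini's default parameters
# --remove-query           drop a retrieved document whose id matches the query
#                          id (relevant for corpora whose queries are drawn
#                          from the corpus itself, e.g., arguana and quora)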
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-trec-covid-test \
run.beir-flat.trec-covid.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-trec-covid-test \
run.beir-flat.trec-covid.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-trec-covid-test \
run.beir-flat.trec-covid.txt
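The flat run can also be produced through Pyserini's Python API. Here is a minimal sketch (added for illustration, not part of the official instructions; it omits the CLI's --remove-query filtering and writes the TREC run format by hand):

from pyserini.search import get_topics
from pyserini.search.lucene import LuceneSearcher

# Downloads the prebuilt index on first use, same as the CLI above.
searcher = LuceneSearcher.from_prebuilt_index('beir-v1.0.0-trec-covid-flat')
topics = get_topics('beir-v1.0.0-trec-covid-test')

with open('run.beir-flat.trec-covid.txt', 'w') as out:
    for qid, topic in topics.items():
        hits = searcher.search(topic['title'], k=1000)
        for rank, hit in enumerate(hits, start=1):
            # TREC run format: qid Q0 docid rank score tag
            out.write(f'{qid} Q0 {hit.docid} {rank} {hit.score:.6f} pyserini\n')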
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-trec-covid-multifield \
--topics beir-v1.0.0-trec-covid-test \
--output run.beir-multifield.trec-covid.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
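The only changes from the flat run are the -multifield index and --fields contents=1.0 title=1.0, which takes field=weight pairs and here scores the contents and title fields with equal weight. The weights are tunable; a hypothetical variant that upweights title matches (not used for the results on this page) would pass, for example:
# --fields contents=1.0 title=2.0   (hypothetical weighting, for illustration only)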
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-trec-covid-test \
run.beir-multifield.trec-covid.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-trec-covid-test \
run.beir-multifield.trec-covid.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-trec-covid-test \
run.beir-multifield.trec-covid.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-trec-covid-splade_distil_cocodenser_medium \
--topics beir-v1.0.0-trec-covid-test-splade_distil_cocodenser_medium \
--output run.beir-splade-distil-cocodenser-medium.trec-covid.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --impact --remove-query
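The SPLADE runs differ from the BM25 runs in two ways: the index stores quantized learned term weights ("impacts") rather than raw term statistics, and the topics are pre-encoded with the SPLADE-distil CoCodenser-medium query encoder, so no neural model runs at search time. With --impact, a document's score is the sum of products of query-term and document-term weights, with no length normalization. A conceptual sketch of that scoring rule (an illustration, not Pyserini internals):

def impact_score(query_weights: dict[str, int], doc_impacts: dict[str, int]) -> int:
    # Both sides are bags of weighted terms; only overlapping terms contribute.
    return sum(w * doc_impacts[t] for t, w in query_weights.items() if t in doc_impacts)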
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-trec-covid-test \
run.beir-splade-distil-cocodenser-medium.trec-covid.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-trec-covid-test \
run.beir-splade-distil-cocodenser-medium.trec-covid.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-trec-covid-test \
run.beir-splade-distil-cocodenser-medium.trec-covid.txt
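The same generate-then-evaluate pattern repeats for every corpus below, so it may be convenient to drive it programmatically. A hypothetical wrapper (added here for convenience; names and settings are copied from the commands on this page):

import subprocess

CONFIGS = {
    'flat': ('beir-flat', ['--bm25']),
    'multifield': ('beir-multifield',
                   ['--bm25', '--fields', 'contents=1.0', 'title=1.0']),
    'splade_distil_cocodenser_medium':
        ('beir-splade-distil-cocodenser-medium', ['--impact']),
}

def reproduce(corpus: str, config: str) -> None:
    run_tag, ranker_args = CONFIGS[config]
    qrels = f'beir-v1.0.0-{corpus}-test'
    # SPLADE searches pre-encoded queries; BM25 uses the plain test topics.
    topics = f'{qrels}-{config}' if config.startswith('splade') else qrels
    run_file = f'run.{run_tag}.{corpus}.txt'
    subprocess.run(['python', '-m', 'pyserini.search.lucene',
                    '--index', f'beir-v1.0.0-{corpus}-{config}',
                    '--topics', topics,
                    '--output', run_file, '--output-format', 'trec',
                    '--batch', '36', '--threads', '12',
                    '--hits', '1000', '--remove-query'] + ranker_args,
                   check=True)
    for metric in ('ndcg_cut.10', 'recall.100', 'recall.1000'):
        subprocess.run(['python', '-m', 'pyserini.eval.trec_eval',
                        '-c', '-m', metric, qrels, run_file], check=True)

reproduce('trec-covid', 'flat')  # for example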
| bioasq                          | nDCG@10 |  R@100 |
|:--------------------------------|--------:|-------:|
| BM25 (flat)                     |  0.5225 | 0.7687 |
| BM25 (multifield)               |  0.4646 | 0.7145 |
| SPLADE-distil CoCodenser-medium |  0.5035 | 0.7422 |
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-bioasq-flat \
--topics beir-v1.0.0-bioasq-test \
--output run.beir-flat.bioasq.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-bioasq-test \
run.beir-flat.bioasq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-bioasq-test \
run.beir-flat.bioasq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-bioasq-test \
run.beir-flat.bioasq.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-bioasq-multifield \
--topics beir-v1.0.0-bioasq-test \
--output run.beir-multifield.bioasq.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-bioasq-test \
run.beir-multifield.bioasq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-bioasq-test \
run.beir-multifield.bioasq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-bioasq-test \
run.beir-multifield.bioasq.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-bioasq-splade_distil_cocodenser_medium \
--topics beir-v1.0.0-bioasq-test-splade_distil_cocodenser_medium \
--output run.beir-splade-distil-cocodenser-medium.bioasq.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-bioasq-test \
run.beir-splade-distil-cocodenser-medium.bioasq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-bioasq-test \
run.beir-splade-distil-cocodenser-medium.bioasq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-bioasq-test \
run.beir-splade-distil-cocodenser-medium.bioasq.txt
| nfcorpus                        | nDCG@10 |  R@100 |
|:--------------------------------|--------:|-------:|
| BM25 (flat)                     |  0.3218 | 0.2457 |
| BM25 (multifield)               |  0.3254 | 0.2500 |
| SPLADE-distil CoCodenser-medium |  0.3454 | 0.2891 |
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-nfcorpus-flat \
--topics beir-v1.0.0-nfcorpus-test \
--output run.beir-flat.nfcorpus.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-nfcorpus-test \
run.beir-flat.nfcorpus.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-nfcorpus-test \
run.beir-flat.nfcorpus.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-nfcorpus-test \
run.beir-flat.nfcorpus.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-nfcorpus-multifield \
--topics beir-v1.0.0-nfcorpus-test \
--output run.beir-multifield.nfcorpus.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-nfcorpus-test \
run.beir-multifield.nfcorpus.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-nfcorpus-test \
run.beir-multifield.nfcorpus.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-nfcorpus-test \
run.beir-multifield.nfcorpus.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-nfcorpus-splade_distil_cocodenser_medium \
--topics beir-v1.0.0-nfcorpus-test-splade_distil_cocodenser_medium \
--output run.beir-splade-distil-cocodenser-medium.nfcorpus.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-nfcorpus-test \
run.beir-splade-distil-cocodenser-medium.nfcorpus.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-nfcorpus-test \
run.beir-splade-distil-cocodenser-medium.nfcorpus.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-nfcorpus-test \
run.beir-splade-distil-cocodenser-medium.nfcorpus.txt
| nq                              | nDCG@10 |  R@100 |
|:--------------------------------|--------:|-------:|
| BM25 (flat)                     |  0.3055 | 0.7513 |
| BM25 (multifield)               |  0.3285 | 0.7597 |
| SPLADE-distil CoCodenser-medium |  0.5442 | 0.9285 |
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-nq-flat \
--topics beir-v1.0.0-nq-test \
--output run.beir-flat.nq.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-nq-test \
run.beir-flat.nq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-nq-test \
run.beir-flat.nq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-nq-test \
run.beir-flat.nq.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-nq-multifield \
--topics beir-v1.0.0-nq-test \
--output run.beir-multifield.nq.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-nq-test \
run.beir-multifield.nq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-nq-test \
run.beir-multifield.nq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-nq-test \
run.beir-multifield.nq.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-nq-splade_distil_cocodenser_medium \
--topics beir-v1.0.0-nq-test-splade_distil_cocodenser_medium \
--output run.beir-splade-distil-cocodenser-medium.nq.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-nq-test \
run.beir-splade-distil-cocodenser-medium.nq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-nq-test \
run.beir-splade-distil-cocodenser-medium.nq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-nq-test \
run.beir-splade-distil-cocodenser-medium.nq.txt
| hotpotqa                        | nDCG@10 |  R@100 |
|:--------------------------------|--------:|-------:|
| BM25 (flat)                     |  0.6330 | 0.7957 |
| BM25 (multifield)               |  0.6027 | 0.7400 |
| SPLADE-distil CoCodenser-medium |  0.6860 | 0.8144 |
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-hotpotqa-flat \
--topics beir-v1.0.0-hotpotqa-test \
--output run.beir-flat.hotpotqa.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-hotpotqa-test \
run.beir-flat.hotpotqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-hotpotqa-test \
run.beir-flat.hotpotqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-hotpotqa-test \
run.beir-flat.hotpotqa.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-hotpotqa-multifield \
--topics beir-v1.0.0-hotpotqa-test \
--output run.beir-multifield.hotpotqa.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-hotpotqa-test \
run.beir-multifield.hotpotqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-hotpotqa-test \
run.beir-multifield.hotpotqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-hotpotqa-test \
run.beir-multifield.hotpotqa.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-hotpotqa-splade_distil_cocodenser_medium \
--topics beir-v1.0.0-hotpotqa-test-splade_distil_cocodenser_medium \
--output run.beir-splade-distil-cocodenser-medium.hotpotqa.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-hotpotqa-test \
run.beir-splade-distil-cocodenser-medium.hotpotqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-hotpotqa-test \
run.beir-splade-distil-cocodenser-medium.hotpotqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-hotpotqa-test \
run.beir-splade-distil-cocodenser-medium.hotpotqa.txt
| fiqa                            | nDCG@10 |  R@100 |
|:--------------------------------|--------:|-------:|
| BM25 (flat)                     |  0.2361 | 0.5395 |
| BM25 (multifield)               |  0.2361 | 0.5395 |
| SPLADE-distil CoCodenser-medium |  0.3514 | 0.6298 |
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-fiqa-flat \
--topics beir-v1.0.0-fiqa-test \
--output run.beir-flat.fiqa.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-fiqa-test \
run.beir-flat.fiqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-fiqa-test \
run.beir-flat.fiqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-fiqa-test \
run.beir-flat.fiqa.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-fiqa-multifield \
--topics beir-v1.0.0-fiqa-test \
--output run.beir-multifield.fiqa.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-fiqa-test \
run.beir-multifield.fiqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-fiqa-test \
run.beir-multifield.fiqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-fiqa-test \
run.beir-multifield.fiqa.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-fiqa-splade_distil_cocodenser_medium \
--topics beir-v1.0.0-fiqa-test-splade_distil_cocodenser_medium \
--output run.beir-splade-distil-cocodenser-medium.fiqa.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-fiqa-test \
run.beir-splade-distil-cocodenser-medium.fiqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-fiqa-test \
run.beir-splade-distil-cocodenser-medium.fiqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-fiqa-test \
run.beir-splade-distil-cocodenser-medium.fiqa.txt
| signal1m                        | nDCG@10 |  R@100 |
|:--------------------------------|--------:|-------:|
| BM25 (flat)                     |  0.3304 | 0.3703 |
| BM25 (multifield)               |  0.3304 | 0.3703 |
| SPLADE-distil CoCodenser-medium |  0.2957 | 0.3311 |
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-signal1m-flat \
--topics beir-v1.0.0-signal1m-test \
--output run.beir-flat.signal1m.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-signal1m-test \
run.beir-flat.signal1m.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-signal1m-test \
run.beir-flat.signal1m.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-signal1m-test \
run.beir-flat.signal1m.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-signal1m-multifield \
--topics beir-v1.0.0-signal1m-test \
--output run.beir-multifield.signal1m.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-signal1m-test \
run.beir-multifield.signal1m.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-signal1m-test \
run.beir-multifield.signal1m.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-signal1m-test \
run.beir-multifield.signal1m.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-signal1m-splade_distil_cocodenser_medium \
--topics beir-v1.0.0-signal1m-test-splade_distil_cocodenser_medium \
--output run.beir-splade-distil-cocodenser-medium.signal1m.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-signal1m-test \
run.beir-splade-distil-cocodenser-medium.signal1m.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-signal1m-test \
run.beir-splade-distil-cocodenser-medium.signal1m.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-signal1m-test \
run.beir-splade-distil-cocodenser-medium.signal1m.txt
| trec-news                       | nDCG@10 |  R@100 |
|:--------------------------------|--------:|-------:|
| BM25 (flat)                     |  0.3952 | 0.4469 |
| BM25 (multifield)               |  0.3977 | 0.4216 |
| SPLADE-distil CoCodenser-medium |  0.3936 | 0.4323 |
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-trec-news-flat \
--topics beir-v1.0.0-trec-news-test \
--output run.beir-flat.trec-news.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-trec-news-test \
run.beir-flat.trec-news.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-trec-news-test \
run.beir-flat.trec-news.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-trec-news-test \
run.beir-flat.trec-news.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-trec-news-multifield \
--topics beir-v1.0.0-trec-news-test \
--output run.beir-multifield.trec-news.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-trec-news-test \
run.beir-multifield.trec-news.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-trec-news-test \
run.beir-multifield.trec-news.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-trec-news-test \
run.beir-multifield.trec-news.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-trec-news-splade_distil_cocodenser_medium \
--topics beir-v1.0.0-trec-news-test-splade_distil_cocodenser_medium \
--output run.beir-splade-distil-cocodenser-medium.trec-news.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-trec-news-test \
run.beir-splade-distil-cocodenser-medium.trec-news.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-trec-news-test \
run.beir-splade-distil-cocodenser-medium.trec-news.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-trec-news-test \
run.beir-splade-distil-cocodenser-medium.trec-news.txt
| robust04                        | nDCG@10 |  R@100 |
|:--------------------------------|--------:|-------:|
| BM25 (flat)                     |  0.4070 | 0.3746 |
| BM25 (multifield)               |  0.4070 | 0.3746 |
| SPLADE-distil CoCodenser-medium |  0.4581 | 0.3773 |
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-robust04-flat \
--topics beir-v1.0.0-robust04-test \
--output run.beir-flat.robust04.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-robust04-test \
run.beir-flat.robust04.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-robust04-test \
run.beir-flat.robust04.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-robust04-test \
run.beir-flat.robust04.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-robust04-multifield \
--topics beir-v1.0.0-robust04-test \
--output run.beir-multifield.robust04.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-robust04-test \
run.beir-multifield.robust04.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-robust04-test \
run.beir-multifield.robust04.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-robust04-test \
run.beir-multifield.robust04.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-robust04-splade_distil_cocodenser_medium \
--topics beir-v1.0.0-robust04-test-splade_distil_cocodenser_medium \
--output run.beir-splade-distil-cocodenser-medium.robust04.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-robust04-test \
run.beir-splade-distil-cocodenser-medium.robust04.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-robust04-test \
run.beir-splade-distil-cocodenser-medium.robust04.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-robust04-test \
run.beir-splade-distil-cocodenser-medium.robust04.txt
| arguana                         | nDCG@10 |  R@100 |
|:--------------------------------|--------:|-------:|
| BM25 (flat)                     |  0.3970 | 0.9324 |
| BM25 (multifield)               |  0.4142 | 0.9431 |
| SPLADE-distil CoCodenser-medium |  0.5210 | 0.9822 |
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-arguana-flat \
--topics beir-v1.0.0-arguana-test \
--output run.beir-flat.arguana.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-arguana-test \
run.beir-flat.arguana.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-arguana-test \
run.beir-flat.arguana.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-arguana-test \
run.beir-flat.arguana.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-arguana-multifield \
--topics beir-v1.0.0-arguana-test \
--output run.beir-multifield.arguana.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-arguana-test \
run.beir-multifield.arguana.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-arguana-test \
run.beir-multifield.arguana.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-arguana-test \
run.beir-multifield.arguana.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-arguana-splade_distil_cocodenser_medium \
--topics beir-v1.0.0-arguana-test-splade_distil_cocodenser_medium \
--output run.beir-splade-distil-cocodenser-medium.arguana.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-arguana-test \
run.beir-splade-distil-cocodenser-medium.arguana.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-arguana-test \
run.beir-splade-distil-cocodenser-medium.arguana.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-arguana-test \
run.beir-splade-distil-cocodenser-medium.arguana.txt
| webis-touche2020                | nDCG@10 |  R@100 |
|:--------------------------------|--------:|-------:|
| BM25 (flat)                     |  0.4422 | 0.5822 |
| BM25 (multifield)               |  0.3673 | 0.5376 |
| SPLADE-distil CoCodenser-medium |  0.2435 | 0.4723 |
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-webis-touche2020-flat \
--topics beir-v1.0.0-webis-touche2020-test \
--output run.beir-flat.webis-touche2020.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-webis-touche2020-test \
run.beir-flat.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-webis-touche2020-test \
run.beir-flat.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-webis-touche2020-test \
run.beir-flat.webis-touche2020.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-webis-touche2020-multifield \
--topics beir-v1.0.0-webis-touche2020-test \
--output run.beir-multifield.webis-touche2020.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-webis-touche2020-test \
run.beir-multifield.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-webis-touche2020-test \
run.beir-multifield.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-webis-touche2020-test \
run.beir-multifield.webis-touche2020.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-webis-touche2020-splade_distil_cocodenser_medium \
--topics beir-v1.0.0-webis-touche2020-test-splade_distil_cocodenser_medium \
--output run.beir-splade-distil-cocodenser-medium.webis-touche2020.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-webis-touche2020-test \
run.beir-splade-distil-cocodenser-medium.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-webis-touche2020-test \
run.beir-splade-distil-cocodenser-medium.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-webis-touche2020-test \
run.beir-splade-distil-cocodenser-medium.webis-touche2020.txt
| cqadupstack-android             | nDCG@10 |  R@100 |
|:--------------------------------|--------:|-------:|
| BM25 (flat)                     |  0.3801 | 0.6829 |
| BM25 (multifield)               |  0.3709 | 0.6889 |
| SPLADE-distil CoCodenser-medium |  0.3954 | 0.7405 |
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-android-flat \
--topics beir-v1.0.0-cqadupstack-android-test \
--output run.beir-flat.cqadupstack-android.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-android-test \
run.beir-flat.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-android-test \
run.beir-flat.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-android-test \
run.beir-flat.cqadupstack-android.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-android-multifield \
--topics beir-v1.0.0-cqadupstack-android-test \
--output run.beir-multifield.cqadupstack-android.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-android-test \
run.beir-multifield.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-android-test \
run.beir-multifield.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-android-test \
run.beir-multifield.cqadupstack-android.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-android-splade_distil_cocodenser_medium \
--topics beir-v1.0.0-cqadupstack-android-test-splade_distil_cocodenser_medium \
--output run.beir-splade-distil-cocodenser-medium.cqadupstack-android.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-android-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-android-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-android-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-android.txt
| cqadupstack-english             | nDCG@10 |  R@100 |
|:--------------------------------|--------:|-------:|
| BM25 (flat)                     |  0.3453 | 0.5757 |
| BM25 (multifield)               |  0.3321 | 0.5842 |
| SPLADE-distil CoCodenser-medium |  0.4026 | 0.6768 |
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-english-flat \
--topics beir-v1.0.0-cqadupstack-english-test \
--output run.beir-flat.cqadupstack-english.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-english-test \
run.beir-flat.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-english-test \
run.beir-flat.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-english-test \
run.beir-flat.cqadupstack-english.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-english-multifield \
--topics beir-v1.0.0-cqadupstack-english-test \
--output run.beir-multifield.cqadupstack-english.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-english-test \
run.beir-multifield.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-english-test \
run.beir-multifield.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-english-test \
run.beir-multifield.cqadupstack-english.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-english-splade_distil_cocodenser_medium \
--topics beir-v1.0.0-cqadupstack-english-test-splade_distil_cocodenser_medium \
--output run.beir-splade-distil-cocodenser-medium.cqadupstack-english.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-english-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-english-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-english-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-english.txt
| cqadupstack-gaming              | nDCG@10 |  R@100 |
|:--------------------------------|--------:|-------:|
| BM25 (flat)                     |  0.4822 | 0.7651 |
| BM25 (multifield)               |  0.4418 | 0.7571 |
| SPLADE-distil CoCodenser-medium |  0.5061 | 0.8138 |
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-gaming-flat \
--topics beir-v1.0.0-cqadupstack-gaming-test \
--output run.beir-flat.cqadupstack-gaming.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gaming-test \
run.beir-flat.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-gaming-test \
run.beir-flat.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-gaming-test \
run.beir-flat.cqadupstack-gaming.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-gaming-multifield \
--topics beir-v1.0.0-cqadupstack-gaming-test \
--output run.beir-multifield.cqadupstack-gaming.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gaming-test \
run.beir-multifield.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-gaming-test \
run.beir-multifield.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-gaming-test \
run.beir-multifield.cqadupstack-gaming.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-gaming-splade_distil_cocodenser_medium \
--topics beir-v1.0.0-cqadupstack-gaming-test-splade_distil_cocodenser_medium \
--output run.beir-splade-distil-cocodenser-medium.cqadupstack-gaming.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gaming-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-gaming-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-gaming-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-gaming.txt
| cqadupstack-gis                 | nDCG@10 |  R@100 |
|:--------------------------------|--------:|-------:|
| BM25 (flat)                     |  0.2901 | 0.6119 |
| BM25 (multifield)               |  0.2904 | 0.6458 |
| SPLADE-distil CoCodenser-medium |  0.3223 | 0.6419 |
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-gis-flat \
--topics beir-v1.0.0-cqadupstack-gis-test \
--output run.beir-flat.cqadupstack-gis.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gis-test \
run.beir-flat.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-gis-test \
run.beir-flat.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-gis-test \
run.beir-flat.cqadupstack-gis.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-gis-multifield \
--topics beir-v1.0.0-cqadupstack-gis-test \
--output run.beir-multifield.cqadupstack-gis.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gis-test \
run.beir-multifield.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-gis-test \
run.beir-multifield.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-gis-test \
run.beir-multifield.cqadupstack-gis.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-gis-splade_distil_cocodenser_medium \
--topics beir-v1.0.0-cqadupstack-gis-test-splade_distil_cocodenser_medium \
--output run.beir-splade-distil-cocodenser-medium.cqadupstack-gis.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gis-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-gis-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-gis-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-gis.txt
| cqadupstack-mathematica         | nDCG@10 |  R@100 |
|:--------------------------------|--------:|-------:|
| BM25 (flat)                     |  0.2015 | 0.4877 |
| BM25 (multifield)               |  0.2046 | 0.5215 |
| SPLADE-distil CoCodenser-medium |  0.2423 | 0.5732 |
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-mathematica-flat \
--topics beir-v1.0.0-cqadupstack-mathematica-test \
--output run.beir-flat.cqadupstack-mathematica.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir-flat.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir-flat.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir-flat.cqadupstack-mathematica.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-mathematica-multifield \
--topics beir-v1.0.0-cqadupstack-mathematica-test \
--output run.beir-multifield.cqadupstack-mathematica.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir-multifield.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir-multifield.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir-multifield.cqadupstack-mathematica.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-mathematica-splade_distil_cocodenser_medium \
--topics beir-v1.0.0-cqadupstack-mathematica-test-splade_distil_cocodenser_medium \
--output run.beir-splade-distil-cocodenser-medium.cqadupstack-mathematica.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-mathematica.txt
| cqadupstack-physics             | nDCG@10 |  R@100 |
|:--------------------------------|--------:|-------:|
| BM25 (flat)                     |  0.3214 | 0.6326 |
| BM25 (multifield)               |  0.3248 | 0.6486 |
| SPLADE-distil CoCodenser-medium |  0.3668 | 0.7286 |
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-physics-flat \
--topics beir-v1.0.0-cqadupstack-physics-test \
--output run.beir-flat.cqadupstack-physics.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-physics-test \
run.beir-flat.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-physics-test \
run.beir-flat.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-physics-test \
run.beir-flat.cqadupstack-physics.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-physics-multifield \
--topics beir-v1.0.0-cqadupstack-physics-test \
--output run.beir-multifield.cqadupstack-physics.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-physics-test \
run.beir-multifield.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-physics-test \
run.beir-multifield.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-physics-test \
run.beir-multifield.cqadupstack-physics.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-physics-splade_distil_cocodenser_medium \
--topics beir-v1.0.0-cqadupstack-physics-test-splade_distil_cocodenser_medium \
--output run.beir-splade-distil-cocodenser-medium.cqadupstack-physics.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-physics-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-physics-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-physics-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-physics.txt
| cqadupstack-programmers         | nDCG@10 |  R@100 |
|:--------------------------------|--------:|-------:|
| BM25 (flat)                     |  0.2802 | 0.5588 |
| BM25 (multifield)               |  0.2963 | 0.6194 |
| SPLADE-distil CoCodenser-medium |  0.3412 | 0.6653 |
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-programmers-flat \
--topics beir-v1.0.0-cqadupstack-programmers-test \
--output run.beir-flat.cqadupstack-programmers.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-programmers-test \
run.beir-flat.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-programmers-test \
run.beir-flat.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-programmers-test \
run.beir-flat.cqadupstack-programmers.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-programmers-multifield \
--topics beir-v1.0.0-cqadupstack-programmers-test \
--output run.beir-multifield.cqadupstack-programmers.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-programmers-test \
run.beir-multifield.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-programmers-test \
run.beir-multifield.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-programmers-test \
run.beir-multifield.cqadupstack-programmers.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-programmers-splade_distil_cocodenser_medium \
--topics beir-v1.0.0-cqadupstack-programmers-test-splade_distil_cocodenser_medium \
--output run.beir-splade-distil-cocodenser-medium.cqadupstack-programmers.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-programmers-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-programmers-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-programmers-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-programmers.txt
| cqadupstack-stats               | nDCG@10 |  R@100 |
|:--------------------------------|--------:|-------:|
| BM25 (flat)                     |  0.2711 | 0.5338 |
| BM25 (multifield)               |  0.2790 | 0.5719 |
| SPLADE-distil CoCodenser-medium |  0.3142 | 0.5889 |
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-stats-flat \
--topics beir-v1.0.0-cqadupstack-stats-test \
--output run.beir-flat.cqadupstack-stats.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-stats-test \
run.beir-flat.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-stats-test \
run.beir-flat.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-stats-test \
run.beir-flat.cqadupstack-stats.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-stats-multifield \
--topics beir-v1.0.0-cqadupstack-stats-test \
--output run.beir-multifield.cqadupstack-stats.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-stats-test \
run.beir-multifield.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-stats-test \
run.beir-multifield.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-stats-test \
run.beir-multifield.cqadupstack-stats.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-stats-splade_distil_cocodenser_medium \
--topics beir-v1.0.0-cqadupstack-stats-test-splade_distil_cocodenser_medium \
--output run.beir-splade-distil-cocodenser-medium.cqadupstack-stats.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-stats-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-stats-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-stats-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-stats.txt
| cqadupstack-tex                 | nDCG@10 |  R@100 |
|:--------------------------------|--------:|-------:|
| BM25 (flat)                     |  0.2244 | 0.4686 |
| BM25 (multifield)               |  0.2086 | 0.4954 |
| SPLADE-distil CoCodenser-medium |  0.2575 | 0.5231 |
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-tex-flat \
--topics beir-v1.0.0-cqadupstack-tex-test \
--output run.beir-flat.cqadupstack-tex.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-tex-test \
run.beir-flat.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-tex-test \
run.beir-flat.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-tex-test \
run.beir-flat.cqadupstack-tex.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-tex-multifield \
--topics beir-v1.0.0-cqadupstack-tex-test \
--output run.beir-multifield.cqadupstack-tex.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-tex-test \
run.beir-multifield.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-tex-test \
run.beir-multifield.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-tex-test \
run.beir-multifield.cqadupstack-tex.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-tex-splade_distil_cocodenser_medium \
--topics beir-v1.0.0-cqadupstack-tex-test-splade_distil_cocodenser_medium \
--output run.beir-splade-distil-cocodenser-medium.cqadupstack-tex.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-tex-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-tex-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-tex-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-tex.txt
| cqadupstack-unix                | nDCG@10 |  R@100 |
|:--------------------------------|--------:|-------:|
| BM25 (flat)                     |  0.2749 | 0.5417 |
| BM25 (multifield)               |  0.2788 | 0.5721 |
| SPLADE-distil CoCodenser-medium |  0.3292 | 0.6192 |
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-unix-flat \
--topics beir-v1.0.0-cqadupstack-unix-test \
--output run.beir-flat.cqadupstack-unix.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-unix-test \
run.beir-flat.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-unix-test \
run.beir-flat.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-unix-test \
run.beir-flat.cqadupstack-unix.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-unix-multifield \
--topics beir-v1.0.0-cqadupstack-unix-test \
--output run.beir-multifield.cqadupstack-unix.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-unix-test \
run.beir-multifield.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-unix-test \
run.beir-multifield.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-unix-test \
run.beir-multifield.cqadupstack-unix.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-unix-splade_distil_cocodenser_medium \
--topics beir-v1.0.0-cqadupstack-unix-test-splade_distil_cocodenser_medium \
--output run.beir-splade-distil-cocodenser-medium.cqadupstack-unix.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-unix-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-unix-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-unix-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-unix.txt
| cqadupstack-webmasters          | nDCG@10 |  R@100 |
|:--------------------------------|--------:|-------:|
| BM25 (flat)                     |  0.3059 | 0.5820 |
| BM25 (multifield)               |  0.3008 | 0.6100 |
| SPLADE-distil CoCodenser-medium |  0.3343 | 0.6404 |
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-webmasters-flat \
--topics beir-v1.0.0-cqadupstack-webmasters-test \
--output run.beir-flat.cqadupstack-webmasters.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir-flat.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir-flat.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir-flat.cqadupstack-webmasters.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-webmasters-multifield \
--topics beir-v1.0.0-cqadupstack-webmasters-test \
--output run.beir-multifield.cqadupstack-webmasters.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir-multifield.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir-multifield.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir-multifield.cqadupstack-webmasters.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-webmasters-splade_distil_cocodenser_medium \
--topics beir-v1.0.0-cqadupstack-webmasters-test-splade_distil_cocodenser_medium \
--output run.beir-splade-distil-cocodenser-medium.cqadupstack-webmasters.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-webmasters.txt
| cqadupstack-wordpress           | nDCG@10 |  R@100 |
|:--------------------------------|--------:|-------:|
| BM25 (flat)                     |  0.2483 | 0.5152 |
| BM25 (multifield)               |  0.2562 | 0.5526 |
| SPLADE-distil CoCodenser-medium |  0.2839 | 0.5974 |
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-wordpress-flat \
--topics beir-v1.0.0-cqadupstack-wordpress-test \
--output run.beir-flat.cqadupstack-wordpress.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir-flat.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir-flat.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir-flat.cqadupstack-wordpress.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-wordpress-multifield \
--topics beir-v1.0.0-cqadupstack-wordpress-test \
--output run.beir-multifield.cqadupstack-wordpress.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir-multifield.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir-multifield.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir-multifield.cqadupstack-wordpress.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-cqadupstack-wordpress-splade_distil_cocodenser_medium \
--topics beir-v1.0.0-cqadupstack-wordpress-test-splade_distil_cocodenser_medium \
--output run.beir-splade-distil-cocodenser-medium.cqadupstack-wordpress.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir-splade-distil-cocodenser-medium.cqadupstack-wordpress.txt
| quora                           | nDCG@10 |  R@100 |
|:--------------------------------|--------:|-------:|
| BM25 (flat)                     |  0.7886 | 0.9733 |
| BM25 (multifield)               |  0.7886 | 0.9733 |
| SPLADE-distil CoCodenser-medium |  0.8136 | 0.9817 |
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-quora-flat \
--topics beir-v1.0.0-quora-test \
--output run.beir-flat.quora.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-quora-test \
run.beir-flat.quora.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-quora-test \
run.beir-flat.quora.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-quora-test \
run.beir-flat.quora.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-quora-multifield \
--topics beir-v1.0.0-quora-test \
--output run.beir-multifield.quora.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-quora-test \
run.beir-multifield.quora.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-quora-test \
run.beir-multifield.quora.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-quora-test \
run.beir-multifield.quora.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-quora-splade_distil_cocodenser_medium \
--topics beir-v1.0.0-quora-test-splade_distil_cocodenser_medium \
--output run.beir-splade-distil-cocodenser-medium.quora.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-quora-test \
run.beir-splade-distil-cocodenser-medium.quora.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-quora-test \
run.beir-splade-distil-cocodenser-medium.quora.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-quora-test \
run.beir-splade-distil-cocodenser-medium.quora.txt
| dbpedia-entity                  | nDCG@10 |  R@100 |
|:--------------------------------|--------:|-------:|
| BM25 (flat)                     |  0.3180 | 0.4682 |
| BM25 (multifield)               |  0.3128 | 0.3981 |
| SPLADE-distil CoCodenser-medium |  0.4416 | 0.5636 |
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-dbpedia-entity-flat \
--topics beir-v1.0.0-dbpedia-entity-test \
--output run.beir-flat.dbpedia-entity.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-dbpedia-entity-test \
run.beir-flat.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-dbpedia-entity-test \
run.beir-flat.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-dbpedia-entity-test \
run.beir-flat.dbpedia-entity.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-dbpedia-entity-multifield \
--topics beir-v1.0.0-dbpedia-entity-test \
--output run.beir-multifield.dbpedia-entity.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-dbpedia-entity-test \
run.beir-multifield.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-dbpedia-entity-test \
run.beir-multifield.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-dbpedia-entity-test \
run.beir-multifield.dbpedia-entity.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-dbpedia-entity-splade_distil_cocodenser_medium \
--topics beir-v1.0.0-dbpedia-entity-test-splade_distil_cocodenser_medium \
--output run.beir-splade-distil-cocodenser-medium.dbpedia-entity.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-dbpedia-entity-test \
run.beir-splade-distil-cocodenser-medium.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-dbpedia-entity-test \
run.beir-splade-distil-cocodenser-medium.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-dbpedia-entity-test \
run.beir-splade-distil-cocodenser-medium.dbpedia-entity.txt
| scidocs                         | nDCG@10 |  R@100 |
|:--------------------------------|--------:|-------:|
| BM25 (flat)                     |  0.1490 | 0.3477 |
| BM25 (multifield)               |  0.1581 | 0.3561 |
| SPLADE-distil CoCodenser-medium |  0.1590 | 0.3671 |
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-scidocs-flat \
--topics beir-v1.0.0-scidocs-test \
--output run.beir-flat.scidocs.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-scidocs-test \
run.beir-flat.scidocs.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-scidocs-test \
run.beir-flat.scidocs.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-scidocs-test \
run.beir-flat.scidocs.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-scidocs-multifield \
--topics beir-v1.0.0-scidocs-test \
--output run.beir-multifield.scidocs.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-scidocs-test \
run.beir-multifield.scidocs.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-scidocs-test \
run.beir-multifield.scidocs.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-scidocs-test \
run.beir-multifield.scidocs.txt
Command to generate run:
python -m pyserini.search.lucene \
--index beir-v1.0.0-scidocs-splade_distil_cocodenser_medium \
--topics beir-v1.0.0-scidocs-test-splade_distil_cocodenser_medium \
--output run.beir-splade-distil-cocodenser-medium.scidocs.txt \
--output-format trec \
--batch 36 --threads 12 \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-scidocs-test \
run.beir-splade-distil-cocodenser-medium.scidocs.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-scidocs-test \
run.beir-splade-distil-cocodenser-medium.scidocs.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-scidocs-test \
run.beir-splade-distil-cocodenser-medium.scidocs.txt
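On the multifield indexes, `--fields contents=1.0 title=1.0` spreads the BM25 query over the two indexed fields with the given weights. The Python API exposes the same knob; a sketch under the assumption that `search()` accepts a `fields` mapping of field name to weight, as in recent Pyserini releases (the query is illustrative):

```python
from pyserini.search.lucene import LuceneSearcher

searcher = LuceneSearcher.from_prebuilt_index('beir-v1.0.0-scidocs-multifield')

# Equal weights on body text and title, mirroring
# --fields contents=1.0 title=1.0 from the CLI invocation above.
# Assumes search() supports the fields keyword argument.
hits = searcher.search('citation recommendation for scientific papers',
                       k=10,
                       fields={'contents': 1.0, 'title': 1.0})
for hit in hits[:5]:
    print(hit.docid, round(hit.score, 4))
```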
| fever                           | nDCG@10 | R@100  |
|:--------------------------------|--------:|-------:|
| BM25 ("flat" index)             |  0.6513 | 0.9185 |
| BM25 ("multifield" index)       |  0.7530 | 0.9309 |
| SPLADE-distil CoCodenser medium |  0.7962 | 0.9550 |

Command to generate run (BM25, "flat" index):

```bash
python -m pyserini.search.lucene \
  --index beir-v1.0.0-fever-flat \
  --topics beir-v1.0.0-fever-test \
  --output run.beir-flat.fever.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
```

Evaluation commands:

```bash
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-fever-test \
  run.beir-flat.fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-fever-test \
  run.beir-flat.fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-fever-test \
  run.beir-flat.fever.txt
```

Command to generate run (BM25, "multifield" index):

```bash
python -m pyserini.search.lucene \
  --index beir-v1.0.0-fever-multifield \
  --topics beir-v1.0.0-fever-test \
  --output run.beir-multifield.fever.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
```

Evaluation commands:

```bash
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-fever-test \
  run.beir-multifield.fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-fever-test \
  run.beir-multifield.fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-fever-test \
  run.beir-multifield.fever.txt
```

Command to generate run (SPLADE-distil CoCodenser medium):

```bash
python -m pyserini.search.lucene \
  --index beir-v1.0.0-fever-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-fever-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.fever.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
```

Evaluation commands:

```bash
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-fever-test \
  run.beir-splade-distil-cocodenser-medium.fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-fever-test \
  run.beir-splade-distil-cocodenser-medium.fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-fever-test \
  run.beir-splade-distil-cocodenser-medium.fever.txt
```
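All runs on this page are written with `--output-format trec`, i.e., six whitespace-separated columns per line: query id, the literal `Q0`, document id, rank, score, and a run tag. A small sketch for loading such a file, e.g., to inspect the top-ranked FEVER evidence pages (the `load_run` helper name is ours):

```python
from collections import defaultdict

# Parse a TREC-format run file into {qid: [(docid, rank, score), ...]}.
def load_run(path: str) -> dict:
    run = defaultdict(list)
    with open(path) as f:
        for line in f:
            qid, _q0, docid, rank, score, _tag = line.split()
            run[qid].append((docid, int(rank), float(score)))
    return run

run = load_run('run.beir-flat.fever.txt')
some_qid = next(iter(run))
print(some_qid, run[some_qid][:3])  # top-3 hits for the first query
```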
| climate-fever                   | nDCG@10 | R@100  |
|:--------------------------------|--------:|-------:|
| BM25 ("flat" index)             |  0.1651 | 0.4249 |
| BM25 ("multifield" index)       |  0.2129 | 0.4357 |
| SPLADE-distil CoCodenser medium |  0.2276 | 0.5140 |

Command to generate run (BM25, "flat" index):

```bash
python -m pyserini.search.lucene \
  --index beir-v1.0.0-climate-fever-flat \
  --topics beir-v1.0.0-climate-fever-test \
  --output run.beir-flat.climate-fever.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
```

Evaluation commands:

```bash
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-climate-fever-test \
  run.beir-flat.climate-fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-climate-fever-test \
  run.beir-flat.climate-fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-climate-fever-test \
  run.beir-flat.climate-fever.txt
```

Command to generate run (BM25, "multifield" index):

```bash
python -m pyserini.search.lucene \
  --index beir-v1.0.0-climate-fever-multifield \
  --topics beir-v1.0.0-climate-fever-test \
  --output run.beir-multifield.climate-fever.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
```

Evaluation commands:

```bash
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-climate-fever-test \
  run.beir-multifield.climate-fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-climate-fever-test \
  run.beir-multifield.climate-fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-climate-fever-test \
  run.beir-multifield.climate-fever.txt
```

Command to generate run (SPLADE-distil CoCodenser medium):

```bash
python -m pyserini.search.lucene \
  --index beir-v1.0.0-climate-fever-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-climate-fever-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.climate-fever.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
```

Evaluation commands:

```bash
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-climate-fever-test \
  run.beir-splade-distil-cocodenser-medium.climate-fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-climate-fever-test \
  run.beir-splade-distil-cocodenser-medium.climate-fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-climate-fever-test \
  run.beir-splade-distil-cocodenser-medium.climate-fever.txt
```
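The command blocks on this page differ only in the corpus name and retrieval condition, so reproducing several corpora is naturally scripted. A sketch that regenerates the BM25 "flat" runs for the corpora covered in this part of the table, passing exactly the flags shown above:

```python
import subprocess

CORPORA = ['dbpedia-entity', 'scidocs', 'fever', 'climate-fever', 'scifact']

for corpus in CORPORA:
    # Same invocation as the "flat" command blocks above,
    # with the corpus name substituted in.
    subprocess.run(
        ['python', '-m', 'pyserini.search.lucene',
         '--index', f'beir-v1.0.0-{corpus}-flat',
         '--topics', f'beir-v1.0.0-{corpus}-test',
         '--output', f'run.beir-flat.{corpus}.txt',
         '--output-format', 'trec',
         '--batch', '36', '--threads', '12',
         '--hits', '1000', '--bm25', '--remove-query'],
        check=True)
```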
| scifact                         | nDCG@10 | R@100  |
|:--------------------------------|--------:|-------:|
| BM25 ("flat" index)             |  0.6789 | 0.9253 |
| BM25 ("multifield" index)       |  0.6647 | 0.9076 |
| SPLADE-distil CoCodenser medium |  0.6992 | 0.9270 |

Command to generate run (BM25, "flat" index):

```bash
python -m pyserini.search.lucene \
  --index beir-v1.0.0-scifact-flat \
  --topics beir-v1.0.0-scifact-test \
  --output run.beir-flat.scifact.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
```

Evaluation commands:

```bash
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-scifact-test \
  run.beir-flat.scifact.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-scifact-test \
  run.beir-flat.scifact.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-scifact-test \
  run.beir-flat.scifact.txt
```

Command to generate run (BM25, "multifield" index):

```bash
python -m pyserini.search.lucene \
  --index beir-v1.0.0-scifact-multifield \
  --topics beir-v1.0.0-scifact-test \
  --output run.beir-multifield.scifact.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
```

Evaluation commands:

```bash
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-scifact-test \
  run.beir-multifield.scifact.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-scifact-test \
  run.beir-multifield.scifact.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-scifact-test \
  run.beir-multifield.scifact.txt
```

Command to generate run (SPLADE-distil CoCodenser medium):

```bash
python -m pyserini.search.lucene \
  --index beir-v1.0.0-scifact-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-scifact-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.scifact.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
```

Evaluation commands:

```bash
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-scifact-test \
  run.beir-splade-distil-cocodenser-medium.scifact.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-scifact-test \
  run.beir-splade-distil-cocodenser-medium.scifact.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-scifact-test \
  run.beir-splade-distil-cocodenser-medium.scifact.txt
```
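To collect the individual trec_eval figures back into a summary table like the ones above, the tool's stdout can be parsed: with `-m`, each aggregate row reports the metric, the query id `all`, and the value as three whitespace-separated fields. A hedged sketch under that assumption about the output format (the `score` helper name is ours):

```python
import subprocess

# Capture one metric for one run by parsing trec_eval's aggregate output row.
def score(metric: str, qrels: str, run: str) -> float:
    out = subprocess.run(
        ['python', '-m', 'pyserini.eval.trec_eval',
         '-c', '-m', metric, qrels, run],
        capture_output=True, text=True, check=True).stdout
    for line in out.splitlines():
        parts = line.split()
        # Aggregate rows are reported with query id "all".
        if len(parts) == 3 and parts[1] == 'all':
            return float(parts[2])
    raise ValueError(f'no aggregate line found for {metric}')

print(score('ndcg_cut.10', 'beir-v1.0.0-scifact-test',
            'run.beir-flat.scifact.txt'))
```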