BEIR (v1.0.0): baseline retrieval results reproduced with Pyserini.

Key:

Three retrieval conditions are reported for each BEIR dataset: BM25 over a "flat" index, BM25 over a multifield index (contents and title fields, equal weights), and SPLADE-distil CoCodenser Medium (learned sparse retrieval using pre-encoded topics and impact scoring). For each condition, effectiveness is measured with nDCG@10, R@100, and R@1000, computed via trec_eval as shown in the evaluation commands below each run-generation command.
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-trec-covid-flat \
  --topics beir-v1.0.0-trec-covid-test \
  --output run.beir-flat.trec-covid.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-trec-covid-test \
  run.beir-flat.trec-covid.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-trec-covid-test \
  run.beir-flat.trec-covid.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-trec-covid-test \
  run.beir-flat.trec-covid.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-trec-covid-multifield \
  --topics beir-v1.0.0-trec-covid-test \
  --output run.beir-multifield.trec-covid.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-trec-covid-test \
  run.beir-multifield.trec-covid.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-trec-covid-test \
  run.beir-multifield.trec-covid.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-trec-covid-test \
  run.beir-multifield.trec-covid.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-trec-covid-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-trec-covid-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.trec-covid.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-trec-covid-test \
  run.beir-splade-distil-cocodenser-medium.trec-covid.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-trec-covid-test \
  run.beir-splade-distil-cocodenser-medium.trec-covid.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-trec-covid-test \
  run.beir-splade-distil-cocodenser-medium.trec-covid.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-bioasq-flat \
  --topics beir-v1.0.0-bioasq-test \
  --output run.beir-flat.bioasq.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-bioasq-test \
  run.beir-flat.bioasq.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-bioasq-test \
  run.beir-flat.bioasq.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-bioasq-test \
  run.beir-flat.bioasq.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-bioasq-multifield \
  --topics beir-v1.0.0-bioasq-test \
  --output run.beir-multifield.bioasq.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-bioasq-test \
  run.beir-multifield.bioasq.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-bioasq-test \
  run.beir-multifield.bioasq.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-bioasq-test \
  run.beir-multifield.bioasq.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-bioasq-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-bioasq-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.bioasq.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-bioasq-test \
  run.beir-splade-distil-cocodenser-medium.bioasq.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-bioasq-test \
  run.beir-splade-distil-cocodenser-medium.bioasq.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-bioasq-test \
  run.beir-splade-distil-cocodenser-medium.bioasq.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-nfcorpus-flat \
  --topics beir-v1.0.0-nfcorpus-test \
  --output run.beir-flat.nfcorpus.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-nfcorpus-test \
  run.beir-flat.nfcorpus.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-nfcorpus-test \
  run.beir-flat.nfcorpus.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-nfcorpus-test \
  run.beir-flat.nfcorpus.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-nfcorpus-multifield \
  --topics beir-v1.0.0-nfcorpus-test \
  --output run.beir-multifield.nfcorpus.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-nfcorpus-test \
  run.beir-multifield.nfcorpus.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-nfcorpus-test \
  run.beir-multifield.nfcorpus.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-nfcorpus-test \
  run.beir-multifield.nfcorpus.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-nfcorpus-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-nfcorpus-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.nfcorpus.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-nfcorpus-test \
  run.beir-splade-distil-cocodenser-medium.nfcorpus.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-nfcorpus-test \
  run.beir-splade-distil-cocodenser-medium.nfcorpus.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-nfcorpus-test \
  run.beir-splade-distil-cocodenser-medium.nfcorpus.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-nq-flat \
  --topics beir-v1.0.0-nq-test \
  --output run.beir-flat.nq.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-nq-test \
  run.beir-flat.nq.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-nq-test \
  run.beir-flat.nq.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-nq-test \
  run.beir-flat.nq.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-nq-multifield \
  --topics beir-v1.0.0-nq-test \
  --output run.beir-multifield.nq.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-nq-test \
  run.beir-multifield.nq.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-nq-test \
  run.beir-multifield.nq.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-nq-test \
  run.beir-multifield.nq.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-nq-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-nq-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.nq.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-nq-test \
  run.beir-splade-distil-cocodenser-medium.nq.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-nq-test \
  run.beir-splade-distil-cocodenser-medium.nq.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-nq-test \
  run.beir-splade-distil-cocodenser-medium.nq.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-hotpotqa-flat \
  --topics beir-v1.0.0-hotpotqa-test \
  --output run.beir-flat.hotpotqa.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-hotpotqa-test \
  run.beir-flat.hotpotqa.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-hotpotqa-test \
  run.beir-flat.hotpotqa.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-hotpotqa-test \
  run.beir-flat.hotpotqa.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-hotpotqa-multifield \
  --topics beir-v1.0.0-hotpotqa-test \
  --output run.beir-multifield.hotpotqa.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-hotpotqa-test \
  run.beir-multifield.hotpotqa.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-hotpotqa-test \
  run.beir-multifield.hotpotqa.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-hotpotqa-test \
  run.beir-multifield.hotpotqa.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-hotpotqa-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-hotpotqa-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.hotpotqa.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-hotpotqa-test \
  run.beir-splade-distil-cocodenser-medium.hotpotqa.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-hotpotqa-test \
  run.beir-splade-distil-cocodenser-medium.hotpotqa.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-hotpotqa-test \
  run.beir-splade-distil-cocodenser-medium.hotpotqa.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-fiqa-flat \
  --topics beir-v1.0.0-fiqa-test \
  --output run.beir-flat.fiqa.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-fiqa-test \
  run.beir-flat.fiqa.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-fiqa-test \
  run.beir-flat.fiqa.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-fiqa-test \
  run.beir-flat.fiqa.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-fiqa-multifield \
  --topics beir-v1.0.0-fiqa-test \
  --output run.beir-multifield.fiqa.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-fiqa-test \
  run.beir-multifield.fiqa.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-fiqa-test \
  run.beir-multifield.fiqa.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-fiqa-test \
  run.beir-multifield.fiqa.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-fiqa-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-fiqa-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.fiqa.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-fiqa-test \
  run.beir-splade-distil-cocodenser-medium.fiqa.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-fiqa-test \
  run.beir-splade-distil-cocodenser-medium.fiqa.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-fiqa-test \
  run.beir-splade-distil-cocodenser-medium.fiqa.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-signal1m-flat \
  --topics beir-v1.0.0-signal1m-test \
  --output run.beir-flat.signal1m.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-signal1m-test \
  run.beir-flat.signal1m.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-signal1m-test \
  run.beir-flat.signal1m.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-signal1m-test \
  run.beir-flat.signal1m.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-signal1m-multifield \
  --topics beir-v1.0.0-signal1m-test \
  --output run.beir-multifield.signal1m.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-signal1m-test \
  run.beir-multifield.signal1m.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-signal1m-test \
  run.beir-multifield.signal1m.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-signal1m-test \
  run.beir-multifield.signal1m.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-signal1m-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-signal1m-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.signal1m.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-signal1m-test \
  run.beir-splade-distil-cocodenser-medium.signal1m.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-signal1m-test \
  run.beir-splade-distil-cocodenser-medium.signal1m.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-signal1m-test \
  run.beir-splade-distil-cocodenser-medium.signal1m.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-trec-news-flat \
  --topics beir-v1.0.0-trec-news-test \
  --output run.beir-flat.trec-news.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-trec-news-test \
  run.beir-flat.trec-news.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-trec-news-test \
  run.beir-flat.trec-news.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-trec-news-test \
  run.beir-flat.trec-news.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-trec-news-multifield \
  --topics beir-v1.0.0-trec-news-test \
  --output run.beir-multifield.trec-news.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-trec-news-test \
  run.beir-multifield.trec-news.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-trec-news-test \
  run.beir-multifield.trec-news.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-trec-news-test \
  run.beir-multifield.trec-news.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-trec-news-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-trec-news-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.trec-news.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-trec-news-test \
  run.beir-splade-distil-cocodenser-medium.trec-news.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-trec-news-test \
  run.beir-splade-distil-cocodenser-medium.trec-news.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-trec-news-test \
  run.beir-splade-distil-cocodenser-medium.trec-news.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-robust04-flat \
  --topics beir-v1.0.0-robust04-test \
  --output run.beir-flat.robust04.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-robust04-test \
  run.beir-flat.robust04.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-robust04-test \
  run.beir-flat.robust04.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-robust04-test \
  run.beir-flat.robust04.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-robust04-multifield \
  --topics beir-v1.0.0-robust04-test \
  --output run.beir-multifield.robust04.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-robust04-test \
  run.beir-multifield.robust04.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-robust04-test \
  run.beir-multifield.robust04.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-robust04-test \
  run.beir-multifield.robust04.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-robust04-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-robust04-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.robust04.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-robust04-test \
  run.beir-splade-distil-cocodenser-medium.robust04.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-robust04-test \
  run.beir-splade-distil-cocodenser-medium.robust04.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-robust04-test \
  run.beir-splade-distil-cocodenser-medium.robust04.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-arguana-flat \
  --topics beir-v1.0.0-arguana-test \
  --output run.beir-flat.arguana.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-arguana-test \
  run.beir-flat.arguana.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-arguana-test \
  run.beir-flat.arguana.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-arguana-test \
  run.beir-flat.arguana.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-arguana-multifield \
  --topics beir-v1.0.0-arguana-test \
  --output run.beir-multifield.arguana.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-arguana-test \
  run.beir-multifield.arguana.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-arguana-test \
  run.beir-multifield.arguana.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-arguana-test \
  run.beir-multifield.arguana.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-arguana-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-arguana-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.arguana.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-arguana-test \
  run.beir-splade-distil-cocodenser-medium.arguana.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-arguana-test \
  run.beir-splade-distil-cocodenser-medium.arguana.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-arguana-test \
  run.beir-splade-distil-cocodenser-medium.arguana.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-webis-touche2020-flat \
  --topics beir-v1.0.0-webis-touche2020-test \
  --output run.beir-flat.webis-touche2020.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-webis-touche2020-test \
  run.beir-flat.webis-touche2020.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-webis-touche2020-test \
  run.beir-flat.webis-touche2020.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-webis-touche2020-test \
  run.beir-flat.webis-touche2020.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-webis-touche2020-multifield \
  --topics beir-v1.0.0-webis-touche2020-test \
  --output run.beir-multifield.webis-touche2020.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-webis-touche2020-test \
  run.beir-multifield.webis-touche2020.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-webis-touche2020-test \
  run.beir-multifield.webis-touche2020.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-webis-touche2020-test \
  run.beir-multifield.webis-touche2020.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-webis-touche2020-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-webis-touche2020-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.webis-touche2020.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-webis-touche2020-test \
  run.beir-splade-distil-cocodenser-medium.webis-touche2020.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-webis-touche2020-test \
  run.beir-splade-distil-cocodenser-medium.webis-touche2020.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-webis-touche2020-test \
  run.beir-splade-distil-cocodenser-medium.webis-touche2020.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-android-flat \
  --topics beir-v1.0.0-cqadupstack-android-test \
  --output run.beir-flat.cqadupstack-android.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-android-test \
  run.beir-flat.cqadupstack-android.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-android-test \
  run.beir-flat.cqadupstack-android.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-android-test \
  run.beir-flat.cqadupstack-android.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-android-multifield \
  --topics beir-v1.0.0-cqadupstack-android-test \
  --output run.beir-multifield.cqadupstack-android.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-android-test \
  run.beir-multifield.cqadupstack-android.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-android-test \
  run.beir-multifield.cqadupstack-android.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-android-test \
  run.beir-multifield.cqadupstack-android.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-android-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-cqadupstack-android-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.cqadupstack-android.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-android-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-android.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-android-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-android.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-android-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-android.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-english-flat \
  --topics beir-v1.0.0-cqadupstack-english-test \
  --output run.beir-flat.cqadupstack-english.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-english-test \
  run.beir-flat.cqadupstack-english.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-english-test \
  run.beir-flat.cqadupstack-english.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-english-test \
  run.beir-flat.cqadupstack-english.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-english-multifield \
  --topics beir-v1.0.0-cqadupstack-english-test \
  --output run.beir-multifield.cqadupstack-english.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-english-test \
  run.beir-multifield.cqadupstack-english.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-english-test \
  run.beir-multifield.cqadupstack-english.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-english-test \
  run.beir-multifield.cqadupstack-english.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-english-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-cqadupstack-english-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.cqadupstack-english.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-english-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-english.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-english-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-english.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-english-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-english.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-gaming-flat \
  --topics beir-v1.0.0-cqadupstack-gaming-test \
  --output run.beir-flat.cqadupstack-gaming.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gaming-test \
  run.beir-flat.cqadupstack-gaming.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-gaming-test \
  run.beir-flat.cqadupstack-gaming.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-gaming-test \
  run.beir-flat.cqadupstack-gaming.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-gaming-multifield \
  --topics beir-v1.0.0-cqadupstack-gaming-test \
  --output run.beir-multifield.cqadupstack-gaming.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gaming-test \
  run.beir-multifield.cqadupstack-gaming.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-gaming-test \
  run.beir-multifield.cqadupstack-gaming.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-gaming-test \
  run.beir-multifield.cqadupstack-gaming.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-gaming-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-cqadupstack-gaming-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.cqadupstack-gaming.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gaming-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-gaming.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-gaming-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-gaming.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-gaming-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-gaming.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-gis-flat \
  --topics beir-v1.0.0-cqadupstack-gis-test \
  --output run.beir-flat.cqadupstack-gis.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gis-test \
  run.beir-flat.cqadupstack-gis.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-gis-test \
  run.beir-flat.cqadupstack-gis.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-gis-test \
  run.beir-flat.cqadupstack-gis.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-gis-multifield \
  --topics beir-v1.0.0-cqadupstack-gis-test \
  --output run.beir-multifield.cqadupstack-gis.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gis-test \
  run.beir-multifield.cqadupstack-gis.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-gis-test \
  run.beir-multifield.cqadupstack-gis.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-gis-test \
  run.beir-multifield.cqadupstack-gis.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-gis-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-cqadupstack-gis-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.cqadupstack-gis.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gis-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-gis.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-gis-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-gis.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-gis-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-gis.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-mathematica-flat \
  --topics beir-v1.0.0-cqadupstack-mathematica-test \
  --output run.beir-flat.cqadupstack-mathematica.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-mathematica-test \
  run.beir-flat.cqadupstack-mathematica.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-mathematica-test \
  run.beir-flat.cqadupstack-mathematica.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-mathematica-test \
  run.beir-flat.cqadupstack-mathematica.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-mathematica-multifield \
  --topics beir-v1.0.0-cqadupstack-mathematica-test \
  --output run.beir-multifield.cqadupstack-mathematica.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-mathematica-test \
  run.beir-multifield.cqadupstack-mathematica.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-mathematica-test \
  run.beir-multifield.cqadupstack-mathematica.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-mathematica-test \
  run.beir-multifield.cqadupstack-mathematica.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-mathematica-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-cqadupstack-mathematica-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.cqadupstack-mathematica.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-mathematica-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-mathematica.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-mathematica-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-mathematica.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-mathematica-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-mathematica.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-physics-flat \
  --topics beir-v1.0.0-cqadupstack-physics-test \
  --output run.beir-flat.cqadupstack-physics.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-physics-test \
  run.beir-flat.cqadupstack-physics.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-physics-test \
  run.beir-flat.cqadupstack-physics.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-physics-test \
  run.beir-flat.cqadupstack-physics.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-physics-multifield \
  --topics beir-v1.0.0-cqadupstack-physics-test \
  --output run.beir-multifield.cqadupstack-physics.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-physics-test \
  run.beir-multifield.cqadupstack-physics.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-physics-test \
  run.beir-multifield.cqadupstack-physics.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-physics-test \
  run.beir-multifield.cqadupstack-physics.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-physics-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-cqadupstack-physics-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.cqadupstack-physics.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-physics-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-physics.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-physics-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-physics.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-physics-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-physics.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-programmers-flat \
  --topics beir-v1.0.0-cqadupstack-programmers-test \
  --output run.beir-flat.cqadupstack-programmers.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-programmers-test \
  run.beir-flat.cqadupstack-programmers.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-programmers-test \
  run.beir-flat.cqadupstack-programmers.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-programmers-test \
  run.beir-flat.cqadupstack-programmers.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-programmers-multifield \
  --topics beir-v1.0.0-cqadupstack-programmers-test \
  --output run.beir-multifield.cqadupstack-programmers.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-programmers-test \
  run.beir-multifield.cqadupstack-programmers.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-programmers-test \
  run.beir-multifield.cqadupstack-programmers.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-programmers-test \
  run.beir-multifield.cqadupstack-programmers.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-programmers-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-cqadupstack-programmers-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.cqadupstack-programmers.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-programmers-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-programmers.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-programmers-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-programmers.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-programmers-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-programmers.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-stats-flat \
  --topics beir-v1.0.0-cqadupstack-stats-test \
  --output run.beir-flat.cqadupstack-stats.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-stats-test \
  run.beir-flat.cqadupstack-stats.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-stats-test \
  run.beir-flat.cqadupstack-stats.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-stats-test \
  run.beir-flat.cqadupstack-stats.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-stats-multifield \
  --topics beir-v1.0.0-cqadupstack-stats-test \
  --output run.beir-multifield.cqadupstack-stats.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-stats-test \
  run.beir-multifield.cqadupstack-stats.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-stats-test \
  run.beir-multifield.cqadupstack-stats.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-stats-test \
  run.beir-multifield.cqadupstack-stats.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-stats-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-cqadupstack-stats-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.cqadupstack-stats.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-stats-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-stats.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-stats-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-stats.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-stats-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-stats.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-tex-flat \
  --topics beir-v1.0.0-cqadupstack-tex-test \
  --output run.beir-flat.cqadupstack-tex.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-tex-test \
  run.beir-flat.cqadupstack-tex.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-tex-test \
  run.beir-flat.cqadupstack-tex.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-tex-test \
  run.beir-flat.cqadupstack-tex.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-tex-multifield \
  --topics beir-v1.0.0-cqadupstack-tex-test \
  --output run.beir-multifield.cqadupstack-tex.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-tex-test \
  run.beir-multifield.cqadupstack-tex.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-tex-test \
  run.beir-multifield.cqadupstack-tex.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-tex-test \
  run.beir-multifield.cqadupstack-tex.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-tex-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-cqadupstack-tex-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.cqadupstack-tex.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-tex-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-tex.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-tex-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-tex.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-tex-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-tex.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-unix-flat \
  --topics beir-v1.0.0-cqadupstack-unix-test \
  --output run.beir-flat.cqadupstack-unix.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-unix-test \
  run.beir-flat.cqadupstack-unix.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-unix-test \
  run.beir-flat.cqadupstack-unix.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-unix-test \
  run.beir-flat.cqadupstack-unix.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-unix-multifield \
  --topics beir-v1.0.0-cqadupstack-unix-test \
  --output run.beir-multifield.cqadupstack-unix.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-unix-test \
  run.beir-multifield.cqadupstack-unix.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-unix-test \
  run.beir-multifield.cqadupstack-unix.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-unix-test \
  run.beir-multifield.cqadupstack-unix.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-unix-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-cqadupstack-unix-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.cqadupstack-unix.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-unix-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-unix.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-unix-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-unix.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-unix-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-unix.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-webmasters-flat \
  --topics beir-v1.0.0-cqadupstack-webmasters-test \
  --output run.beir-flat.cqadupstack-webmasters.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-webmasters-test \
  run.beir-flat.cqadupstack-webmasters.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-webmasters-test \
  run.beir-flat.cqadupstack-webmasters.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-webmasters-test \
  run.beir-flat.cqadupstack-webmasters.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-webmasters-multifield \
  --topics beir-v1.0.0-cqadupstack-webmasters-test \
  --output run.beir-multifield.cqadupstack-webmasters.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-webmasters-test \
  run.beir-multifield.cqadupstack-webmasters.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-webmasters-test \
  run.beir-multifield.cqadupstack-webmasters.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-webmasters-test \
  run.beir-multifield.cqadupstack-webmasters.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-webmasters-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-cqadupstack-webmasters-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.cqadupstack-webmasters.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-webmasters-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-webmasters.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-webmasters-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-webmasters.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-webmasters-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-webmasters.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-wordpress-flat \
  --topics beir-v1.0.0-cqadupstack-wordpress-test \
  --output run.beir-flat.cqadupstack-wordpress.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-wordpress-test \
  run.beir-flat.cqadupstack-wordpress.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-wordpress-test \
  run.beir-flat.cqadupstack-wordpress.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-wordpress-test \
  run.beir-flat.cqadupstack-wordpress.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-wordpress-multifield \
  --topics beir-v1.0.0-cqadupstack-wordpress-test \
  --output run.beir-multifield.cqadupstack-wordpress.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-wordpress-test \
  run.beir-multifield.cqadupstack-wordpress.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-wordpress-test \
  run.beir-multifield.cqadupstack-wordpress.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-wordpress-test \
  run.beir-multifield.cqadupstack-wordpress.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-cqadupstack-wordpress-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-cqadupstack-wordpress-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.cqadupstack-wordpress.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-wordpress-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-wordpress.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-wordpress-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-wordpress.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-wordpress-test \
  run.beir-splade-distil-cocodenser-medium.cqadupstack-wordpress.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-quora-flat \
  --topics beir-v1.0.0-quora-test \
  --output run.beir-flat.quora.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-quora-test \
  run.beir-flat.quora.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-quora-test \
  run.beir-flat.quora.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-quora-test \
  run.beir-flat.quora.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-quora-multifield \
  --topics beir-v1.0.0-quora-test \
  --output run.beir-multifield.quora.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-quora-test \
  run.beir-multifield.quora.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-quora-test \
  run.beir-multifield.quora.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-quora-test \
  run.beir-multifield.quora.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-quora-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-quora-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.quora.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-quora-test \
  run.beir-splade-distil-cocodenser-medium.quora.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-quora-test \
  run.beir-splade-distil-cocodenser-medium.quora.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-quora-test \
  run.beir-splade-distil-cocodenser-medium.quora.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-dbpedia-entity-flat \
  --topics beir-v1.0.0-dbpedia-entity-test \
  --output run.beir-flat.dbpedia-entity.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-dbpedia-entity-test \
  run.beir-flat.dbpedia-entity.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-dbpedia-entity-test \
  run.beir-flat.dbpedia-entity.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-dbpedia-entity-test \
  run.beir-flat.dbpedia-entity.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-dbpedia-entity-multifield \
  --topics beir-v1.0.0-dbpedia-entity-test \
  --output run.beir-multifield.dbpedia-entity.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-dbpedia-entity-test \
  run.beir-multifield.dbpedia-entity.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-dbpedia-entity-test \
  run.beir-multifield.dbpedia-entity.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-dbpedia-entity-test \
  run.beir-multifield.dbpedia-entity.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-dbpedia-entity-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-dbpedia-entity-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.dbpedia-entity.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-dbpedia-entity-test \
  run.beir-splade-distil-cocodenser-medium.dbpedia-entity.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-dbpedia-entity-test \
  run.beir-splade-distil-cocodenser-medium.dbpedia-entity.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-dbpedia-entity-test \
  run.beir-splade-distil-cocodenser-medium.dbpedia-entity.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-scidocs-flat \
  --topics beir-v1.0.0-scidocs-test \
  --output run.beir-flat.scidocs.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-scidocs-test \
  run.beir-flat.scidocs.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-scidocs-test \
  run.beir-flat.scidocs.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-scidocs-test \
  run.beir-flat.scidocs.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-scidocs-multifield \
  --topics beir-v1.0.0-scidocs-test \
  --output run.beir-multifield.scidocs.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-scidocs-test \
  run.beir-multifield.scidocs.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-scidocs-test \
  run.beir-multifield.scidocs.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-scidocs-test \
  run.beir-multifield.scidocs.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-scidocs-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-scidocs-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.scidocs.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-scidocs-test \
  run.beir-splade-distil-cocodenser-medium.scidocs.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-scidocs-test \
  run.beir-splade-distil-cocodenser-medium.scidocs.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-scidocs-test \
  run.beir-splade-distil-cocodenser-medium.scidocs.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-fever-flat \
  --topics beir-v1.0.0-fever-test \
  --output run.beir-flat.fever.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-fever-test \
  run.beir-flat.fever.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-fever-test \
  run.beir-flat.fever.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-fever-test \
  run.beir-flat.fever.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-fever-multifield \
  --topics beir-v1.0.0-fever-test \
  --output run.beir-multifield.fever.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-fever-test \
  run.beir-multifield.fever.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-fever-test \
  run.beir-multifield.fever.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-fever-test \
  run.beir-multifield.fever.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-fever-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-fever-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.fever.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-fever-test \
  run.beir-splade-distil-cocodenser-medium.fever.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-fever-test \
  run.beir-splade-distil-cocodenser-medium.fever.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-fever-test \
  run.beir-splade-distil-cocodenser-medium.fever.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-climate-fever-flat \
  --topics beir-v1.0.0-climate-fever-test \
  --output run.beir-flat.climate-fever.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-climate-fever-test \
  run.beir-flat.climate-fever.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-climate-fever-test \
  run.beir-flat.climate-fever.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-climate-fever-test \
  run.beir-flat.climate-fever.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-climate-fever-multifield \
  --topics beir-v1.0.0-climate-fever-test \
  --output run.beir-multifield.climate-fever.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-climate-fever-test \
  run.beir-multifield.climate-fever.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-climate-fever-test \
  run.beir-multifield.climate-fever.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-climate-fever-test \
  run.beir-multifield.climate-fever.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-climate-fever-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-climate-fever-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.climate-fever.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-climate-fever-test \
  run.beir-splade-distil-cocodenser-medium.climate-fever.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-climate-fever-test \
  run.beir-splade-distil-cocodenser-medium.climate-fever.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-climate-fever-test \
  run.beir-splade-distil-cocodenser-medium.climate-fever.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-scifact-flat \
  --topics beir-v1.0.0-scifact-test \
  --output run.beir-flat.scifact.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-scifact-test \
  run.beir-flat.scifact.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-scifact-test \
  run.beir-flat.scifact.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-scifact-test \
  run.beir-flat.scifact.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-scifact-multifield \
  --topics beir-v1.0.0-scifact-test \
  --output run.beir-multifield.scifact.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-scifact-test \
  run.beir-multifield.scifact.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-scifact-test \
  run.beir-multifield.scifact.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-scifact-test \
  run.beir-multifield.scifact.txt
Command to generate run:
python -m pyserini.search.lucene \
  --index beir-v1.0.0-scifact-splade_distil_cocodenser_medium \
  --topics beir-v1.0.0-scifact-test-splade_distil_cocodenser_medium \
  --output run.beir-splade-distil-cocodenser-medium.scifact.txt \
  --output-format trec \
  --batch 36 --threads 12 \
  --hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-scifact-test \
  run.beir-splade-distil-cocodenser-medium.scifact.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-scifact-test \
  run.beir-splade-distil-cocodenser-medium.scifact.txt

python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-scifact-test \
  run.beir-splade-distil-cocodenser-medium.scifact.txt