Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

LUCENE-10236: Update field-weight used in CombinedFieldQuery scoring calculation (8.11.2 Backporting) #2637

Open
wants to merge 2 commits into
base: branch_8_11
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
4 changes: 3 additions & 1 deletion lucene/CHANGES.txt
Expand Up @@ -7,7 +7,9 @@ http://s.apache.org/luceneversions

Bug Fixes
---------------------
(No changes)

* LUCENE-10236: Stop duplicating norms when scoring in CombinedFieldQuery.
(Zach Chen, Jim Ferenczi, Julie Tibshirani)

======================= Lucene 8.11.1 =======================

Expand Down
Expand Up @@ -409,7 +409,7 @@ public Scorer scorer(LeafReaderContext context) throws IOException {
}

MultiNormsLeafSimScorer scoringSimScorer =
new MultiNormsLeafSimScorer(simWeight, context.reader(), fields, true);
new MultiNormsLeafSimScorer(simWeight, context.reader(), fieldAndWeights.values(), true);
LeafSimScorer nonScoringSimScorer =
new LeafSimScorer(simWeight, context.reader(), "pseudo_field", false);
// we use termscorers + disjunction as an impl detail
Expand Down
Expand Up @@ -21,8 +21,10 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.search.similarities.Similarity.SimScorer;
Expand Down Expand Up @@ -59,7 +61,13 @@ final class MultiNormsLeafSimScorer {
if (needsScores) {
final List<NumericDocValues> normsList = new ArrayList<>();
final List<Float> weightList = new ArrayList<>();
final Set<String> duplicateCheckingSet = new HashSet<>();
for (FieldAndWeight field : normFields) {
assert duplicateCheckingSet.add(field.field)
: "There is a duplicated field ["
+ field.field
+ "] used to construct MultiNormsLeafSimScorer";

NumericDocValues norms = reader.getNormValues(field.field);
if (norms != null) {
normsList.add(norms);
Expand Down
Expand Up @@ -16,6 +16,10 @@
*/
package org.apache.lucene.search;

import static com.carrotsearch.randomizedtesting.RandomizedTest.atMost;
import static com.carrotsearch.randomizedtesting.RandomizedTest.randomBoolean;
import static com.carrotsearch.randomizedtesting.RandomizedTest.randomIntBetween;

import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import java.io.IOException;
import java.util.Arrays;
Expand Down Expand Up @@ -154,6 +158,80 @@ public void testSameScore() throws IOException {
dir.close();
}

/**
 * Exercises scoring when multiple query terms ("foo", "zoo") match across multiple
 * combined fields ("a", "b") with per-field boosts, while an unqueried field ("c")
 * is also present on every document. Regression-style coverage for the
 * LUCENE-10236 field-weight fix in {@code CombinedFieldQuery}; the search itself
 * must complete without tripping the duplicate-field assertion in
 * {@code MultiNormsLeafSimScorer}.
 *
 * <p>NOTE(review): random draws are made in exactly the same order as before so
 * the test remains reproducible under a fixed randomized-testing seed.
 *
 * @throws IOException on index read/write failure
 */
public void testScoringWithMultipleFieldTermsMatch() throws IOException {
  int numMatchDoc = randomIntBetween(100, 500);
  int numHits = atMost(100);
  int boost1 = Math.max(1, random().nextInt(5));
  int boost2 = Math.max(1, random().nextInt(5));

  Directory dir = newDirectory();
  Similarity similarity = randomCompatibleSimilarity();

  IndexWriterConfig iwc = new IndexWriterConfig();
  iwc.setSimilarity(similarity);
  RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);

  // Index docs whose fields "a" and "b" can match the query; field "c" never
  // matches and only varies the per-document norms.
  for (int i = 0; i < numMatchDoc; i++) {
    Document doc = new Document();

    // matching term "foo" in field "a"
    addRepeatedTerm(doc, "a", "foo", random().nextInt(20) + 1);

    // sometimes pad field "a" with distinct non-matching terms to vary its norm;
    // the count is drawn unconditionally to keep the random stream stable
    int padCountA = random().nextInt(20) + 1;
    if (randomBoolean()) {
      for (int j = 0; j < padCountA; j++) {
        doc.add(new TextField("a", "foo" + j, Store.NO));
      }
    }

    // matching term "zoo" in field "a"
    addRepeatedTerm(doc, "a", "zoo", random().nextInt(20) + 1);

    // matching term "zoo" in field "b"
    addRepeatedTerm(doc, "b", "zoo", random().nextInt(20) + 1);

    // sometimes pad field "b" with distinct non-matching terms (count drawn
    // unconditionally, as above)
    int padCountB = random().nextInt(20) + 1;
    if (randomBoolean()) {
      for (int j = 0; j < padCountB; j++) {
        doc.add(new TextField("b", "zoo" + j, Store.NO));
      }
    }

    // unqueried field "c": distinct terms only, never matches the query
    int freqC = random().nextInt(20) + 1;
    for (int j = 0; j < freqC; j++) {
      doc.add(new TextField("c", "bla" + j, Store.NO));
    }
    w.addDocument(doc);
  }

  IndexReader reader = w.getReader();
  IndexSearcher searcher = newSearcher(reader);
  searcher.setSimilarity(similarity);

  CombinedFieldQuery query =
      new CombinedFieldQuery.Builder()
          .addField("a", (float) boost1)
          .addField("b", (float) boost2)
          .addTerm(new BytesRef("foo"))
          .addTerm(new BytesRef("zoo"))
          .build();

  TopScoreDocCollector completeCollector =
      TopScoreDocCollector.create(numHits, null, Integer.MAX_VALUE);
  searcher.search(query, completeCollector);

  reader.close();
  w.close();
  dir.close();
}

/** Adds {@code freq} copies of the same unstored text term to {@code field}. */
private static void addRepeatedTerm(Document doc, String field, String value, int freq) {
  for (int j = 0; j < freq; j++) {
    doc.add(new TextField(field, value, Store.NO));
  }
}

public void testNormsDisabled() throws IOException {
Directory dir = newDirectory();
Similarity similarity = randomCompatibleSimilarity();
Expand Down