Fix merging component templates with a mix of dotted and nested object mapper definitions (elastic#106077)

Co-authored-by: Andrei Dan <andrei.dan@elastic.co>
(cherry picked from commit ab52ef1)
felixbarny committed Apr 9, 2024
1 parent f4218f5 commit ea535da
Showing 23 changed files with 292 additions and 115 deletions.
7 changes: 7 additions & 0 deletions docs/changelog/106077.yaml
@@ -0,0 +1,7 @@
+pr: 106077
+summary: Fix merging component templates with a mix of dotted and nested object mapper
+  definitions
+area: Mapping
+type: bug
+issues:
+ - 105482
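
For context, the linked bug (elastic#105482) surfaces when component templates describe the same object field once in dotted notation and once as a nested object definition. The snippet below is a minimal reproduction sketch in the style of this commit's tests, not code from the commit itself; it assumes a MapperServiceTestCase-style harness providing createMapperService():

    // One template spells the field with dots...
    String dotted = """
        {"_doc": {"properties": {"parent.child": {"type": "keyword"}}}}""";
    // ...the other spells it as a nested object definition.
    String nested = """
        {"_doc": {"properties": {"parent": {"properties": {"child": {"type": "keyword"}}}}}}""";
    MapperService mapperService = createMapperService(dotted);
    // Component templates are merged with MergeReason.INDEX_TEMPLATE. Before this
    // fix, mixing the two spellings of [parent.child] could yield a broken merged
    // mapping instead of resolving to a single keyword field.
    mapperService.merge("_doc", new CompressedXContent(nested), MapperService.MergeReason.INDEX_TEMPLATE);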
@@ -49,7 +49,11 @@ public void testBasics() throws Exception {
                 .endObject()
         );

-        Mapping parsedMapping = createMapperService(mapping).parseMapping("type", new CompressedXContent(mapping));
+        Mapping parsedMapping = createMapperService(mapping).parseMapping(
+            "type",
+            MapperService.MergeReason.MAPPING_UPDATE,
+            new CompressedXContent(mapping)
+        );
         assertEquals(mapping, parsedMapping.toCompressedXContent().toString());
         assertNotNull(parsedMapping.getMetadataMapperByClass(RankFeatureMetaFieldMapper.class));
     }
@@ -52,6 +52,7 @@
 import org.elasticsearch.index.mapper.LuceneDocument;
 import org.elasticsearch.index.mapper.MapperParsingException;
 import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.MapperService.MergeReason;
 import org.elasticsearch.index.mapper.ParsedDocument;
 import org.elasticsearch.index.mapper.SourceToParse;
 import org.elasticsearch.index.mapper.TestDocumentParserContext;
@@ -206,7 +207,7 @@ public void init() throws Exception {
                 .endObject()
             .endObject()
         );
-        mapperService.merge("doc", new CompressedXContent(mapper), MapperService.MergeReason.MAPPING_UPDATE);
+        mapperService.merge("doc", new CompressedXContent(mapper), MergeReason.MAPPING_UPDATE);
     }

     private void addQueryFieldMappings() throws Exception {
@@ -223,7 +224,7 @@ private void addQueryFieldMappings() throws Exception {
                 .endObject()
             .endObject()
         );
-        mapperService.merge("doc", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE);
+        mapperService.merge("doc", new CompressedXContent(percolatorMapper), MergeReason.MAPPING_UPDATE);
         fieldType = (PercolatorFieldMapper.PercolatorFieldType) mapperService.fieldType(fieldName);
     }

@@ -699,7 +700,7 @@ public void testAllowNoAdditionalSettings() throws Exception {
         MapperParsingException e = expectThrows(
             MapperParsingException.class,
             () -> indexServiceWithoutSettings.mapperService()
-                .merge("doc", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE)
+                .merge("doc", new CompressedXContent(percolatorMapper), MergeReason.MAPPING_UPDATE)
         );
         assertThat(e.getMessage(), containsString("Mapping definition for [" + fieldName + "] has unsupported parameters: [index : no]"));
     }
@@ -722,7 +723,7 @@ public void testMultiplePercolatorFields() throws Exception {
                 .endObject()
             .endObject()
         );
-        mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE);
+        mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MergeReason.MAPPING_UPDATE);

         QueryBuilder queryBuilder = matchQuery("field", "value");
         ParsedDocument doc = mapperService.documentMapper()
@@ -763,7 +764,7 @@ public void testNestedPercolatorField() throws Exception {
                 .endObject()
             .endObject()
         );
-        mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE);
+        mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MergeReason.MAPPING_UPDATE);

         QueryBuilder queryBuilder = matchQuery("field", "value");
         ParsedDocument doc = mapperService.documentMapper()
@@ -912,7 +913,7 @@ public void testEmptyName() throws Exception {
         );
         MapperParsingException e = expectThrows(
             MapperParsingException.class,
-            () -> mapperService.parseMapping("type1", new CompressedXContent(mapping))
+            () -> mapperService.parseMapping("type1", MergeReason.MAPPING_UPDATE, new CompressedXContent(mapping))
         );
         assertThat(e.getMessage(), containsString("field name cannot be an empty string"));
     }
@@ -133,6 +133,7 @@ private static ClusterState applyRequest(
         final CompressedXContent mappingUpdateSource = request.source();
         final Metadata metadata = currentState.metadata();
         final List<IndexMetadata> updateList = new ArrayList<>();
+        MergeReason reason = request.autoUpdate() ? MergeReason.MAPPING_AUTO_UPDATE : MergeReason.MAPPING_UPDATE;
         for (Index index : request.indices()) {
             MapperService mapperService = indexMapperServices.get(index);
             // IMPORTANT: always get the metadata from the state since it get's batched
@@ -147,13 +148,8 @@ private static ClusterState applyRequest(
             updateList.add(indexMetadata);
             // try and parse it (no need to add it here) so we can bail early in case of parsing exception
             // first, simulate: just call merge and ignore the result
-            Mapping mapping = mapperService.parseMapping(MapperService.SINGLE_MAPPING_NAME, mappingUpdateSource);
-            MapperService.mergeMappings(
-                mapperService.documentMapper(),
-                mapping,
-                request.autoUpdate() ? MergeReason.MAPPING_AUTO_UPDATE : MergeReason.MAPPING_UPDATE,
-                mapperService.getIndexSettings()
-            );
+            Mapping mapping = mapperService.parseMapping(MapperService.SINGLE_MAPPING_NAME, reason, mappingUpdateSource);
+            MapperService.mergeMappings(mapperService.documentMapper(), mapping, reason, mapperService.getIndexSettings());
         }
         Metadata.Builder builder = Metadata.builder(metadata);
         boolean updated = false;
@@ -169,11 +165,7 @@ private static ClusterState applyRequest(
                 if (existingMapper != null) {
                     existingSource = existingMapper.mappingSource();
                 }
-                DocumentMapper mergedMapper = mapperService.merge(
-                    MapperService.SINGLE_MAPPING_NAME,
-                    mappingUpdateSource,
-                    request.autoUpdate() ? MergeReason.MAPPING_AUTO_UPDATE : MergeReason.MAPPING_UPDATE
-                );
+                DocumentMapper mergedMapper = mapperService.merge(MapperService.SINGLE_MAPPING_NAME, mappingUpdateSource, reason);
                 CompressedXContent updatedSource = mergedMapper.mappingSource();

                 if (existingSource != null) {
@@ -14,6 +14,7 @@
 import org.elasticsearch.common.time.DateFormatter;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.analysis.IndexAnalyzers;
+import org.elasticsearch.index.mapper.MapperService.MergeReason;
 import org.elasticsearch.xcontent.FilterXContentParserWrapper;
 import org.elasticsearch.xcontent.FlatteningXContentParser;
 import org.elasticsearch.xcontent.XContentParser;
@@ -613,7 +614,7 @@ public final MapperBuilderContext createDynamicMapperBuilderContext() {
         if (objectMapper instanceof PassThroughObjectMapper passThroughObjectMapper) {
             containsDimensions = passThroughObjectMapper.containsDimensions();
         }
-        return new MapperBuilderContext(p, mappingLookup().isSourceSynthetic(), false, containsDimensions);
+        return new MapperBuilderContext(p, mappingLookup().isSourceSynthetic(), false, containsDimensions, MergeReason.MAPPING_UPDATE);
     }

     public abstract XContentParser parser();
@@ -9,6 +9,7 @@
 package org.elasticsearch.index.mapper;

 import org.elasticsearch.common.Strings;
+import org.elasticsearch.index.mapper.MapperService.MergeReason;

 /**
  * Holds context for building Mapper objects from their Builders
@@ -19,23 +20,35 @@ public class MapperBuilderContext {
      * The root context, to be used when building a tree of mappers
      */
     public static MapperBuilderContext root(boolean isSourceSynthetic, boolean isDataStream) {
-        return new MapperBuilderContext(null, isSourceSynthetic, isDataStream, false);
+        return root(isSourceSynthetic, isDataStream, MergeReason.MAPPING_UPDATE);
     }

+    public static MapperBuilderContext root(boolean isSourceSynthetic, boolean isDataStream, MergeReason mergeReason) {
+        return new MapperBuilderContext(null, isSourceSynthetic, isDataStream, false, mergeReason);
+    }
+
     private final String path;
     private final boolean isSourceSynthetic;
     private final boolean isDataStream;
     private final boolean parentObjectContainsDimensions;
+    private final MergeReason mergeReason;

     MapperBuilderContext(String path) {
-        this(path, false, false, false);
+        this(path, false, false, false, MergeReason.MAPPING_UPDATE);
     }

-    MapperBuilderContext(String path, boolean isSourceSynthetic, boolean isDataStream, boolean parentObjectContainsDimensions) {
+    MapperBuilderContext(
+        String path,
+        boolean isSourceSynthetic,
+        boolean isDataStream,
+        boolean parentObjectContainsDimensions,
+        MergeReason mergeReason
+    ) {
         this.path = path;
         this.isSourceSynthetic = isSourceSynthetic;
         this.isDataStream = isDataStream;
         this.parentObjectContainsDimensions = parentObjectContainsDimensions;
+        this.mergeReason = mergeReason;
     }

     /**
Expand All @@ -44,7 +57,7 @@ public static MapperBuilderContext root(boolean isSourceSynthetic, boolean isDat
* @return a new MapperBuilderContext with this context as its parent
*/
public MapperBuilderContext createChildContext(String name) {
return new MapperBuilderContext(buildFullName(name), isSourceSynthetic, isDataStream, parentObjectContainsDimensions);
return new MapperBuilderContext(buildFullName(name), isSourceSynthetic, isDataStream, parentObjectContainsDimensions, mergeReason);
}

/**
@@ -78,4 +91,11 @@ public boolean parentObjectContainsDimensions() {
         return parentObjectContainsDimensions;
     }

+    /**
+     * The merge reason to use when merging mappers while building the mapper.
+     * See also {@link ObjectMapper.Builder#buildMappers(MapperBuilderContext)}.
+     */
+    public MergeReason getMergeReason() {
+        return mergeReason;
+    }
 }
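
The added overloads stay source-compatible: a context built without an explicit reason behaves as before. A small hypothetical usage sketch of the API shown in the diff above:

    // A root context for an index-template merge; children created through
    // createChildContext(String) inherit the same merge reason.
    MapperBuilderContext templateContext =
        MapperBuilderContext.root(false, false, MapperService.MergeReason.INDEX_TEMPLATE);
    assert templateContext.getMergeReason() == MapperService.MergeReason.INDEX_TEMPLATE;

    // The two-argument overload defaults to MAPPING_UPDATE, matching the
    // previous behavior for callers that never cared about the merge reason.
    MapperBuilderContext updateContext = MapperBuilderContext.root(false, false);
    assert updateContext.getMergeReason() == MapperService.MergeReason.MAPPING_UPDATE;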
@@ -8,6 +8,8 @@

 package org.elasticsearch.index.mapper;

+import org.elasticsearch.index.mapper.MapperService.MergeReason;
+
 /**
  * Holds context used when merging mappings.
  * As the merge process also involves building merged {@link Mapper.Builder}s,
@@ -23,11 +25,18 @@ private MapperMergeContext(MapperBuilderContext mapperBuilderContext, NewFieldsB
         this.newFieldsBudget = newFieldsBudget;
     }

+    static MapperMergeContext root(boolean isSourceSynthetic, boolean isDataStream, long newFieldsBudget) {
+        return root(isSourceSynthetic, isDataStream, MergeReason.MAPPING_UPDATE, newFieldsBudget);
+    }
+
     /**
      * The root context, to be used when merging a tree of mappers
      */
-    public static MapperMergeContext root(boolean isSourceSynthetic, boolean isDataStream, long newFieldsBudget) {
-        return new MapperMergeContext(MapperBuilderContext.root(isSourceSynthetic, isDataStream), NewFieldsBudget.of(newFieldsBudget));
+    public static MapperMergeContext root(boolean isSourceSynthetic, boolean isDataStream, MergeReason mergeReason, long newFieldsBudget) {
+        return new MapperMergeContext(
+            MapperBuilderContext.root(isSourceSynthetic, isDataStream, mergeReason),
+            NewFieldsBudget.of(newFieldsBudget)
+        );
     }

     /**
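
MapperMergeContext follows the same pattern: the reason is threaded into the MapperBuilderContext it wraps, and the package-private overload keeps MAPPING_UPDATE as the default. A hypothetical call for a template merge (Long.MAX_VALUE as the budget is an assumption meaning "no new-field limit"):

    MapperMergeContext mergeContext = MapperMergeContext.root(
        false,                                    // isSourceSynthetic
        false,                                    // isDataStream
        MapperService.MergeReason.INDEX_TEMPLATE, // template-merge semantics
        Long.MAX_VALUE                            // newFieldsBudget
    );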
@@ -310,7 +310,7 @@ public void updateMapping(final IndexMetadata currentIndexMetadata, final IndexM
         if (newMappingMetadata != null) {
             String type = newMappingMetadata.type();
             CompressedXContent incomingMappingSource = newMappingMetadata.source();
-            Mapping incomingMapping = parseMapping(type, incomingMappingSource);
+            Mapping incomingMapping = parseMapping(type, MergeReason.MAPPING_UPDATE, incomingMappingSource);
             DocumentMapper previousMapper;
             synchronized (this) {
                 previousMapper = this.mapper;
@@ -366,7 +366,7 @@ boolean assertNoUpdateRequired(final IndexMetadata newIndexMetadata) {
         // that the incoming mappings are the same as the current ones: we need to
         // parse the incoming mappings into a DocumentMapper and check that its
         // serialization is the same as the existing mapper
-        Mapping newMapping = parseMapping(mapping.type(), mapping.source());
+        Mapping newMapping = parseMapping(mapping.type(), MergeReason.MAPPING_UPDATE, mapping.source());
         final CompressedXContent currentSource = this.mapper.mappingSource();
         final CompressedXContent newSource = newMapping.toCompressedXContent();
         if (Objects.equals(currentSource, newSource) == false
@@ -533,7 +533,7 @@ public DocumentMapper merge(String type, CompressedXContent mappingSource, Merge
     }

     private synchronized DocumentMapper doMerge(String type, MergeReason reason, Map<String, Object> mappingSourceAsMap) {
-        Mapping incomingMapping = parseMapping(type, mappingSourceAsMap);
+        Mapping incomingMapping = parseMapping(type, reason, mappingSourceAsMap);
         Mapping mapping = mergeMappings(this.mapper, incomingMapping, reason, this.indexSettings);
         // TODO: In many cases the source here is equal to mappingSource so we need not serialize again.
         // We should identify these cases reliably and save expensive serialization here
@@ -542,7 +542,7 @@ private synchronized DocumentMapper doMerge(String type, MergeReason reason, Map
             return newMapper;
         }
         this.mapper = newMapper;
-        assert assertSerialization(newMapper);
+        assert assertSerialization(newMapper, reason);
         return newMapper;
     }

@@ -552,9 +552,9 @@ private DocumentMapper newDocumentMapper(Mapping mapping, MergeReason reason, Co
         return newMapper;
     }

-    public Mapping parseMapping(String mappingType, CompressedXContent mappingSource) {
+    public Mapping parseMapping(String mappingType, MergeReason reason, CompressedXContent mappingSource) {
         try {
-            return mappingParser.parse(mappingType, mappingSource);
+            return mappingParser.parse(mappingType, reason, mappingSource);
         } catch (Exception e) {
             throw new MapperParsingException("Failed to parse mapping: {}", e, e.getMessage());
         }
@@ -564,12 +564,13 @@ public Mapping parseMapping(String mappingType, CompressedXContent mappingSource
      * A method to parse mapping from a source in a map form.
      *
      * @param mappingType the mapping type
+     * @param reason the merge reason to use when merging mappers while building the mapper
      * @param mappingSource mapping source already converted to a map form, but not yet processed otherwise
      * @return a parsed mapping
      */
-    public Mapping parseMapping(String mappingType, Map<String, Object> mappingSource) {
+    public Mapping parseMapping(String mappingType, MergeReason reason, Map<String, Object> mappingSource) {
         try {
-            return mappingParser.parse(mappingType, mappingSource);
+            return mappingParser.parse(mappingType, reason, mappingSource);
         } catch (Exception e) {
             throw new MapperParsingException("Failed to parse mapping: {}", e, e.getMessage());
         }
@@ -619,10 +620,10 @@ static Mapping mergeMappings(DocumentMapper currentMapper, Mapping incomingMappi
         return newMapping;
     }

-    private boolean assertSerialization(DocumentMapper mapper) {
+    private boolean assertSerialization(DocumentMapper mapper, MergeReason reason) {
         // capture the source now, it may change due to concurrent parsing
         final CompressedXContent mappingSource = mapper.mappingSource();
-        Mapping newMapping = parseMapping(mapper.type(), mappingSource);
+        Mapping newMapping = parseMapping(mapper.type(), reason, mappingSource);
         if (newMapping.toCompressedXContent().equals(mappingSource) == false) {
             throw new AssertionError(
                 "Mapping serialization result is different from source. \n--> Source ["
@@ -137,8 +137,8 @@ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() {
      * @return the resulting merged mapping.
      */
     Mapping merge(Mapping mergeWith, MergeReason reason, long newFieldsBudget) {
-        MapperMergeContext mergeContext = MapperMergeContext.root(isSourceSynthetic(), false, newFieldsBudget);
-        RootObjectMapper mergedRoot = root.merge(mergeWith.root, reason, mergeContext);
+        MapperMergeContext mergeContext = MapperMergeContext.root(isSourceSynthetic(), false, reason, newFieldsBudget);
+        RootObjectMapper mergedRoot = root.merge(mergeWith.root, mergeContext);

         // When merging metadata fields as part of applying an index template, new field definitions
         // completely overwrite existing ones instead of being merged. This behavior matches how we
@@ -176,11 +176,11 @@ Mapping merge(Mapping mergeWith, MergeReason reason, long newFieldsBudget) {
      * @param fieldsBudget the maximum number of fields this mapping may have
      */
    public Mapping withFieldsBudget(long fieldsBudget) {
-        MapperMergeContext mergeContext = MapperMergeContext.root(isSourceSynthetic(), false, fieldsBudget);
+        MapperMergeContext mergeContext = MapperMergeContext.root(isSourceSynthetic(), false, MergeReason.MAPPING_RECOVERY, fieldsBudget);
         // get a copy of the root mapper, without any fields
         RootObjectMapper shallowRoot = root.withoutMappers();
         // calling merge on the shallow root to ensure we're only adding as many fields as allowed by the fields budget
-        return new Mapping(shallowRoot.merge(root, MergeReason.MAPPING_RECOVERY, mergeContext), metadataMappers, meta);
+        return new Mapping(shallowRoot.merge(root, mergeContext), metadataMappers, meta);
     }

     @Override
@Override