[core] support decoupling the delta files lifecycle #3178

Open · wants to merge 2 commits into base: master
@@ -28,6 +28,7 @@
import org.apache.paimon.manifest.ManifestList;
import org.apache.paimon.metastore.AddPartitionTagCallback;
import org.apache.paimon.metastore.MetastoreClient;
import org.apache.paimon.operation.ChangelogDeletion;
import org.apache.paimon.operation.FileStoreCommitImpl;
import org.apache.paimon.operation.PartitionExpire;
import org.apache.paimon.operation.SnapshotDeletion;
@@ -201,8 +202,21 @@ public FileStoreCommitImpl newCommit(String commitUser, String branchName) {
}

@Override
public SnapshotDeletion newSnapshotDeletion() {
public SnapshotDeletion newSnapshotDeletion(CoreOptions options) {
return new SnapshotDeletion(
fileIO,
pathFactory(),
manifestFileFactory().create(),
manifestListFactory().create(),
newIndexFileHandler(),
newStatsFileHandler(),
options.changelogLifecycleDecoupled(),
options.changelogProducer() != CoreOptions.ChangelogProducer.NONE);
}

@Override
public ChangelogDeletion newChangelogDeletion(CoreOptions options) {
return new ChangelogDeletion(
fileIO,
pathFactory(),
manifestFileFactory().create(),
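The two new constructor arguments handed to `SnapshotDeletion` come straight from table options. A sketch of the assumed semantics (the conjunction below is an interpretation, not code from this PR): when the changelog lifecycle is decoupled and a changelog producer is configured, snapshot expiration must leave changelog-referenced files to the separate `ChangelogDeletion`.

```java
import org.apache.paimon.CoreOptions;

final class ChangelogRetentionHint {
    /**
     * Assumed interpretation of the two flags passed to SnapshotDeletion:
     * files backing a decoupled changelog must survive snapshot expiration.
     */
    static boolean changelogOutlivesSnapshots(CoreOptions options) {
        return options.changelogLifecycleDecoupled()
                && options.changelogProducer() != CoreOptions.ChangelogProducer.NONE;
    }
}
```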
paimon-core/src/main/java/org/apache/paimon/FileStore.java (4 additions & 1 deletion)
@@ -22,6 +22,7 @@
import org.apache.paimon.manifest.ManifestCacheFilter;
import org.apache.paimon.manifest.ManifestFile;
import org.apache.paimon.manifest.ManifestList;
import org.apache.paimon.operation.ChangelogDeletion;
import org.apache.paimon.operation.FileStoreCommit;
import org.apache.paimon.operation.FileStoreScan;
import org.apache.paimon.operation.FileStoreWrite;
@@ -83,7 +84,9 @@ public interface FileStore<T> extends Serializable {

FileStoreCommit newCommit(String commitUser, String branchName);

SnapshotDeletion newSnapshotDeletion();
SnapshotDeletion newSnapshotDeletion(CoreOptions options);

ChangelogDeletion newChangelogDeletion(CoreOptions options);

TagManager newTagManager();

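A hypothetical call site for the widened interface, showing how an expiration routine might now obtain both helpers (the class and method names here are illustrative, not from this PR):

```java
import org.apache.paimon.CoreOptions;
import org.apache.paimon.FileStore;
import org.apache.paimon.operation.ChangelogDeletion;
import org.apache.paimon.operation.SnapshotDeletion;

final class ExpireWiring {
    /** Illustrative only: each expiration path gets its own deletion helper. */
    static void wire(FileStore<?> store, CoreOptions options) {
        SnapshotDeletion snapshotDeletion = store.newSnapshotDeletion(options);
        ChangelogDeletion changelogDeletion = store.newChangelogDeletion(options);
        // Snapshot expiration drives snapshotDeletion; a separate expiration
        // pass for the decoupled changelog drives changelogDeletion.
    }
}
```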
@@ -251,7 +251,8 @@ private RowDataRollingFileWriter createRollingRowWriter() {
seqNumCounter,
fileCompression,
statsCollectors,
fileIndexOptions);
fileIndexOptions,
false);
}

private void trySyncLatestCompaction(boolean blocking)
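The bare `false` appended here presumably feeds the same flag that `KeyValueDataFileWriter` receives as `isCompact` further down: rolling row writers created by `createRollingRowWriter` never produce compaction output, so their files would be stamped as append-produced. The parameter name is not visible in this hunk, so this reading is an assumption.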
paimon-core/src/main/java/org/apache/paimon/io/DataFileMeta.java (36 additions & 15 deletions)
@@ -23,6 +23,7 @@
import org.apache.paimon.data.BinaryRow;
import org.apache.paimon.data.Timestamp;
import org.apache.paimon.fs.Path;
import org.apache.paimon.manifest.FileSource;
import org.apache.paimon.stats.BinaryTableStats;
import org.apache.paimon.stats.FieldStatsArraySerializer;
import org.apache.paimon.types.ArrayType;
@@ -31,12 +32,14 @@
import org.apache.paimon.types.DataTypes;
import org.apache.paimon.types.IntType;
import org.apache.paimon.types.RowType;
import org.apache.paimon.types.TinyIntType;

import javax.annotation.Nullable;

import java.time.LocalDateTime;
import java.time.ZoneId;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
@@ -85,6 +88,7 @@ public class DataFileMeta {

// file index filter bytes, if it is small, store in data file meta
private final @Nullable byte[] embeddedIndex;
private final @Nullable FileSource fileSource;

public static DataFileMeta forAppend(
String fileName,
@@ -93,7 +97,8 @@ public static DataFileMeta forAppend(
BinaryTableStats rowStats,
long minSequenceNumber,
long maxSequenceNumber,
long schemaId) {
long schemaId,
FileSource fileSource) {
return forAppend(
fileName,
fileSize,
@@ -103,7 +108,8 @@ public static DataFileMeta forAppend(
maxSequenceNumber,
schemaId,
Collections.emptyList(),
null);
null,
fileSource);
}

public static DataFileMeta forAppend(
@@ -115,7 +121,8 @@
long maxSequenceNumber,
long schemaId,
List<String> extraFiles,
@Nullable byte[] embeddedIndex) {
@Nullable byte[] embeddedIndex,
@Nullable FileSource fileSource) {
return new DataFileMeta(
fileName,
fileSize,
@@ -131,7 +138,8 @@
extraFiles,
Timestamp.fromLocalDateTime(LocalDateTime.now()).toMillisTimestamp(),
0L,
embeddedIndex);
embeddedIndex,
fileSource);
}

public DataFileMeta(
@@ -147,7 +155,8 @@
long schemaId,
int level,
@Nullable Long deleteRowCount,
@Nullable byte[] embeddedIndex) {
@Nullable byte[] embeddedIndex,
@Nullable FileSource fileSource) {
this(
fileName,
fileSize,
@@ -163,7 +172,8 @@
Collections.emptyList(),
Timestamp.fromLocalDateTime(LocalDateTime.now()).toMillisTimestamp(),
deleteRowCount,
embeddedIndex);
embeddedIndex,
fileSource);
}

public DataFileMeta(
@@ -181,7 +191,8 @@
List<String> extraFiles,
Timestamp creationTime,
@Nullable Long deleteRowCount,
@Nullable byte[] embeddedIndex) {
@Nullable byte[] embeddedIndex,
@Nullable FileSource fileSource) {
this.fileName = fileName;
this.fileSize = fileSize;

@@ -201,6 +212,7 @@ public DataFileMeta(
this.creationTime = creationTime;

this.deleteRowCount = deleteRowCount;
this.fileSource = fileSource;
}

public String fileName() {
@@ -223,6 +235,10 @@ public Optional<Long> deleteRowCount() {
return Optional.ofNullable(deleteRowCount);
}

public Optional<FileSource> fileSource() {
return Optional.ofNullable(fileSource);
}

public byte[] embeddedIndex() {
return embeddedIndex;
}
@@ -313,7 +329,8 @@ public DataFileMeta upgrade(int newLevel) {
extraFiles,
creationTime,
deleteRowCount,
embeddedIndex);
embeddedIndex,
fileSource);
}

public List<Path> collectFiles(DataFilePathFactory pathFactory) {
@@ -339,7 +356,8 @@ public DataFileMeta copy(List<String> newExtraFiles) {
newExtraFiles,
creationTime,
deleteRowCount,
embeddedIndex);
embeddedIndex,
fileSource);
}

@Override
@@ -365,7 +383,8 @@ public boolean equals(Object o) {
&& level == that.level
&& Objects.equals(extraFiles, that.extraFiles)
&& Objects.equals(creationTime, that.creationTime)
&& Objects.equals(deleteRowCount, that.deleteRowCount);
&& Objects.equals(deleteRowCount, that.deleteRowCount)
&& Objects.equals(fileSource, that.fileSource);
}

@Override
@@ -374,7 +393,7 @@ public int hashCode() {
fileName,
fileSize,
rowCount,
embeddedIndex,
Arrays.hashCode(embeddedIndex),
minKey,
maxKey,
keyStats,
Expand All @@ -385,7 +404,8 @@ public int hashCode() {
level,
extraFiles,
creationTime,
deleteRowCount);
deleteRowCount,
fileSource);
}
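Two small correctness fixes ride along with the new field: `hashCode()` now folds the byte array through `Arrays.hashCode(embeddedIndex)` instead of passing the array to `Objects.hash`, which would use the array's identity hash and so differ between equal-content instances; and the `toString()` hunk below drops a stray `embeddedIndex` argument that had no placeholder in the format string, which shifted every subsequent value one `%s` off. A standalone demonstration of the hashing pitfall:

```java
import java.util.Arrays;
import java.util.Objects;

public class ByteArrayHashDemo {
    public static void main(String[] args) {
        byte[] a = {1, 2, 3};
        byte[] b = {1, 2, 3};
        // Objects.hash falls back to the array's identity hashCode, so two
        // equal-content arrays almost always hash differently.
        System.out.println(Objects.hash((Object) a) == Objects.hash((Object) b));
        // Arrays.hashCode hashes the contents, giving a stable result.
        System.out.println(Arrays.hashCode(a) == Arrays.hashCode(b)); // true
    }
}
```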

@Override
@@ -394,11 +414,10 @@ public String toString() {
"{fileName: %s, fileSize: %d, rowCount: %d, "
+ "minKey: %s, maxKey: %s, keyStats: %s, valueStats: %s, "
+ "minSequenceNumber: %d, maxSequenceNumber: %d, "
+ "schemaId: %d, level: %d, extraFiles: %s, creationTime: %s, deleteRowCount: %d}",
+ "schemaId: %d, level: %d, extraFiles: %s, creationTime: %s, deleteRowCount: %d, fileSource: %s}",
fileName,
fileSize,
rowCount,
embeddedIndex,
minKey,
maxKey,
keyStats,
Expand All @@ -409,7 +428,8 @@ public String toString() {
level,
extraFiles,
creationTime,
deleteRowCount);
deleteRowCount,
fileSource);
}

public static RowType schema() {
@@ -429,6 +449,7 @@ public static RowType schema() {
fields.add(new DataField(12, "_CREATION_TIME", DataTypes.TIMESTAMP_MILLIS()));
fields.add(new DataField(13, "_DELETE_ROW_COUNT", new BigIntType(true)));
fields.add(new DataField(14, "_EMBEDDED_FILE_INDEX", newBytesType(true)));
fields.add(new DataField(15, "_FILE_SOURCE", new TinyIntType(true)));
return new RowType(fields);
}

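Because `_FILE_SOURCE` is declared as a nullable TINYINT (field 15), manifests written before this change simply read back with no source, which is why the accessor returns an `Optional`. A hypothetical caller-side sketch; the legacy fallback policy here is illustrative, not from the PR:

```java
import org.apache.paimon.io.DataFileMeta;
import org.apache.paimon.manifest.FileSource;

final class FileSourceUsage {
    /** Illustrative: decide whether a file came from compaction, with an explicit legacy fallback. */
    static boolean isCompactionOutput(DataFileMeta meta) {
        // Pre-upgrade files carry no marker; treating them as APPEND is an assumption.
        return meta.fileSource().orElse(FileSource.APPEND) == FileSource.COMPACT;
    }
}
```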
@@ -21,6 +21,7 @@
import org.apache.paimon.data.BinaryString;
import org.apache.paimon.data.GenericRow;
import org.apache.paimon.data.InternalRow;
import org.apache.paimon.manifest.FileSource;
import org.apache.paimon.stats.BinaryTableStats;
import org.apache.paimon.utils.ObjectSerializer;

@@ -55,7 +56,8 @@ public InternalRow toRow(DataFileMeta meta) {
toStringArrayData(meta.extraFiles()),
meta.creationTime(),
meta.deleteRowCount().orElse(null),
meta.embeddedIndex());
meta.embeddedIndex(),
meta.fileSource().map(FileSource::toByteValue).orElse(null));
}

@Override
@@ -75,6 +77,7 @@ public DataFileMeta fromRow(InternalRow row) {
fromStringArrayData(row.getArray(11)),
row.getTimestamp(12, 3),
row.isNullAt(13) ? null : row.getLong(13),
row.isNullAt(14) ? null : row.getBinary(14));
row.isNullAt(14) ? null : row.getBinary(14),
row.isNullAt(15) ? null : FileSource.fromByteValue(row.getByte(15)));
}
}
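The serializer keeps the new column null-safe in both directions. The mapping in isolation, as a sketch mirroring the `toRow`/`fromRow` lines above:

```java
import org.apache.paimon.manifest.FileSource;

final class FileSourceColumnCodec {
    /** Write side: enum to nullable byte, as in toRow. */
    static Byte encode(FileSource source) {
        return source == null ? null : source.toByteValue();
    }

    /** Read side: nullable byte back to enum, as in fromRow. */
    static FileSource decode(Byte value) {
        // Null survives the round trip, so rows written before this change stay readable.
        return value == null ? null : FileSource.fromByteValue(value);
    }
}
```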
@@ -28,6 +28,7 @@
import org.apache.paimon.format.TableStatsExtractor;
import org.apache.paimon.fs.FileIO;
import org.apache.paimon.fs.Path;
import org.apache.paimon.manifest.FileSource;
import org.apache.paimon.stats.BinaryTableStats;
import org.apache.paimon.stats.FieldStatsArraySerializer;
import org.apache.paimon.types.RowType;
@@ -68,6 +69,7 @@ public class KeyValueDataFileWriter
private long minSeqNumber = Long.MAX_VALUE;
private long maxSeqNumber = Long.MIN_VALUE;
private long deleteRecordCount = 0;
private final boolean isCompact;

public KeyValueDataFileWriter(
FileIO fileIO,
@@ -80,7 +82,8 @@ public KeyValueDataFileWriter(
long schemaId,
int level,
String compression,
CoreOptions options) {
CoreOptions options,
boolean isCompact) {
super(
fileIO,
factory,
@@ -100,6 +103,7 @@
this.keyStatsConverter = new FieldStatsArraySerializer(keyType);
this.valueStatsConverter = new FieldStatsArraySerializer(valueType);
this.keySerializer = new InternalRowSerializer(keyType);
this.isCompact = isCompact;
}

@Override
@@ -170,6 +174,7 @@ public DataFileMeta result() throws IOException {
level,
deleteRecordCount,
// TODO: enable file filter for primary key table (e.g. deletion table).
null);
null,
isCompact ? FileSource.COMPACT : FileSource.APPEND);
}
}
@@ -81,21 +81,24 @@ public DataFilePathFactory pathFactory(int level) {
return formatContext.pathFactory(level);
}

public RollingFileWriter<KeyValue, DataFileMeta> createRollingMergeTreeFileWriter(int level) {
public RollingFileWriter<KeyValue, DataFileMeta> createRollingMergeTreeFileWriter(
int level, boolean isCompact) {
return new RollingFileWriter<>(
() -> createDataFileWriter(formatContext.pathFactory(level).newPath(), level),
() ->
createDataFileWriter(
formatContext.pathFactory(level).newPath(), level, isCompact),
suggestedFileSize);
}

public RollingFileWriter<KeyValue, DataFileMeta> createRollingChangelogFileWriter(int level) {
return new RollingFileWriter<>(
() ->
createDataFileWriter(
formatContext.pathFactory(level).newChangelogPath(), level),
formatContext.pathFactory(level).newChangelogPath(), level, false),
suggestedFileSize);
}

private KeyValueDataFileWriter createDataFileWriter(Path path, int level) {
private KeyValueDataFileWriter createDataFileWriter(Path path, int level, boolean isCompact) {
KeyValueSerializer kvSerializer = new KeyValueSerializer(keyType, valueType);
return new KeyValueDataFileWriter(
fileIO,
Expand All @@ -108,7 +111,8 @@ private KeyValueDataFileWriter createDataFileWriter(Path path, int level) {
schemaId,
level,
formatContext.compression(level),
options);
options,
isCompact);
}

public void deleteFile(String filename, int level) {
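Putting the factory change to work: callers now declare at creation time whether a writer produces compaction output, and that choice is what `KeyValueDataFileWriter` stamps into each file's metadata as `FileSource.COMPACT` or `FileSource.APPEND`. A hypothetical call site; the class, method names, and level numbers are illustrative:

```java
import org.apache.paimon.KeyValue;
import org.apache.paimon.io.DataFileMeta;
import org.apache.paimon.io.KeyValueFileWriterFactory;
import org.apache.paimon.io.RollingFileWriter;

final class WriterCreation {
    /** Compaction rewrites existing data, so its output is stamped FileSource.COMPACT. */
    static RollingFileWriter<KeyValue, DataFileMeta> forCompaction(
            KeyValueFileWriterFactory factory, int outputLevel) {
        return factory.createRollingMergeTreeFileWriter(outputLevel, true);
    }

    /** Ordinary flushes are stamped FileSource.APPEND; changelog writers stay non-compact. */
    static RollingFileWriter<KeyValue, DataFileMeta> forFlush(KeyValueFileWriterFactory factory) {
        return factory.createRollingMergeTreeFileWriter(0, false);
    }
}
```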