From 026a910b26899f38c062e282fcc5592ed40e03df Mon Sep 17 00:00:00 2001 From: lizhimins <707364882@qq.com> Date: Mon, 18 Mar 2024 10:46:36 +0800 Subject: [PATCH] [ISSUE #7878] Performance Improvement and Bug Fixes for the Tiered Storage Module (#7899) Performance Improvement and Bug Fixes for the Tiered Storage Module --- tieredstore/README.md | 28 +- ...oreConfig.java => MessageStoreConfig.java} | 99 +-- .../tieredstore/MessageStoreExecutor.java | 93 +++ .../tieredstore/TieredDispatcher.java | 607 ------------------ .../tieredstore/TieredMessageFetcher.java | 585 ----------------- .../tieredstore/TieredMessageStore.java | 292 +++++---- .../tieredstore/common/AppendResult.java | 10 - .../tieredstore/common/FileSegmentType.java | 31 +- .../common/GetMessageResultExt.java | 4 + .../common/InFlightRequestFuture.java | 81 --- .../common/InFlightRequestKey.java | 68 -- .../tieredstore/common/MessageCacheKey.java | 56 -- .../common/SelectBufferResultWrapper.java | 64 -- .../common/TieredStoreExecutor.java | 108 ---- .../core/MessageStoreDispatcher.java | 31 + .../core/MessageStoreDispatcherImpl.java | 300 +++++++++ .../{ => core}/MessageStoreFetcher.java | 4 +- .../core/MessageStoreFetcherImpl.java | 427 ++++++++++++ .../MessageStoreFilter.java} | 4 +- .../MessageStoreTopicFilter.java} | 18 +- .../exception/TieredStoreException.java | 29 +- .../tieredstore/file/CompositeFlatFile.java | 495 -------------- .../file/CompositeQueueFlatFile.java | 118 ---- .../tieredstore/file/FlatAppendFile.java | 269 ++++++++ .../tieredstore/file/FlatCommitLogFile.java | 63 ++ .../FlatConsumeQueueFile.java} | 18 +- .../tieredstore/file/FlatFileFactory.java | 56 ++ ...siteAccess.java => FlatFileInterface.java} | 97 ++- .../tieredstore/file/FlatFileStore.java | 163 +++++ .../tieredstore/file/FlatMessageFile.java | 386 +++++++++++ .../tieredstore/file/TieredCommitLog.java | 179 ------ .../tieredstore/file/TieredConsumeQueue.java | 116 ---- .../tieredstore/file/TieredFileAllocator.java | 56 -- .../tieredstore/file/TieredFlatFile.java | 590 ----------------- .../file/TieredFlatFileManager.java | 300 --------- .../rocketmq/tieredstore/index/IndexFile.java | 2 + .../tieredstore/index/IndexService.java | 2 + .../tieredstore/index/IndexStoreFile.java | 34 +- .../tieredstore/index/IndexStoreService.java | 154 +++-- ...Manager.java => DefaultMetadataStore.java} | 99 ++- ...dMetadataStore.java => MetadataStore.java} | 58 +- .../TieredMetadataSerializeWrapper.java | 97 --- .../{ => entity}/FileSegmentMetadata.java | 23 +- .../metadata/{ => entity}/QueueMetadata.java | 12 +- .../metadata/{ => entity}/TopicMetadata.java | 20 +- .../metrics/TieredStoreMetricsManager.java | 64 +- .../tieredstore/provider/FileSegment.java | 346 ++++++++++ .../provider/FileSegmentAllocator.java | 102 --- .../provider/FileSegmentFactory.java | 71 ++ ...Provider.java => FileSegmentProvider.java} | 4 +- .../provider}/MemoryFileSegment.java | 80 +-- .../{posix => }/PosixFileSegment.java | 174 +++-- .../provider/TieredFileSegment.java | 485 -------------- .../stream/CommitLogInputStream.java | 20 +- .../stream/FileSegmentInputStream.java | 2 +- .../stream/FileSegmentInputStreamFactory.java | 7 +- .../tieredstore/util/MessageBufferUtil.java | 184 ------ .../tieredstore/util/MessageFormatUtil.java | 175 +++++ ...edStoreUtil.java => MessageStoreUtil.java} | 98 +-- .../tieredstore/TieredDispatcherTest.java | 178 ----- .../tieredstore/TieredMessageFetcherTest.java | 302 --------- .../tieredstore/TieredMessageStoreTest.java | 320 +++++---- 
.../tieredstore/TieredStoreTestUtil.java | 68 -- .../FileSegmentTypeTest.java} | 37 +- .../common/GetMessageResultExtTest.java | 48 +- .../common/InFlightRequestFutureTest.java | 145 ----- .../common/SelectBufferResultTest.java | 3 +- .../core/MessageStoreDispatcherImplTest.java | 192 ++++++ .../core/MessageStoreFetcherImplTest.java | 233 +++++++ .../MessageStoreTopicFilterTest.java} | 7 +- .../exception/TieredStoreExceptionTest.java | 41 ++ .../file/CompositeQueueFlatFileTest.java | 197 ------ .../tieredstore/file/FlatAppendFileTest.java | 215 +++++++ .../file/FlatCommitLogFileTest.java | 111 ++++ .../file/FlatConsumeQueueFileTest.java | 21 + .../tieredstore/file/FlatFileFactoryTest.java | 49 ++ .../tieredstore/file/FlatFileStoreTest.java | 101 +++ .../tieredstore/file/FlatMessageFileTest.java | 212 ++++++ .../tieredstore/file/TieredCommitLogTest.java | 108 ---- .../file/TieredFlatFileManagerTest.java | 96 --- .../tieredstore/file/TieredFlatFileTest.java | 342 ---------- .../tieredstore/index/IndexStoreFileTest.java | 31 +- .../index/IndexStoreServiceBenchTest.java | 31 +- .../index/IndexStoreServiceTest.java | 34 +- ...est.java => DefaultMetadataStoreTest.java} | 94 +-- .../TieredStoreMetricsManagerTest.java | 36 +- .../provider/FileSegmentFactoryTest.java | 66 ++ .../tieredstore/provider/FileSegmentTest.java | 469 ++++++++++++++ .../provider/MemoryFileSegmentTest.java | 46 ++ .../provider/MockFileSegmentInputStream.java | 54 -- .../provider/PosixFileSegmentTest.java | 21 + .../provider/TieredFileSegmentTest.java | 235 ------- .../memory/MemoryFileSegmentWithoutCheck.java | 74 --- .../provider/posix/PosixFileSegmentTest.java | 77 --- .../FileSegmentInputStreamTest.java} | 36 +- ...ilTest.java => MessageFormatUtilTest.java} | 231 +++---- .../util/MessageStoreUtilTest.java | 100 +++ .../tieredstore/util/TieredStoreUtilTest.java | 59 -- .../src/test/resources/rmq.logback-test.xml | 2 +- 99 files changed, 5489 insertions(+), 7391 deletions(-) rename tieredstore/src/main/java/org/apache/rocketmq/tieredstore/{common/TieredMessageStoreConfig.java => MessageStoreConfig.java} (83%) create mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/MessageStoreExecutor.java delete mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/TieredDispatcher.java delete mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/TieredMessageFetcher.java delete mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/InFlightRequestFuture.java delete mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/InFlightRequestKey.java delete mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/MessageCacheKey.java delete mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/SelectBufferResultWrapper.java delete mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/TieredStoreExecutor.java create mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/core/MessageStoreDispatcher.java create mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/core/MessageStoreDispatcherImpl.java rename tieredstore/src/main/java/org/apache/rocketmq/tieredstore/{ => core}/MessageStoreFetcher.java (98%) create mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/core/MessageStoreFetcherImpl.java rename tieredstore/src/main/java/org/apache/rocketmq/tieredstore/{provider/TieredStoreTopicFilter.java => core/MessageStoreFilter.java} (90%) 
rename tieredstore/src/main/java/org/apache/rocketmq/tieredstore/{provider/TieredStoreTopicBlackListFilter.java => core/MessageStoreTopicFilter.java} (63%) delete mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/CompositeFlatFile.java delete mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/CompositeQueueFlatFile.java create mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/FlatAppendFile.java create mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/FlatCommitLogFile.java rename tieredstore/src/main/java/org/apache/rocketmq/tieredstore/{util/CQItemBufferUtil.java => file/FlatConsumeQueueFile.java} (64%) create mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/FlatFileFactory.java rename tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/{CompositeAccess.java => FlatFileInterface.java} (67%) create mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/FlatFileStore.java create mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/FlatMessageFile.java delete mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/TieredCommitLog.java delete mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/TieredConsumeQueue.java delete mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/TieredFileAllocator.java delete mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/TieredFlatFile.java delete mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/TieredFlatFileManager.java rename tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/{TieredMetadataManager.java => DefaultMetadataStore.java} (73%) rename tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/{TieredMetadataStore.java => MetadataStore.java} (60%) delete mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/TieredMetadataSerializeWrapper.java rename tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/{ => entity}/FileSegmentMetadata.java (90%) rename tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/{ => entity}/QueueMetadata.java (88%) rename tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/{ => entity}/TopicMetadata.java (88%) create mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/FileSegment.java delete mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/FileSegmentAllocator.java create mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/FileSegmentFactory.java rename tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/{TieredStoreProvider.java => FileSegmentProvider.java} (95%) rename tieredstore/src/{test/java/org/apache/rocketmq/tieredstore/provider/memory => main/java/org/apache/rocketmq/tieredstore/provider}/MemoryFileSegment.java (61%) rename tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/{posix => }/PosixFileSegment.java (53%) delete mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/TieredFileSegment.java rename tieredstore/src/main/java/org/apache/rocketmq/tieredstore/{provider => }/stream/CommitLogInputStream.java (91%) rename tieredstore/src/main/java/org/apache/rocketmq/tieredstore/{provider => }/stream/FileSegmentInputStream.java (99%) rename 
tieredstore/src/main/java/org/apache/rocketmq/tieredstore/{provider => }/stream/FileSegmentInputStreamFactory.java (87%) delete mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/util/MessageBufferUtil.java create mode 100644 tieredstore/src/main/java/org/apache/rocketmq/tieredstore/util/MessageFormatUtil.java rename tieredstore/src/main/java/org/apache/rocketmq/tieredstore/util/{TieredStoreUtil.java => MessageStoreUtil.java} (54%) delete mode 100644 tieredstore/src/test/java/org/apache/rocketmq/tieredstore/TieredDispatcherTest.java delete mode 100644 tieredstore/src/test/java/org/apache/rocketmq/tieredstore/TieredMessageFetcherTest.java delete mode 100644 tieredstore/src/test/java/org/apache/rocketmq/tieredstore/TieredStoreTestUtil.java rename tieredstore/src/test/java/org/apache/rocketmq/tieredstore/{util/CQItemBufferUtilTest.java => common/FileSegmentTypeTest.java} (51%) delete mode 100644 tieredstore/src/test/java/org/apache/rocketmq/tieredstore/common/InFlightRequestFutureTest.java create mode 100644 tieredstore/src/test/java/org/apache/rocketmq/tieredstore/core/MessageStoreDispatcherImplTest.java create mode 100644 tieredstore/src/test/java/org/apache/rocketmq/tieredstore/core/MessageStoreFetcherImplTest.java rename tieredstore/src/test/java/org/apache/rocketmq/tieredstore/{provider/TieredStoreTopicBlackListFilterTest.java => core/MessageStoreTopicFilterTest.java} (84%) create mode 100644 tieredstore/src/test/java/org/apache/rocketmq/tieredstore/exception/TieredStoreExceptionTest.java delete mode 100644 tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/CompositeQueueFlatFileTest.java create mode 100644 tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/FlatAppendFileTest.java create mode 100644 tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/FlatCommitLogFileTest.java create mode 100644 tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/FlatConsumeQueueFileTest.java create mode 100644 tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/FlatFileFactoryTest.java create mode 100644 tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/FlatFileStoreTest.java create mode 100644 tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/FlatMessageFileTest.java delete mode 100644 tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/TieredCommitLogTest.java delete mode 100644 tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/TieredFlatFileManagerTest.java delete mode 100644 tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/TieredFlatFileTest.java rename tieredstore/src/test/java/org/apache/rocketmq/tieredstore/metadata/{TieredMetadataManagerTest.java => DefaultMetadataStoreTest.java} (77%) create mode 100644 tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/FileSegmentFactoryTest.java create mode 100644 tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/FileSegmentTest.java create mode 100644 tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/MemoryFileSegmentTest.java delete mode 100644 tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/MockFileSegmentInputStream.java create mode 100644 tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/PosixFileSegmentTest.java delete mode 100644 tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/TieredFileSegmentTest.java delete mode 100644 
tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/memory/MemoryFileSegmentWithoutCheck.java delete mode 100644 tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/posix/PosixFileSegmentTest.java rename tieredstore/src/test/java/org/apache/rocketmq/tieredstore/{provider/TieredFileSegmentInputStreamTest.java => stream/FileSegmentInputStreamTest.java} (87%) rename tieredstore/src/test/java/org/apache/rocketmq/tieredstore/util/{MessageBufferUtilTest.java => MessageFormatUtilTest.java} (54%) create mode 100644 tieredstore/src/test/java/org/apache/rocketmq/tieredstore/util/MessageStoreUtilTest.java delete mode 100644 tieredstore/src/test/java/org/apache/rocketmq/tieredstore/util/TieredStoreUtilTest.java diff --git a/tieredstore/README.md b/tieredstore/README.md index 9c8ea6b8aa3..edc229e1041 100644 --- a/tieredstore/README.md +++ b/tieredstore/README.md @@ -12,7 +12,7 @@ This article is a cookbook for RocketMQ tiered storage. Use the following steps to easily use tiered storage -1. Change `messageStorePlugIn` to `org.apache.rocketmq.tieredstore.TieredMessageStore` in your `broker.conf`. +1. Change `messageStorePlugIn` to `org.apache.rocketmq.tieredstore.TieredMessageStore` in your `broker.conf`. 2. Configure your backend service provider. change `tieredBackendServiceProvider` to your storage medium implement. We give a default implement: POSIX provider, and you need to change `tieredStoreFilepath` to the mount point of storage medium for tiered storage. 3. Start the broker and enjoy! @@ -20,19 +20,19 @@ Use the following steps to easily use tiered storage The following are some core configurations, for more details, see [TieredMessageStoreConfig](https://github.com/apache/rocketmq/blob/develop/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/TieredMessageStoreConfig.java) -| Configuration | Default value | Unit | Function | | ------------------------------- | --------------------------------------------------------------- | ----------- | ------------------------------------------------------------------------------- | -| messageStorePlugIn | | | Set to org.apache.rocketmq.tieredstore.TieredMessageStore to use tiered storage | -| tieredMetadataServiceProvider | org.apache.rocketmq.tieredstore.metadata.TieredMetadataManager | | Select your metadata provider | -| tieredBackendServiceProvider | org.apache.rocketmq.tieredstore.provider.posix.PosixFileSegment | | Select your backend service provider | -| tieredStoreFilepath | | | Select the directory using for tiered storage, only for POSIX provider.
| -| tieredStorageLevel | NOT_IN_DISK | | The options are DISABLE, NOT_IN_DISK, NOT_IN_MEM, FORCE | -| tieredStoreFileReservedTime | 72 | hour | Default topic TTL in tiered storage | -| tieredStoreGroupCommitCount | 2500 | | The number of messages that trigger one batch transfer | -| tieredStoreGroupCommitSize | 33554432 | byte | The size of messages that trigger one batch transfer, 32M by default | -| tieredStoreMaxGroupCommitCount | 10000 | | The maximum number of messages waiting to be transfered per queue | -| readAheadCacheExpireDuration | 1000 | millisecond | Read-ahead cache expiration time | -| readAheadCacheSizeThresholdRate | 0.3 | | The maximum heap space occupied by the read-ahead cache | +| Configuration | Default value | Unit | Function | +| ------------------------------- |---------------------------------------------------------------| ----------- | ------------------------------------------------------------------------------- | +| messageStorePlugIn | | | Set to org.apache.rocketmq.tieredstore.TieredMessageStore to use tiered storage | +| tieredMetadataServiceProvider | org.apache.rocketmq.tieredstore.metadata.DefaultMetadataStore | | Select your metadata provider | +| tieredBackendServiceProvider | org.apache.rocketmq.tieredstore.provider.PosixFileSegment | | Select your backend service provider | +| tieredStoreFilepath | | | Select the directory using for tiered storage, only for POSIX provider. | +| tieredStorageLevel | NOT_IN_DISK | | The options are DISABLE, NOT_IN_DISK, NOT_IN_MEM, FORCE | +| tieredStoreFileReservedTime | 72 | hour | Default topic TTL in tiered storage | +| tieredStoreGroupCommitCount | 2500 | | The number of messages that trigger one batch transfer | +| tieredStoreGroupCommitSize | 33554432 | byte | The size of messages that trigger one batch transfer, 32M by default | +| tieredStoreMaxGroupCommitCount | 10000 | | The maximum number of messages waiting to be transfered per queue | +| readAheadCacheExpireDuration | 1000 | millisecond | Read-ahead cache expiration time | +| readAheadCacheSizeThresholdRate | 0.3 | | The maximum heap space occupied by the read-ahead cache | ## Metrics diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/TieredMessageStoreConfig.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/MessageStoreConfig.java similarity index 83% rename from tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/TieredMessageStoreConfig.java rename to tieredstore/src/main/java/org/apache/rocketmq/tieredstore/MessageStoreConfig.java index b0750e55094..c6e62487309 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/TieredMessageStoreConfig.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/MessageStoreConfig.java @@ -14,13 +14,14 @@ * See the License for the specific language governing permissions and * limitations under the License.
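For readers who construct the renamed `MessageStoreConfig` directly (for example in tests) rather than through `broker.conf`, the sketch below shows how the knobs from the README table map onto the class. It is illustrative only: it uses setters visible later in this diff, and `setTieredStoreFilePath` plus the `/mnt/tiered_store` path are assumptions for the example, not part of the patch.

```java
import org.apache.rocketmq.tieredstore.MessageStoreConfig;

public class TieredStoreConfigExample {

    public static MessageStoreConfig newConfig() {
        MessageStoreConfig config = new MessageStoreConfig();
        // Counterpart of tieredStoreFilepath in broker.conf; only meaningful for the POSIX provider.
        config.setTieredStoreFilePath("/mnt/tiered_store");      // hypothetical mount point
        // Group-commit behaviour introduced by this patch.
        config.setTieredStoreGroupCommit(true);
        config.setTieredStoreGroupCommitTimeout(30 * 1000);      // milliseconds
        config.setTieredStoreMaxGroupCommitCount(10000);
        // Read-ahead cache tuning listed in the README table.
        config.setReadAheadCacheEnable(true);
        config.setReadAheadCacheSizeThresholdRate(0.3);          // fraction of max heap
        return config;
    }
}
```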
*/ -package org.apache.rocketmq.tieredstore.common; +package org.apache.rocketmq.tieredstore; import java.io.File; import java.net.InetAddress; import java.net.UnknownHostException; -public class TieredMessageStoreConfig { +public class MessageStoreConfig { + private String brokerName = localHostName(); private String brokerClusterName = "DefaultCluster"; private TieredStorageLevel tieredStorageLevel = TieredStorageLevel.NOT_IN_DISK; @@ -92,38 +93,40 @@ public boolean check(TieredStorageLevel targetLevel) { private int tieredStoreIndexFileMaxIndexNum = 5000000 * 4; // index file will force rolling to next file after idle specified time, default is 3h private int tieredStoreIndexFileRollingIdleInterval = 3 * 60 * 60 * 1000; - private String tieredMetadataServiceProvider = "org.apache.rocketmq.tieredstore.metadata.TieredMetadataManager"; - private String tieredBackendServiceProvider = "org.apache.rocketmq.tieredstore.provider.memory.MemoryFileSegment"; + private String tieredMetadataServiceProvider = "org.apache.rocketmq.tieredstore.metadata.DefaultMetadataStore"; + private String tieredBackendServiceProvider = "org.apache.rocketmq.tieredstore.provider.MemoryFileSegment"; // file reserved time, default is 72 hour private int tieredStoreFileReservedTime = 72; // time of forcing commitLog to roll to next file, default is 24 hour private int commitLogRollingInterval = 24; - // rolling will only happen if file segment size is larger than commitLogRollingMinimumSize, default is 128M private int commitLogRollingMinimumSize = 128 * 1024 * 1024; // default is 100, unit is millisecond private int maxCommitJitter = 100; - // Cached message count larger than this value will trigger async commit. default is 1000 - private int tieredStoreGroupCommitCount = 2500; - // Cached message size larger than this value will trigger async commit. default is 32M - private int tieredStoreGroupCommitSize = 32 * 1024 * 1024; - // Cached message count larger than this value will suspend append. default is 2000 + + private boolean tieredStoreGroupCommit = true; + private int tieredStoreGroupCommitTimeout = 30 * 1000; + // Cached message count larger than this value will trigger async commit. default is 4096 + private int tieredStoreGroupCommitCount = 4 * 1024; + // Cached message size larger than this value will trigger async commit. default is 4M + private int tieredStoreGroupCommitSize = 4 * 1024 * 1024; + // Cached message count larger than this value will suspend append.
default is 10000 private int tieredStoreMaxGroupCommitCount = 10000; - private int readAheadMinFactor = 2; - private int readAheadMaxFactor = 24; - private int readAheadBatchSizeFactorThreshold = 8; - private int readAheadMessageCountThreshold = 2048; - private int readAheadMessageSizeThreshold = 128 * 1024 * 1024; - private long readAheadCacheExpireDuration = 10 * 1000; + private long tieredStoreMaxFallBehindSize = 128 * 1024 * 1024; + + private boolean readAheadCacheEnable = true; + private int readAheadMessageCountThreshold = 4096; + private int readAheadMessageSizeThreshold = 16 * 1024 * 1024; + private long readAheadCacheExpireDuration = 15 * 1000; private double readAheadCacheSizeThresholdRate = 0.3; - private String tieredStoreFilePath = ""; + private int tieredStoreMaxPendingLimit = 10000; + private boolean tieredStoreCrcCheckEnable = false; + private String tieredStoreFilePath = ""; private String objectStoreEndpoint = ""; - private String objectStoreBucket = ""; - private String objectStoreAccessKey = ""; - private String objectStoreSecretKey = ""; public static String localHostName() { @@ -279,6 +282,22 @@ public void setMaxCommitJitter(int maxCommitJitter) { this.maxCommitJitter = maxCommitJitter; } + public boolean isTieredStoreGroupCommit() { + return tieredStoreGroupCommit; + } + + public void setTieredStoreGroupCommit(boolean tieredStoreGroupCommit) { + this.tieredStoreGroupCommit = tieredStoreGroupCommit; + } + + public int getTieredStoreGroupCommitTimeout() { + return tieredStoreGroupCommitTimeout; + } + + public void setTieredStoreGroupCommitTimeout(int tieredStoreGroupCommitTimeout) { + this.tieredStoreGroupCommitTimeout = tieredStoreGroupCommitTimeout; + } + public int getTieredStoreGroupCommitCount() { return tieredStoreGroupCommitCount; } @@ -303,28 +322,20 @@ public void setTieredStoreMaxGroupCommitCount(int tieredStoreMaxGroupCommitCount this.tieredStoreMaxGroupCommitCount = tieredStoreMaxGroupCommitCount; } - public int getReadAheadMinFactor() { - return readAheadMinFactor; - } - - public void setReadAheadMinFactor(int readAheadMinFactor) { - this.readAheadMinFactor = readAheadMinFactor; + public long getTieredStoreMaxFallBehindSize() { + return tieredStoreMaxFallBehindSize; } - public int getReadAheadMaxFactor() { - return readAheadMaxFactor; + public void setTieredStoreMaxFallBehindSize(long tieredStoreMaxFallBehindSize) { + this.tieredStoreMaxFallBehindSize = tieredStoreMaxFallBehindSize; } - public int getReadAheadBatchSizeFactorThreshold() { - return readAheadBatchSizeFactorThreshold; + public boolean isReadAheadCacheEnable() { + return readAheadCacheEnable; } - public void setReadAheadBatchSizeFactorThreshold(int readAheadBatchSizeFactorThreshold) { - this.readAheadBatchSizeFactorThreshold = readAheadBatchSizeFactorThreshold; - } - - public void setReadAheadMaxFactor(int readAheadMaxFactor) { - this.readAheadMaxFactor = readAheadMaxFactor; + public void setReadAheadCacheEnable(boolean readAheadCacheEnable) { + this.readAheadCacheEnable = readAheadCacheEnable; } public int getReadAheadMessageCountThreshold() { @@ -359,6 +370,22 @@ public void setReadAheadCacheSizeThresholdRate(double rate) { this.readAheadCacheSizeThresholdRate = rate; } + public int getTieredStoreMaxPendingLimit() { + return tieredStoreMaxPendingLimit; + } + + public void setTieredStoreMaxPendingLimit(int tieredStoreMaxPendingLimit) { + this.tieredStoreMaxPendingLimit = tieredStoreMaxPendingLimit; + } + + public boolean isTieredStoreCrcCheckEnable() { + return tieredStoreCrcCheckEnable; + } 
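The group-commit fields above replace the old factor-based read-ahead tuning. The following is a minimal sketch of how those thresholds are meant to interact, based only on the field comments in this hunk; it assumes the usual bean getters exist for `tieredStoreGroupCommitSize` and `tieredStoreMaxGroupCommitCount` alongside the accessors shown here, and it is not the module's actual commit loop.

```java
import org.apache.rocketmq.tieredstore.MessageStoreConfig;

public class GroupCommitPolicySketch {

    /** Either threshold is enough to trigger an asynchronous transfer of the buffered messages. */
    public static boolean shouldTriggerAsyncCommit(int bufferedCount, long bufferedBytes, MessageStoreConfig config) {
        return bufferedCount >= config.getTieredStoreGroupCommitCount()      // 4096 messages by default
            || bufferedBytes >= config.getTieredStoreGroupCommitSize();      // 4 MB by default
    }

    /** Backpressure: stop appending once too many messages are waiting to be transferred. */
    public static boolean shouldSuspendAppend(int bufferedCount, MessageStoreConfig config) {
        return bufferedCount >= config.getTieredStoreMaxGroupCommitCount();  // 10000 messages by default
    }
}
```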
+ + public void setTieredStoreCrcCheckEnable(boolean tieredStoreCrcCheckEnable) { + this.tieredStoreCrcCheckEnable = tieredStoreCrcCheckEnable; + } + public String getTieredStoreFilePath() { return tieredStoreFilePath; } diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/MessageStoreExecutor.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/MessageStoreExecutor.java new file mode 100644 index 00000000000..56f564e7d2d --- /dev/null +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/MessageStoreExecutor.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.rocketmq.tieredstore; + +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import org.apache.rocketmq.common.ThreadFactoryImpl; +import org.apache.rocketmq.common.utils.ThreadUtils; + +public class MessageStoreExecutor { + + public final BlockingQueue bufferCommitThreadPoolQueue; + public final BlockingQueue bufferFetchThreadPoolQueue; + public final BlockingQueue fileRecyclingThreadPoolQueue; + + public final ScheduledExecutorService commonExecutor; + public final ExecutorService bufferCommitExecutor; + public final ExecutorService bufferFetchExecutor; + public final ExecutorService fileRecyclingExecutor; + + private static class SingletonHolder { + private static final MessageStoreExecutor INSTANCE = new MessageStoreExecutor(); + } + + public static MessageStoreExecutor getInstance() { + return SingletonHolder.INSTANCE; + } + + public MessageStoreExecutor() { + this(10000); + } + + public MessageStoreExecutor(int maxQueueCapacity) { + + this.commonExecutor = ThreadUtils.newScheduledThreadPool( + Math.max(4, Runtime.getRuntime().availableProcessors()), + new ThreadFactoryImpl("TieredCommonExecutor_")); + + this.bufferCommitThreadPoolQueue = new LinkedBlockingQueue<>(maxQueueCapacity); + this.bufferCommitExecutor = ThreadUtils.newThreadPoolExecutor( + Math.max(16, Runtime.getRuntime().availableProcessors() * 4), + Math.max(16, Runtime.getRuntime().availableProcessors() * 4), + TimeUnit.MINUTES.toMillis(1), TimeUnit.MILLISECONDS, + this.bufferCommitThreadPoolQueue, + new ThreadFactoryImpl("BufferCommitExecutor_")); + + this.bufferFetchThreadPoolQueue = new LinkedBlockingQueue<>(maxQueueCapacity); + this.bufferFetchExecutor = ThreadUtils.newThreadPoolExecutor( + Math.max(16, Runtime.getRuntime().availableProcessors() * 4), + Math.max(16, Runtime.getRuntime().availableProcessors() * 4), + TimeUnit.MINUTES.toMillis(1), TimeUnit.MILLISECONDS, + this.bufferFetchThreadPoolQueue, + new ThreadFactoryImpl("BufferFetchExecutor_")); + + 
this.fileRecyclingThreadPoolQueue = new LinkedBlockingQueue<>(maxQueueCapacity); + this.fileRecyclingExecutor = ThreadUtils.newThreadPoolExecutor( + Math.max(4, Runtime.getRuntime().availableProcessors()), + Math.max(4, Runtime.getRuntime().availableProcessors()), + TimeUnit.MINUTES.toMillis(1), TimeUnit.MILLISECONDS, + this.fileRecyclingThreadPoolQueue, + new ThreadFactoryImpl("BufferFetchExecutor_")); + } + + private void shutdownExecutor(ExecutorService executor) { + if (executor != null) { + executor.shutdown(); + } + } + + public void shutdown() { + this.shutdownExecutor(this.commonExecutor); + this.shutdownExecutor(this.bufferCommitExecutor); + this.shutdownExecutor(this.bufferFetchExecutor); + this.shutdownExecutor(this.fileRecyclingExecutor); + } +} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/TieredDispatcher.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/TieredDispatcher.java deleted file mode 100644 index 766c559e9c8..00000000000 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/TieredDispatcher.java +++ /dev/null @@ -1,607 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
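`MessageStoreExecutor` above bundles the thread pools that the static `TieredStoreExecutor` removed elsewhere in this patch used to hold. A small usage sketch follows, using only the public constructor, fields, and `shutdown()` defined in the class; the task bodies are placeholders.

```java
import java.util.concurrent.CompletableFuture;
import org.apache.rocketmq.tieredstore.MessageStoreExecutor;

public class ExecutorUsageExample {

    public static void main(String[] args) {
        // A private instance; MessageStoreExecutor.getInstance() returns the shared singleton instead.
        MessageStoreExecutor executor = new MessageStoreExecutor();

        // Commit work and fetch work run on separate pools, so slow uploads cannot starve reads.
        CompletableFuture<Void> commit = CompletableFuture.runAsync(
            () -> System.out.println("upload buffered segment"), executor.bufferCommitExecutor);
        CompletableFuture<Void> fetch = CompletableFuture.runAsync(
            () -> System.out.println("read message from a remote segment"), executor.bufferFetchExecutor);

        CompletableFuture.allOf(commit, fetch).join();
        executor.shutdown();
    }
}
```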
- */ -package org.apache.rocketmq.tieredstore; - -import io.opentelemetry.api.common.Attributes; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.ReentrantLock; -import java.util.function.Consumer; -import org.apache.rocketmq.common.ServiceThread; -import org.apache.rocketmq.common.message.MessageConst; -import org.apache.rocketmq.common.message.MessageQueue; -import org.apache.rocketmq.logging.org.slf4j.Logger; -import org.apache.rocketmq.logging.org.slf4j.LoggerFactory; -import org.apache.rocketmq.store.CommitLogDispatcher; -import org.apache.rocketmq.store.ConsumeQueue; -import org.apache.rocketmq.store.DispatchRequest; -import org.apache.rocketmq.store.MessageStore; -import org.apache.rocketmq.store.SelectMappedBufferResult; -import org.apache.rocketmq.tieredstore.common.AppendResult; -import org.apache.rocketmq.tieredstore.common.FileSegmentType; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.common.TieredStoreExecutor; -import org.apache.rocketmq.tieredstore.file.CompositeQueueFlatFile; -import org.apache.rocketmq.tieredstore.file.TieredFlatFileManager; -import org.apache.rocketmq.tieredstore.metrics.TieredStoreMetricsConstant; -import org.apache.rocketmq.tieredstore.metrics.TieredStoreMetricsManager; -import org.apache.rocketmq.tieredstore.provider.TieredStoreTopicBlackListFilter; -import org.apache.rocketmq.tieredstore.provider.TieredStoreTopicFilter; -import org.apache.rocketmq.tieredstore.util.CQItemBufferUtil; -import org.apache.rocketmq.tieredstore.util.MessageBufferUtil; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; - -public class TieredDispatcher extends ServiceThread implements CommitLogDispatcher { - - private static final Logger logger = LoggerFactory.getLogger(TieredStoreUtil.TIERED_STORE_LOGGER_NAME); - - private TieredStoreTopicFilter topicFilter; - private final String brokerName; - private final MessageStore defaultStore; - private final TieredMessageStoreConfig storeConfig; - private final TieredFlatFileManager tieredFlatFileManager; - private final ReentrantLock dispatchTaskLock; - private final ReentrantLock dispatchWriteLock; - - private ConcurrentMap> dispatchRequestReadMap; - private ConcurrentMap> dispatchRequestWriteMap; - - public TieredDispatcher(MessageStore defaultStore, TieredMessageStoreConfig storeConfig) { - this.defaultStore = defaultStore; - this.storeConfig = storeConfig; - this.brokerName = storeConfig.getBrokerName(); - this.topicFilter = new TieredStoreTopicBlackListFilter(); - this.tieredFlatFileManager = TieredFlatFileManager.getInstance(storeConfig); - this.dispatchRequestReadMap = new ConcurrentHashMap<>(); - this.dispatchRequestWriteMap = new ConcurrentHashMap<>(); - this.dispatchTaskLock = new ReentrantLock(); - this.dispatchWriteLock = new ReentrantLock(); - } - - protected void initScheduleTask() { - TieredStoreExecutor.commonScheduledExecutor.scheduleWithFixedDelay(() -> - tieredFlatFileManager.deepCopyFlatFileToList().forEach(flatFile -> { - if (!flatFile.getCompositeFlatFileLock().isLocked()) { - dispatchFlatFileAsync(flatFile); - } - }), 30, 10, TimeUnit.SECONDS); - } - - public TieredStoreTopicFilter getTopicFilter() { - 
return topicFilter; - } - - public void setTopicFilter(TieredStoreTopicFilter topicFilter) { - this.topicFilter = topicFilter; - } - - @Override - public void dispatch(DispatchRequest request) { - if (stopped) { - return; - } - - String topic = request.getTopic(); - if (topicFilter != null && topicFilter.filterTopic(topic)) { - return; - } - - CompositeQueueFlatFile flatFile = tieredFlatFileManager.getOrCreateFlatFileIfAbsent( - new MessageQueue(topic, brokerName, request.getQueueId())); - - if (flatFile == null) { - logger.error("[Bug] TieredDispatcher#dispatch: get or create flat file failed, skip this request. ", - "topic: {}, queueId: {}", request.getTopic(), request.getQueueId()); - return; - } - - if (detectFallBehind(flatFile)) { - return; - } - - // Set cq offset as commitlog first dispatch offset if flat file first init - if (flatFile.getDispatchOffset() == -1) { - flatFile.initOffset(request.getConsumeQueueOffset()); - } - - if (request.getConsumeQueueOffset() == flatFile.getDispatchOffset()) { - - // In order to ensure the efficiency of dispatch operation and avoid high dispatch delay, - // it is not allowed to block for a long time here. - try { - // Acquired flat file write lock to append commitlog - if (flatFile.getCompositeFlatFileLock().isLocked() - || !flatFile.getCompositeFlatFileLock().tryLock(3, TimeUnit.MILLISECONDS)) { - return; - } - } catch (Exception e) { - logger.warn("Temporarily skip dispatch request because we can not acquired write lock. " + - "topic: {}, queueId: {}", request.getTopic(), request.getQueueId(), e); - if (flatFile.getCompositeFlatFileLock().isLocked()) { - flatFile.getCompositeFlatFileLock().unlock(); - } - return; - } - - // double check whether the offset matches - if (request.getConsumeQueueOffset() != flatFile.getDispatchOffset()) { - flatFile.getCompositeFlatFileLock().unlock(); - return; - } - - // obtain message - SelectMappedBufferResult message = - defaultStore.selectOneMessageByOffset(request.getCommitLogOffset(), request.getMsgSize()); - - if (message == null) { - logger.error("TieredDispatcher#dispatch: dispatch failed, " + - "can not get message from next store: topic: {}, queueId: {}, commitLog offset: {}, size: {}", - request.getTopic(), request.getQueueId(), request.getCommitLogOffset(), request.getMsgSize()); - flatFile.getCompositeFlatFileLock().unlock(); - return; - } - - // drop expired request - try { - if (request.getConsumeQueueOffset() < flatFile.getDispatchOffset()) { - return; - } - AppendResult result = flatFile.appendCommitLog(message.getByteBuffer()); - long newCommitLogOffset = flatFile.getCommitLogMaxOffset() - message.getByteBuffer().remaining(); - doRedispatchRequestToWriteMap(result, flatFile, request.getConsumeQueueOffset(), - newCommitLogOffset, request.getMsgSize(), request.getTagsCode(), message.getByteBuffer()); - - if (result == AppendResult.SUCCESS) { - Attributes attributes = TieredStoreMetricsManager.newAttributesBuilder() - .put(TieredStoreMetricsConstant.LABEL_TOPIC, request.getTopic()) - .put(TieredStoreMetricsConstant.LABEL_QUEUE_ID, request.getQueueId()) - .put(TieredStoreMetricsConstant.LABEL_FILE_TYPE, - FileSegmentType.COMMIT_LOG.name().toLowerCase()) - .build(); - TieredStoreMetricsManager.messagesDispatchTotal.add(1, attributes); - } - } catch (Exception throwable) { - logger.error("TieredDispatcher#dispatch: dispatch has unexpected problem. 
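The `dispatch()` method above never blocks the commit-log dispatch thread: it gives up unless it can take the flat-file lock within a few milliseconds, re-checks the dispatch offset after acquiring it, and always releases in `finally`. Below is a condensed, self-contained sketch of that guard pattern, not the removed class itself; names and state are simplified.

```java
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;

public class DispatchGuardSketch {

    private final ReentrantLock fileLock = new ReentrantLock();
    private volatile long dispatchOffset = 0L;

    /** Appends only when the offset still matches; returns quickly instead of queueing behind a busy file. */
    public boolean tryDispatch(long requestOffset, Runnable appendAction) throws InterruptedException {
        if (!fileLock.tryLock(3, TimeUnit.MILLISECONDS)) {
            return false;                       // skip for now, a later scheduled pass will catch up
        }
        try {
            if (requestOffset != dispatchOffset) {
                return false;                   // double-check: another path already advanced the offset
            }
            appendAction.run();
            dispatchOffset++;
            return true;
        } finally {
            fileLock.unlock();
        }
    }
}
```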
" + - "topic: {}, queueId: {}, queue offset: {}", request.getTopic(), request.getQueueId(), - request.getConsumeQueueOffset(), throwable); - } finally { - message.release(); - flatFile.getCompositeFlatFileLock().unlock(); - } - } - } - - // prevent consume queue and index file falling too far - private boolean detectFallBehind(CompositeQueueFlatFile flatFile) { - int groupCommitCount = storeConfig.getTieredStoreMaxGroupCommitCount(); - return dispatchRequestWriteMap.getOrDefault(flatFile, Collections.emptyList()).size() > groupCommitCount - || dispatchRequestReadMap.getOrDefault(flatFile, Collections.emptyList()).size() > groupCommitCount; - } - - public void dispatchFlatFileAsync(CompositeQueueFlatFile flatFile) { - this.dispatchFlatFileAsync(flatFile, null); - } - - public void dispatchFlatFileAsync(CompositeQueueFlatFile flatFile, Consumer consumer) { - // Avoid dispatch tasks too much - if (TieredStoreExecutor.dispatchThreadPoolQueue.size() > - TieredStoreExecutor.QUEUE_CAPACITY * 0.75) { - return; - } - TieredStoreExecutor.dispatchExecutor.execute(() -> { - try { - dispatchFlatFile(flatFile); - } catch (Throwable throwable) { - logger.error("[Bug] TieredDispatcher#dispatchFlatFileAsync failed, topic: {}, queueId: {}", - flatFile.getMessageQueue().getTopic(), flatFile.getMessageQueue().getQueueId(), throwable); - } - - if (consumer != null) { - consumer.accept(flatFile.getDispatchOffset()); - } - }); - } - - protected void dispatchFlatFile(CompositeQueueFlatFile flatFile) { - if (stopped) { - return; - } - - if (topicFilter != null && topicFilter.filterTopic(flatFile.getMessageQueue().getTopic())) { - return; - } - - if (flatFile.getDispatchOffset() == -1L) { - return; - } - - if (detectFallBehind(flatFile)) { - return; - } - - MessageQueue mq = flatFile.getMessageQueue(); - String topic = mq.getTopic(); - int queueId = mq.getQueueId(); - - long beforeOffset = flatFile.getDispatchOffset(); - long minOffsetInQueue = defaultStore.getMinOffsetInQueue(topic, queueId); - long maxOffsetInQueue = defaultStore.getMaxOffsetInQueue(topic, queueId); - - // perhaps it was caused by local cq file corruption or ha truncation - if (beforeOffset >= maxOffsetInQueue) { - return; - } - - try { - if (!flatFile.getCompositeFlatFileLock().tryLock(200, TimeUnit.MILLISECONDS)) { - return; - } - } catch (Exception e) { - logger.warn("TieredDispatcher#dispatchFlatFile: can not acquire flatFile lock, " + - "topic: {}, queueId: {}", mq.getTopic(), mq.getQueueId(), e); - if (flatFile.getCompositeFlatFileLock().isLocked()) { - flatFile.getCompositeFlatFileLock().unlock(); - } - return; - } - - try { - long dispatchOffset = flatFile.getDispatchOffset(); - if (dispatchOffset < minOffsetInQueue) { - // If the tiered storage feature is turned off midway, - // it may cause cq discontinuity, resulting in data loss here. 
- logger.warn("TieredDispatcher#dispatchFlatFile: dispatch offset is too small, " + - "topic: {}, queueId: {}, dispatch offset: {}, local cq offset range {}-{}", - topic, queueId, dispatchOffset, minOffsetInQueue, maxOffsetInQueue); - - // when dispatch offset is smaller than min offset in local cq - // some earliest messages may be lost at this time - tieredFlatFileManager.destroyCompositeFile(flatFile.getMessageQueue()); - CompositeQueueFlatFile newFlatFile = - tieredFlatFileManager.getOrCreateFlatFileIfAbsent(new MessageQueue(topic, brokerName, queueId)); - if (newFlatFile != null) { - newFlatFile.initOffset(maxOffsetInQueue); - } - return; - } - beforeOffset = dispatchOffset; - - // flow control by max count, also we could do flow control based on message size - long maxCount = storeConfig.getTieredStoreGroupCommitCount(); - long upperBound = Math.min(dispatchOffset + maxCount, maxOffsetInQueue); - ConsumeQueue consumeQueue = (ConsumeQueue) defaultStore.getConsumeQueue(topic, queueId); - - logger.debug("DispatchFlatFile race, topic={}, queueId={}, cq range={}-{}, dispatch offset={}-{}", - topic, queueId, minOffsetInQueue, maxOffsetInQueue, dispatchOffset, upperBound - 1); - - for (; dispatchOffset < upperBound; dispatchOffset++) { - // get consume queue - SelectMappedBufferResult cqItem = consumeQueue.getIndexBuffer(dispatchOffset); - if (cqItem == null) { - logger.error("[Bug] TieredDispatcher#dispatchFlatFile: cq item is null, " + - "topic: {}, queueId: {}, dispatch offset: {}, local cq offset range {}-{}", - topic, queueId, dispatchOffset, minOffsetInQueue, maxOffsetInQueue); - return; - } - long commitLogOffset = CQItemBufferUtil.getCommitLogOffset(cqItem.getByteBuffer()); - int size = CQItemBufferUtil.getSize(cqItem.getByteBuffer()); - long tagCode = CQItemBufferUtil.getTagCode(cqItem.getByteBuffer()); - cqItem.release(); - - // get message - SelectMappedBufferResult message = defaultStore.selectOneMessageByOffset(commitLogOffset, size); - if (message == null) { - logger.error("TieredDispatcher#dispatchFlatFile: get message from next store failed, " + - "topic: {}, queueId: {}, commitLog offset: {}, size: {}", - topic, queueId, commitLogOffset, size); - // not dispatch immediately - return; - } - - // append commitlog will increase dispatch offset here - AppendResult result = flatFile.appendCommitLog(message.getByteBuffer(), true); - long newCommitLogOffset = flatFile.getCommitLogMaxOffset() - message.getByteBuffer().remaining(); - doRedispatchRequestToWriteMap( - result, flatFile, dispatchOffset, newCommitLogOffset, size, tagCode, message.getByteBuffer()); - message.release(); - - switch (result) { - case SUCCESS: - continue; - case FILE_CLOSED: - tieredFlatFileManager.destroyCompositeFile(flatFile.getMessageQueue()); - logger.info("File has been closed and destroy, topic: {}, queueId: {}", topic, queueId); - return; - default: - dispatchOffset--; - break; - } - } - - Attributes attributes = TieredStoreMetricsManager.newAttributesBuilder() - .put(TieredStoreMetricsConstant.LABEL_TOPIC, mq.getTopic()) - .put(TieredStoreMetricsConstant.LABEL_QUEUE_ID, mq.getQueueId()) - .put(TieredStoreMetricsConstant.LABEL_FILE_TYPE, FileSegmentType.COMMIT_LOG.name().toLowerCase()) - .build(); - - TieredStoreMetricsManager.messagesDispatchTotal.add(dispatchOffset - beforeOffset, attributes); - } finally { - flatFile.getCompositeFlatFileLock().unlock(); - } - - // If this queue dispatch falls too far, dispatch again immediately - if (flatFile.getDispatchOffset() < maxOffsetInQueue && 
!flatFile.getCompositeFlatFileLock().isLocked()) { - dispatchFlatFileAsync(flatFile); - } - } - - // Submit cq to write map if append commitlog success - public void doRedispatchRequestToWriteMap(AppendResult result, CompositeQueueFlatFile flatFile, - long queueOffset, long newCommitLogOffset, int size, long tagCode, ByteBuffer message) { - - MessageQueue mq = flatFile.getMessageQueue(); - String topic = mq.getTopic(); - int queueId = mq.getQueueId(); - - switch (result) { - case SUCCESS: - long offset = MessageBufferUtil.getQueueOffset(message); - if (queueOffset != offset) { - logger.warn("Message cq offset in commitlog does not meet expectations, " + - "result={}, topic={}, queueId={}, cq offset={}, msg offset={}", - AppendResult.OFFSET_INCORRECT, topic, queueId, queueOffset, offset); - } - break; - case BUFFER_FULL: - logger.debug("Commitlog buffer full, result={}, topic={}, queueId={}, offset={}", - result, topic, queueId, queueOffset); - return; - default: - logger.info("Commitlog append failed, result={}, topic={}, queueId={}, offset={}", - result, topic, queueId, queueOffset); - return; - } - - dispatchWriteLock.lock(); - try { - Map properties = MessageBufferUtil.getProperties(message); - DispatchRequest dispatchRequest = new DispatchRequest( - topic, - queueId, - newCommitLogOffset, - size, - tagCode, - MessageBufferUtil.getStoreTimeStamp(message), - queueOffset, - properties.getOrDefault(MessageConst.PROPERTY_KEYS, ""), - properties.getOrDefault(MessageConst.PROPERTY_UNIQ_CLIENT_MESSAGE_ID_KEYIDX, ""), - 0, 0, new HashMap<>()); - dispatchRequest.setOffsetId(MessageBufferUtil.getOffsetId(message)); - List requestList = - dispatchRequestWriteMap.computeIfAbsent(flatFile, k -> new ArrayList<>()); - requestList.add(dispatchRequest); - if (requestList.get(0).getConsumeQueueOffset() >= flatFile.getConsumeQueueMaxOffset()) { - wakeup(); - } - } finally { - dispatchWriteLock.unlock(); - } - } - - public void swapDispatchRequestList() { - dispatchWriteLock.lock(); - try { - dispatchRequestReadMap = dispatchRequestWriteMap; - dispatchRequestWriteMap = new ConcurrentHashMap<>(); - } finally { - dispatchWriteLock.unlock(); - } - } - - public void copySurvivorObject() { - if (dispatchRequestReadMap.isEmpty()) { - return; - } - - try { - dispatchWriteLock.lock(); - dispatchRequestReadMap.forEach((flatFile, requestList) -> { - String topic = flatFile.getMessageQueue().getTopic(); - int queueId = flatFile.getMessageQueue().getQueueId(); - if (requestList.isEmpty()) { - logger.warn("Copy survivor object failed, dispatch request list is empty, " + - "topic: {}, queueId: {}", topic, queueId); - return; - } - - List requestListToWrite = - dispatchRequestWriteMap.computeIfAbsent(flatFile, k -> new ArrayList<>()); - - if (!requestListToWrite.isEmpty()) { - long readOffset = requestList.get(requestList.size() - 1).getConsumeQueueOffset(); - long writeOffset = requestListToWrite.get(0).getConsumeQueueOffset(); - if (readOffset > writeOffset) { - logger.warn("Copy survivor object failed, offset in request list are not continuous. 
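`swapDispatchRequestList()` and `copySurvivorObject()` above form a double buffer: dispatch threads append to a write map under a lock, and the single builder thread swaps it out before draining, pushing unfinished requests back afterwards. A condensed sketch of the swap-then-drain idea follows, with the per-file maps simplified to a single list.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantLock;

public class SwapBufferSketch<T> {

    private final ReentrantLock writeLock = new ReentrantLock();
    private List<T> writeBuffer = new ArrayList<>();

    /** Producers only ever touch the write buffer, and only briefly. */
    public void append(T request) {
        writeLock.lock();
        try {
            writeBuffer.add(request);
        } finally {
            writeLock.unlock();
        }
    }

    /** The single builder thread swaps under the lock, then drains without further contention. */
    public List<T> swapAndDrain() {
        writeLock.lock();
        try {
            List<T> readBuffer = writeBuffer;
            writeBuffer = new ArrayList<>();
            return readBuffer;
        } finally {
            writeLock.unlock();
        }
    }
}
```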
" + - "topic: {}, queueId: {}, read offset: {}, write offset: {}", - topic, queueId, readOffset, writeOffset); - - // sort request list according cq offset - requestList.sort(Comparator.comparingLong(DispatchRequest::getConsumeQueueOffset)); - } - } - - requestList.addAll(requestListToWrite); - dispatchRequestWriteMap.put(flatFile, requestList); - }); - dispatchRequestReadMap = new ConcurrentHashMap<>(); - } finally { - dispatchWriteLock.unlock(); - } - } - - protected void buildConsumeQueueAndIndexFile() { - swapDispatchRequestList(); - Map cqMetricsMap = new HashMap<>(); - Map ifMetricsMap = new HashMap<>(); - - for (Map.Entry> entry : dispatchRequestReadMap.entrySet()) { - CompositeQueueFlatFile flatFile = entry.getKey(); - List requestList = entry.getValue(); - if (flatFile.isClosed()) { - requestList.clear(); - } - - MessageQueue messageQueue = flatFile.getMessageQueue(); - Iterator iterator = requestList.iterator(); - while (iterator.hasNext()) { - DispatchRequest request = iterator.next(); - - // remove expired request - if (request.getConsumeQueueOffset() < flatFile.getConsumeQueueMaxOffset()) { - iterator.remove(); - continue; - } - - // wait uploading commitLog - if (flatFile.getCommitLogDispatchCommitOffset() < request.getConsumeQueueOffset()) { - break; - } - - // build consume queue - AppendResult result = flatFile.appendConsumeQueue(request, true); - - // handle build cq result - if (AppendResult.SUCCESS.equals(result)) { - long cqCount = cqMetricsMap.computeIfAbsent(messageQueue, key -> 0L); - cqMetricsMap.put(messageQueue, cqCount + 1); - - // build index - if (storeConfig.isMessageIndexEnable()) { - result = flatFile.appendIndexFile(request); - if (AppendResult.SUCCESS.equals(result)) { - long ifCount = ifMetricsMap.computeIfAbsent(messageQueue, key -> 0L); - ifMetricsMap.put(messageQueue, ifCount + 1); - iterator.remove(); - } else { - logger.warn("Build index failed, skip this message, " + - "result: {}, topic: {}, queue: {}, request offset: {}", - result, request.getTopic(), request.getQueueId(), request.getConsumeQueueOffset()); - } - } - continue; - } - - if (AppendResult.OFFSET_INCORRECT.equals(result)) { - logger.error("Consume queue offset incorrect, try to recreated consume queue, " + - "result: {}, topic: {}, queue: {}, request offset: {}, current cq offset: {}", - result, request.getTopic(), request.getQueueId(), - request.getConsumeQueueOffset(), flatFile.getConsumeQueueMaxOffset()); - - try { - flatFile.getCompositeFlatFileLock().lock(); - - // reset dispatch offset, this operation will cause duplicate message in commitLog - long minOffsetInQueue = - defaultStore.getMinOffsetInQueue(request.getTopic(), request.getQueueId()); - - // when dispatch offset is smaller than min offset in local cq - // some messages may be lost at this time - if (flatFile.getConsumeQueueMaxOffset() < minOffsetInQueue) { - // if we use flatFile.destroy() directly will cause manager reference leak. 
- tieredFlatFileManager.destroyCompositeFile(flatFile.getMessageQueue()); - logger.warn("Found cq max offset is smaller than local cq min offset, " + - "so destroy tiered flat file to recreated, topic: {}, queueId: {}", - request.getTopic(), request.getQueueId()); - } else { - flatFile.initOffset(flatFile.getConsumeQueueMaxOffset()); - } - - // clean invalid dispatch request - dispatchRequestWriteMap.remove(flatFile); - requestList.clear(); - } finally { - flatFile.getCompositeFlatFileLock().unlock(); - } - break; - } - - // other append result - logger.warn("Append consume queue failed, result: {}, topic: {}, queue: {}, request offset: {}", - result, request.getTopic(), request.getQueueId(), request.getConsumeQueueOffset()); - } - - // remove empty list, prevent send back - if (requestList.isEmpty()) { - dispatchRequestReadMap.remove(flatFile); - } - } - - cqMetricsMap.forEach((messageQueue, count) -> { - Attributes attributes = TieredStoreMetricsManager.newAttributesBuilder() - .put(TieredStoreMetricsConstant.LABEL_TOPIC, messageQueue.getTopic()) - .put(TieredStoreMetricsConstant.LABEL_QUEUE_ID, messageQueue.getQueueId()) - .put(TieredStoreMetricsConstant.LABEL_FILE_TYPE, FileSegmentType.CONSUME_QUEUE.name().toLowerCase()) - .build(); - TieredStoreMetricsManager.messagesDispatchTotal.add(count, attributes); - }); - - ifMetricsMap.forEach((messageQueue, count) -> { - Attributes attributes = TieredStoreMetricsManager.newAttributesBuilder() - .put(TieredStoreMetricsConstant.LABEL_TOPIC, messageQueue.getTopic()) - .put(TieredStoreMetricsConstant.LABEL_QUEUE_ID, messageQueue.getQueueId()) - .put(TieredStoreMetricsConstant.LABEL_FILE_TYPE, FileSegmentType.INDEX.name().toLowerCase()) - .build(); - TieredStoreMetricsManager.messagesDispatchTotal.add(count, attributes); - }); - - copySurvivorObject(); - } - - // Allow work-stealing - public void doDispatchTask() { - try { - dispatchTaskLock.lock(); - buildConsumeQueueAndIndexFile(); - } catch (Exception e) { - logger.error("Tiered storage do dispatch task failed", e); - } finally { - dispatchTaskLock.unlock(); - } - } - - @Override - public String getServiceName() { - return "TieredStoreDispatcherService"; - } - - @Override - public void run() { - while (!stopped) { - waitForRunning(1000); - doDispatchTask(); - } - } -} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/TieredMessageFetcher.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/TieredMessageFetcher.java deleted file mode 100644 index 7b0c47c592b..00000000000 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/TieredMessageFetcher.java +++ /dev/null @@ -1,585 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
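The deleted `TieredMessageFetcher` below sizes its read-ahead cache as a fraction of the heap and weighs entries by their buffer size, as its `initCache()` shows further down. The following is a standalone sketch of that Caffeine sizing policy, with a plain `String`/`ByteBuffer` pair standing in for the fetcher's own key and value types.

```java
import java.nio.ByteBuffer;
import java.util.concurrent.TimeUnit;

import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;

public class ReadAheadCacheSketch {

    public static Cache<String, ByteBuffer> build(double sizeThresholdRate, long expireMillis) {
        // Bound the cache by a fraction of the heap and weigh each entry by its buffer size.
        long memoryMaxSize = (long) (Runtime.getRuntime().maxMemory() * sizeThresholdRate);
        return Caffeine.newBuilder()
            .expireAfterWrite(expireMillis, TimeUnit.MILLISECONDS)
            .maximumWeight(memoryMaxSize)
            .weigher((String key, ByteBuffer buffer) -> buffer.remaining())
            .recordStats()
            .build();
    }

    public static void main(String[] args) {
        // 0.3 of max heap; 15_000 ms mirrors the new readAheadCacheExpireDuration default in MessageStoreConfig.
        Cache<String, ByteBuffer> cache = build(0.3, 15_000L);
        cache.put("TopicTest-0@100", ByteBuffer.allocate(1024));
        System.out.println("cached entries: " + cache.estimatedSize());
    }
}
```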
- */ -package org.apache.rocketmq.tieredstore; - -import com.github.benmanes.caffeine.cache.Cache; -import com.github.benmanes.caffeine.cache.Caffeine; -import com.github.benmanes.caffeine.cache.Scheduler; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Stopwatch; -import io.opentelemetry.api.common.Attributes; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.TimeUnit; -import org.apache.commons.lang3.tuple.Pair; -import org.apache.rocketmq.common.BoundaryType; -import org.apache.rocketmq.common.message.MessageQueue; -import org.apache.rocketmq.logging.org.slf4j.Logger; -import org.apache.rocketmq.logging.org.slf4j.LoggerFactory; -import org.apache.rocketmq.store.GetMessageResult; -import org.apache.rocketmq.store.GetMessageStatus; -import org.apache.rocketmq.store.MessageFilter; -import org.apache.rocketmq.store.QueryMessageResult; -import org.apache.rocketmq.store.SelectMappedBufferResult; -import org.apache.rocketmq.tieredstore.common.GetMessageResultExt; -import org.apache.rocketmq.tieredstore.common.InFlightRequestFuture; -import org.apache.rocketmq.tieredstore.common.MessageCacheKey; -import org.apache.rocketmq.tieredstore.common.SelectBufferResult; -import org.apache.rocketmq.tieredstore.common.SelectBufferResultWrapper; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.common.TieredStoreExecutor; -import org.apache.rocketmq.tieredstore.exception.TieredStoreException; -import org.apache.rocketmq.tieredstore.file.CompositeFlatFile; -import org.apache.rocketmq.tieredstore.file.CompositeQueueFlatFile; -import org.apache.rocketmq.tieredstore.file.TieredConsumeQueue; -import org.apache.rocketmq.tieredstore.file.TieredFlatFileManager; -import org.apache.rocketmq.tieredstore.index.IndexItem; -import org.apache.rocketmq.tieredstore.index.IndexService; -import org.apache.rocketmq.tieredstore.metadata.TieredMetadataStore; -import org.apache.rocketmq.tieredstore.metadata.TopicMetadata; -import org.apache.rocketmq.tieredstore.metrics.TieredStoreMetricsConstant; -import org.apache.rocketmq.tieredstore.metrics.TieredStoreMetricsManager; -import org.apache.rocketmq.tieredstore.util.CQItemBufferUtil; -import org.apache.rocketmq.tieredstore.util.MessageBufferUtil; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; - -public class TieredMessageFetcher implements MessageStoreFetcher { - - private static final Logger LOGGER = LoggerFactory.getLogger(TieredStoreUtil.TIERED_STORE_LOGGER_NAME); - - private final String brokerName; - private final TieredMetadataStore metadataStore; - private final TieredMessageStoreConfig storeConfig; - private final TieredFlatFileManager flatFileManager; - private final Cache readAheadCache; - - public TieredMessageFetcher(TieredMessageStoreConfig storeConfig) { - this.storeConfig = storeConfig; - this.brokerName = storeConfig.getBrokerName(); - this.metadataStore = TieredStoreUtil.getMetadataStore(storeConfig); - this.flatFileManager = TieredFlatFileManager.getInstance(storeConfig); - this.readAheadCache = this.initCache(storeConfig); - } - - private Cache initCache(TieredMessageStoreConfig storeConfig) { - long memoryMaxSize = - (long) (Runtime.getRuntime().maxMemory() * storeConfig.getReadAheadCacheSizeThresholdRate()); - - return Caffeine.newBuilder() - .scheduler(Scheduler.systemScheduler()) - 
.expireAfterWrite(storeConfig.getReadAheadCacheExpireDuration(), TimeUnit.MILLISECONDS) - .maximumWeight(memoryMaxSize) - // Using the buffer size of messages to calculate memory usage - .weigher((MessageCacheKey key, SelectBufferResultWrapper msg) -> msg.getBufferSize()) - .recordStats() - .build(); - } - - @VisibleForTesting - public Cache getMessageCache() { - return readAheadCache; - } - - protected void putMessageToCache(CompositeFlatFile flatFile, SelectBufferResultWrapper result) { - readAheadCache.put(new MessageCacheKey(flatFile, result.getOffset()), result); - } - - protected SelectBufferResultWrapper getMessageFromCache(CompositeFlatFile flatFile, long offset) { - return readAheadCache.getIfPresent(new MessageCacheKey(flatFile, offset)); - } - - protected void recordCacheAccess(CompositeFlatFile flatFile, - String group, long offset, List resultWrapperList) { - if (!resultWrapperList.isEmpty()) { - offset = resultWrapperList.get(resultWrapperList.size() - 1).getOffset(); - } - flatFile.recordGroupAccess(group, offset); - resultWrapperList.forEach(wrapper -> { - if (wrapper.incrementAndGet() >= flatFile.getActiveGroupCount()) { - readAheadCache.invalidate(new MessageCacheKey(flatFile, wrapper.getOffset())); - } - }); - } - - private void prefetchMessage(CompositeQueueFlatFile flatFile, String group, int maxCount, long nextBeginOffset) { - if (maxCount == 1 || flatFile.getReadAheadFactor() == 1) { - return; - } - - // make sure there is only one request per group and request range - int prefetchBatchSize = Math.min(maxCount * flatFile.getReadAheadFactor(), storeConfig.getReadAheadMessageCountThreshold()); - InFlightRequestFuture inflightRequest = flatFile.getInflightRequest(group, nextBeginOffset, prefetchBatchSize); - if (!inflightRequest.isAllDone()) { - return; - } - - synchronized (flatFile) { - inflightRequest = flatFile.getInflightRequest(nextBeginOffset, maxCount); - if (!inflightRequest.isAllDone()) { - return; - } - - long maxOffsetOfLastRequest = inflightRequest.getLastFuture().join(); - boolean lastRequestIsExpired = getMessageFromCache(flatFile, nextBeginOffset) == null; - - if (lastRequestIsExpired || - maxOffsetOfLastRequest != -1L && nextBeginOffset >= inflightRequest.getStartOffset()) { - - long queueOffset; - if (lastRequestIsExpired) { - queueOffset = nextBeginOffset; - flatFile.decreaseReadAheadFactor(); - } else { - queueOffset = maxOffsetOfLastRequest + 1; - flatFile.increaseReadAheadFactor(); - } - - int factor = Math.min(flatFile.getReadAheadFactor(), storeConfig.getReadAheadMessageCountThreshold() / maxCount); - int flag = 0; - int concurrency = 1; - if (factor > storeConfig.getReadAheadBatchSizeFactorThreshold()) { - flag = factor % storeConfig.getReadAheadBatchSizeFactorThreshold() == 0 ? 
0 : 1; - concurrency = factor / storeConfig.getReadAheadBatchSizeFactorThreshold() + flag; - } - int requestBatchSize = maxCount * Math.min(factor, storeConfig.getReadAheadBatchSizeFactorThreshold()); - - List>> futureList = new ArrayList<>(); - long nextQueueOffset = queueOffset; - if (flag == 1) { - int firstBatchSize = factor % storeConfig.getReadAheadBatchSizeFactorThreshold() * maxCount; - CompletableFuture future = prefetchMessageThenPutToCache(flatFile, nextQueueOffset, firstBatchSize); - futureList.add(Pair.of(firstBatchSize, future)); - nextQueueOffset += firstBatchSize; - } - for (long i = 0; i < concurrency - flag; i++) { - CompletableFuture future = prefetchMessageThenPutToCache(flatFile, nextQueueOffset + i * requestBatchSize, requestBatchSize); - futureList.add(Pair.of(requestBatchSize, future)); - } - flatFile.putInflightRequest(group, queueOffset, maxCount * factor, futureList); - LOGGER.debug("TieredMessageFetcher#preFetchMessage: try to prefetch messages for later requests: next begin offset: {}, request offset: {}, factor: {}, flag: {}, request batch: {}, concurrency: {}", - nextBeginOffset, queueOffset, factor, flag, requestBatchSize, concurrency); - } - } - } - - private CompletableFuture prefetchMessageThenPutToCache( - CompositeQueueFlatFile flatFile, long queueOffset, int batchSize) { - - MessageQueue mq = flatFile.getMessageQueue(); - return getMessageFromTieredStoreAsync(flatFile, queueOffset, batchSize) - .thenApply(result -> { - if (result.getStatus() == GetMessageStatus.OFFSET_OVERFLOW_ONE || - result.getStatus() == GetMessageStatus.OFFSET_OVERFLOW_BADLY) { - return -1L; - } - if (result.getStatus() != GetMessageStatus.FOUND) { - LOGGER.warn("MessageFetcher prefetch message then put to cache failed, result: {}, " + - "topic: {}, queue: {}, queue offset: {}, batch size: {}", - result.getStatus(), mq.getTopic(), mq.getQueueId(), queueOffset, batchSize); - return -1L; - } - try { - List offsetList = result.getMessageQueueOffset(); - List tagCodeList = result.getTagCodeList(); - List msgList = result.getMessageMapedList(); - for (int i = 0; i < offsetList.size(); i++) { - SelectMappedBufferResult msg = msgList.get(i); - SelectBufferResultWrapper bufferResult = new SelectBufferResultWrapper( - msg, offsetList.get(i), tagCodeList.get(i), false); - this.putMessageToCache(flatFile, bufferResult); - } - return offsetList.get(offsetList.size() - 1); - } catch (Exception e) { - LOGGER.error("MessageFetcher prefetch message then put to cache failed, " + - "topic: {}, queue: {}, queue offset: {}, batch size: {}", - mq.getTopic(), mq.getQueueId(), queueOffset, batchSize, e); - } - return -1L; - }); - } - - public CompletableFuture getMessageFromCacheAsync(CompositeQueueFlatFile flatFile, - String group, long queueOffset, int maxCount, boolean waitInflightRequest) { - - MessageQueue mq = flatFile.getMessageQueue(); - - long lastGetOffset = queueOffset - 1; - List resultWrapperList = new ArrayList<>(maxCount); - for (int i = 0; i < maxCount; i++) { - lastGetOffset++; - SelectBufferResultWrapper wrapper = getMessageFromCache(flatFile, lastGetOffset); - if (wrapper == null) { - lastGetOffset--; - break; - } - resultWrapperList.add(wrapper); - } - - // only record cache access count once - if (waitInflightRequest) { - Attributes attributes = TieredStoreMetricsManager.newAttributesBuilder() - .put(TieredStoreMetricsConstant.LABEL_TOPIC, mq.getTopic()) - .put(TieredStoreMetricsConstant.LABEL_GROUP, group) - .build(); - TieredStoreMetricsManager.cacheAccess.add(maxCount, 
attributes); - TieredStoreMetricsManager.cacheHit.add(resultWrapperList.size(), attributes); - } - - // If there are no messages in the cache and there are currently requests being pulled. - // We need to wait for the request to return before continuing. - if (resultWrapperList.isEmpty() && waitInflightRequest) { - CompletableFuture future = - flatFile.getInflightRequest(group, queueOffset, maxCount).getFuture(queueOffset); - if (!future.isDone()) { - Stopwatch stopwatch = Stopwatch.createStarted(); - // to prevent starvation issues, only allow waiting for processing request once - return future.thenComposeAsync(v -> { - LOGGER.debug("MessageFetcher#getMessageFromCacheAsync: wait for response cost: {}ms", - stopwatch.elapsed(TimeUnit.MILLISECONDS)); - return getMessageFromCacheAsync(flatFile, group, queueOffset, maxCount, false); - }, TieredStoreExecutor.fetchDataExecutor); - } - } - - // try to get message from cache again when prefetch request is done - for (int i = 0; i < maxCount - resultWrapperList.size(); i++) { - lastGetOffset++; - SelectBufferResultWrapper wrapper = getMessageFromCache(flatFile, lastGetOffset); - if (wrapper == null) { - lastGetOffset--; - break; - } - resultWrapperList.add(wrapper); - } - - recordCacheAccess(flatFile, group, queueOffset, resultWrapperList); - - if (resultWrapperList.isEmpty()) { - // If cache miss, pull messages immediately - LOGGER.info("MessageFetcher cache miss, group: {}, topic: {}, queueId: {}, offset: {}, maxCount: {}", - group, mq.getTopic(), mq.getQueueId(), queueOffset, maxCount); - } else { - // If cache hit, return buffer result immediately and asynchronously prefetch messages - LOGGER.debug("MessageFetcher cache hit, group: {}, topic: {}, queueId: {}, offset: {}, maxCount: {}, resultSize: {}", - group, mq.getTopic(), mq.getQueueId(), queueOffset, maxCount, resultWrapperList.size()); - - GetMessageResultExt result = new GetMessageResultExt(); - result.setStatus(GetMessageStatus.FOUND); - result.setMinOffset(flatFile.getConsumeQueueMinOffset()); - result.setMaxOffset(flatFile.getConsumeQueueCommitOffset()); - result.setNextBeginOffset(queueOffset + resultWrapperList.size()); - resultWrapperList.forEach(wrapper -> result.addMessageExt( - wrapper.getDuplicateResult(), wrapper.getOffset(), wrapper.getTagCode())); - - if (lastGetOffset < result.getMaxOffset()) { - this.prefetchMessage(flatFile, group, maxCount, lastGetOffset + 1); - } - return CompletableFuture.completedFuture(result); - } - - CompletableFuture resultFuture; - synchronized (flatFile) { - int batchSize = maxCount * storeConfig.getReadAheadMinFactor(); - resultFuture = getMessageFromTieredStoreAsync(flatFile, queueOffset, batchSize) - .thenApply(result -> { - if (result.getStatus() != GetMessageStatus.FOUND) { - return result; - } - - GetMessageResultExt newResult = new GetMessageResultExt(); - List offsetList = result.getMessageQueueOffset(); - List tagCodeList = result.getTagCodeList(); - List msgList = result.getMessageMapedList(); - - for (int i = 0; i < offsetList.size(); i++) { - SelectMappedBufferResult msg = msgList.get(i); - SelectBufferResultWrapper bufferResult = new SelectBufferResultWrapper( - msg, offsetList.get(i), tagCodeList.get(i), true); - this.putMessageToCache(flatFile, bufferResult); - if (newResult.getMessageMapedList().size() < maxCount) { - newResult.addMessageExt(msg, offsetList.get(i), tagCodeList.get(i)); - } - } - - newResult.setStatus(GetMessageStatus.FOUND); - newResult.setMinOffset(flatFile.getConsumeQueueMinOffset()); - 
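The read-ahead path above keeps prefetched messages in a Caffeine cache that is bounded by the accumulated size of the cached buffers rather than by entry count, and expires entries a fixed time after they are written. A small self-contained sketch of that configuration follows; the String key and byte[] value stand in for the module's own cache key and buffer wrapper, and the parameter names are illustrative.

    import com.github.benmanes.caffeine.cache.Cache;
    import com.github.benmanes.caffeine.cache.Caffeine;
    import com.github.benmanes.caffeine.cache.Scheduler;
    import java.util.concurrent.TimeUnit;

    public class ReadAheadCacheSketch {

        // Bound the cache by accumulated buffer size so a few large messages
        // cannot exceed the configured share of the JVM heap.
        public static Cache<String, byte[]> build(long expireMillis, double heapRatio) {
            long maximumWeightBytes = (long) (Runtime.getRuntime().maxMemory() * heapRatio);
            return Caffeine.newBuilder()
                .scheduler(Scheduler.systemScheduler())          // evict promptly even without reads
                .expireAfterWrite(expireMillis, TimeUnit.MILLISECONDS)
                .maximumWeight(maximumWeightBytes)
                .weigher((String key, byte[] buffer) -> buffer.length)
                .recordStats()                                   // hit/miss counters feed the metrics
                .build();
        }

        public static void main(String[] args) {
            Cache<String, byte[]> cache = build(10_000L, 0.3);
            cache.put("TopicTest@0@100", new byte[128]);
            System.out.println(cache.getIfPresent("TopicTest@0@100") != null); // true
        }
    }

Weighing entries by buffer size mirrors how the fetcher accounts for memory: the cost of an entry is the size of the message it holds, not a flat count of one.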
newResult.setMaxOffset(flatFile.getConsumeQueueCommitOffset()); - newResult.setNextBeginOffset(queueOffset + newResult.getMessageMapedList().size()); - return newResult; - }); - - List>> futureList = new ArrayList<>(); - CompletableFuture inflightRequestFuture = resultFuture.thenApply(result -> - result.getStatus() == GetMessageStatus.FOUND ? - result.getMessageQueueOffset().get(result.getMessageQueueOffset().size() - 1) : -1L); - futureList.add(Pair.of(batchSize, inflightRequestFuture)); - flatFile.putInflightRequest(group, queueOffset, batchSize, futureList); - } - return resultFuture; - } - - public CompletableFuture getMessageFromTieredStoreAsync( - CompositeQueueFlatFile flatFile, long queueOffset, int batchSize) { - - GetMessageResultExt result = new GetMessageResultExt(); - result.setMinOffset(flatFile.getConsumeQueueMinOffset()); - result.setMaxOffset(flatFile.getConsumeQueueCommitOffset()); - - if (queueOffset < result.getMaxOffset()) { - batchSize = Math.min(batchSize, (int) Math.min(result.getMaxOffset() - queueOffset, Integer.MAX_VALUE)); - } else if (queueOffset == result.getMaxOffset()) { - result.setStatus(GetMessageStatus.OFFSET_OVERFLOW_ONE); - result.setNextBeginOffset(queueOffset); - return CompletableFuture.completedFuture(result); - } else if (queueOffset > result.getMaxOffset()) { - result.setStatus(GetMessageStatus.OFFSET_OVERFLOW_BADLY); - result.setNextBeginOffset(result.getMaxOffset()); - return CompletableFuture.completedFuture(result); - } - - LOGGER.info("MessageFetcher#getMessageFromTieredStoreAsync, " + - "topic: {}, queueId: {}, broker offset: {}-{}, offset: {}, expect: {}", - flatFile.getMessageQueue().getTopic(), flatFile.getMessageQueue().getQueueId(), - result.getMinOffset(), result.getMaxOffset(), queueOffset, batchSize); - - CompletableFuture readConsumeQueueFuture; - try { - readConsumeQueueFuture = flatFile.getConsumeQueueAsync(queueOffset, batchSize); - } catch (TieredStoreException e) { - switch (e.getErrorCode()) { - case NO_NEW_DATA: - result.setStatus(GetMessageStatus.OFFSET_OVERFLOW_ONE); - result.setNextBeginOffset(queueOffset); - return CompletableFuture.completedFuture(result); - case ILLEGAL_PARAM: - case ILLEGAL_OFFSET: - default: - result.setStatus(GetMessageStatus.OFFSET_FOUND_NULL); - result.setNextBeginOffset(queueOffset); - return CompletableFuture.completedFuture(result); - } - } - - CompletableFuture readCommitLogFuture = readConsumeQueueFuture.thenCompose(cqBuffer -> { - long firstCommitLogOffset = CQItemBufferUtil.getCommitLogOffset(cqBuffer); - cqBuffer.position(cqBuffer.remaining() - TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE); - long lastCommitLogOffset = CQItemBufferUtil.getCommitLogOffset(cqBuffer); - if (lastCommitLogOffset < firstCommitLogOffset) { - LOGGER.error("MessageFetcher#getMessageFromTieredStoreAsync, " + - "last offset is smaller than first offset, " + - "topic: {} queueId: {}, offset: {}, firstOffset: {}, lastOffset: {}", - flatFile.getMessageQueue().getTopic(), flatFile.getMessageQueue().getQueueId(), queueOffset, - firstCommitLogOffset, lastCommitLogOffset); - return CompletableFuture.completedFuture(ByteBuffer.allocate(0)); - } - - // Get the total size of the data by reducing the length limit of cq to prevent OOM - long length = lastCommitLogOffset - firstCommitLogOffset + CQItemBufferUtil.getSize(cqBuffer); - while (cqBuffer.limit() > TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE && - length > storeConfig.getReadAheadMessageSizeThreshold()) { - cqBuffer.limit(cqBuffer.position()); - 
cqBuffer.position(cqBuffer.limit() - TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE); - length = CQItemBufferUtil.getCommitLogOffset(cqBuffer) - - firstCommitLogOffset + CQItemBufferUtil.getSize(cqBuffer); - } - - return flatFile.getCommitLogAsync(firstCommitLogOffset, (int) length); - }); - - int finalBatchSize = batchSize; - return readConsumeQueueFuture.thenCombine(readCommitLogFuture, (cqBuffer, msgBuffer) -> { - List bufferList = MessageBufferUtil.splitMessageBuffer(cqBuffer, msgBuffer); - int requestSize = cqBuffer.remaining() / TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE; - if (bufferList.isEmpty()) { - result.setStatus(GetMessageStatus.NO_MATCHED_MESSAGE); - result.setNextBeginOffset(queueOffset + requestSize); - } else { - result.setStatus(GetMessageStatus.FOUND); - result.setNextBeginOffset(queueOffset + requestSize); - - for (SelectBufferResult bufferResult : bufferList) { - ByteBuffer slice = bufferResult.getByteBuffer().slice(); - slice.limit(bufferResult.getSize()); - SelectMappedBufferResult msg = new SelectMappedBufferResult(bufferResult.getStartOffset(), - bufferResult.getByteBuffer(), bufferResult.getSize(), null); - result.addMessageExt(msg, MessageBufferUtil.getQueueOffset(slice), bufferResult.getTagCode()); - } - } - return result; - }).exceptionally(e -> { - MessageQueue mq = flatFile.getMessageQueue(); - LOGGER.warn("MessageFetcher#getMessageFromTieredStoreAsync failed, " + - "topic: {} queueId: {}, offset: {}, batchSize: {}", mq.getTopic(), mq.getQueueId(), queueOffset, finalBatchSize, e); - result.setStatus(GetMessageStatus.OFFSET_FOUND_NULL); - result.setNextBeginOffset(queueOffset); - return result; - }); - } - - @Override - public CompletableFuture getMessageAsync( - String group, String topic, int queueId, long queueOffset, int maxCount, final MessageFilter messageFilter) { - - GetMessageResult result = new GetMessageResult(); - CompositeQueueFlatFile flatFile = flatFileManager.getFlatFile(new MessageQueue(topic, brokerName, queueId)); - - if (flatFile == null) { - result.setNextBeginOffset(queueOffset); - result.setStatus(GetMessageStatus.NO_MATCHED_LOGIC_QUEUE); - return CompletableFuture.completedFuture(result); - } - - // Max queue offset means next message put position - result.setMinOffset(flatFile.getConsumeQueueMinOffset()); - result.setMaxOffset(flatFile.getConsumeQueueCommitOffset()); - - // Fill result according file offset. 
- // Offset range | Result | Fix to - // (-oo, 0] | no message | current offset - // (0, min) | too small | min offset - // [min, max) | correct | - // [max, max] | overflow one | max offset - // (max, +oo) | overflow badly | max offset - - if (result.getMaxOffset() <= 0) { - result.setStatus(GetMessageStatus.NO_MESSAGE_IN_QUEUE); - result.setNextBeginOffset(queueOffset); - return CompletableFuture.completedFuture(result); - } else if (queueOffset < result.getMinOffset()) { - result.setStatus(GetMessageStatus.OFFSET_TOO_SMALL); - result.setNextBeginOffset(result.getMinOffset()); - return CompletableFuture.completedFuture(result); - } else if (queueOffset == result.getMaxOffset()) { - result.setStatus(GetMessageStatus.OFFSET_OVERFLOW_ONE); - result.setNextBeginOffset(result.getMaxOffset()); - return CompletableFuture.completedFuture(result); - } else if (queueOffset > result.getMaxOffset()) { - result.setStatus(GetMessageStatus.OFFSET_OVERFLOW_BADLY); - result.setNextBeginOffset(result.getMaxOffset()); - return CompletableFuture.completedFuture(result); - } - - return getMessageFromCacheAsync(flatFile, group, queueOffset, maxCount, true) - .thenApply(messageResultExt -> messageResultExt.doFilterMessage(messageFilter)); - } - - @Override - public CompletableFuture getEarliestMessageTimeAsync(String topic, int queueId) { - CompositeFlatFile flatFile = flatFileManager.getFlatFile(new MessageQueue(topic, brokerName, queueId)); - if (flatFile == null) { - return CompletableFuture.completedFuture(-1L); - } - - // read from timestamp to timestamp + length - int length = MessageBufferUtil.STORE_TIMESTAMP_POSITION + 8; - return flatFile.getCommitLogAsync(flatFile.getCommitLogMinOffset(), length) - .thenApply(MessageBufferUtil::getStoreTimeStamp); - } - - @Override - public CompletableFuture getMessageStoreTimeStampAsync(String topic, int queueId, long queueOffset) { - CompositeFlatFile flatFile = flatFileManager.getFlatFile(new MessageQueue(topic, brokerName, queueId)); - if (flatFile == null) { - return CompletableFuture.completedFuture(-1L); - } - - return flatFile.getConsumeQueueAsync(queueOffset) - .thenComposeAsync(cqItem -> { - long commitLogOffset = CQItemBufferUtil.getCommitLogOffset(cqItem); - int size = CQItemBufferUtil.getSize(cqItem); - return flatFile.getCommitLogAsync(commitLogOffset, size); - }, TieredStoreExecutor.fetchDataExecutor) - .thenApply(MessageBufferUtil::getStoreTimeStamp) - .exceptionally(e -> { - LOGGER.error("TieredMessageFetcher#getMessageStoreTimeStampAsync: " + - "get or decode message failed: topic: {}, queue: {}, offset: {}", topic, queueId, queueOffset, e); - return -1L; - }); - } - - @Override - public long getOffsetInQueueByTime(String topic, int queueId, long timestamp, BoundaryType type) { - CompositeFlatFile flatFile = flatFileManager.getFlatFile(new MessageQueue(topic, brokerName, queueId)); - if (flatFile == null) { - return -1L; - } - - try { - return flatFile.getOffsetInConsumeQueueByTime(timestamp, type); - } catch (Exception e) { - LOGGER.error("TieredMessageFetcher#getOffsetInQueueByTime: " + - "get offset in queue by time failed: topic: {}, queue: {}, timestamp: {}, type: {}", - topic, queueId, timestamp, type, e); - } - return -1L; - } - - @Override - public CompletableFuture queryMessageAsync( - String topic, String key, int maxCount, long begin, long end) { - - IndexService indexStoreService = TieredFlatFileManager.getTieredIndexService(storeConfig); - - long topicId; - try { - TopicMetadata topicMetadata = metadataStore.getTopic(topic); - if 
(topicMetadata == null) { - LOGGER.info("MessageFetcher#queryMessageAsync, topic metadata not found, topic: {}", topic); - return CompletableFuture.completedFuture(new QueryMessageResult()); - } - topicId = topicMetadata.getTopicId(); - } catch (Exception e) { - LOGGER.error("MessageFetcher#queryMessageAsync, get topic id failed, topic: {}", topic, e); - return CompletableFuture.completedFuture(new QueryMessageResult()); - } - - CompletableFuture> future = indexStoreService.queryAsync(topic, key, maxCount, begin, end); - - return future.thenCompose(indexItemList -> { - QueryMessageResult result = new QueryMessageResult(); - List> futureList = new ArrayList<>(maxCount); - for (IndexItem indexItem : indexItemList) { - if (topicId != indexItem.getTopicId()) { - continue; - } - CompositeFlatFile flatFile = - flatFileManager.getFlatFile(new MessageQueue(topic, brokerName, indexItem.getQueueId())); - if (flatFile == null) { - continue; - } - CompletableFuture getMessageFuture = flatFile - .getCommitLogAsync(indexItem.getOffset(), indexItem.getSize()) - .thenAccept(messageBuffer -> result.addMessage( - new SelectMappedBufferResult( - indexItem.getOffset(), messageBuffer, indexItem.getSize(), null))); - futureList.add(getMessageFuture); - if (futureList.size() >= maxCount) { - break; - } - } - return CompletableFuture.allOf(futureList.toArray(new CompletableFuture[0])).thenApply(v -> result); - }).whenComplete((result, throwable) -> { - if (result != null) { - LOGGER.info("MessageFetcher#queryMessageAsync, " + - "query result: {}, topic: {}, topicId: {}, key: {}, maxCount: {}, timestamp: {}-{}", - result.getMessageBufferList().size(), topic, topicId, key, maxCount, begin, end); - } - }); - } -} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/TieredMessageStore.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/TieredMessageStore.java index 015c27efae1..99d586ae236 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/TieredMessageStore.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/TieredMessageStore.java @@ -16,109 +16,158 @@ */ package org.apache.rocketmq.tieredstore; +import com.google.common.base.Stopwatch; +import com.google.common.collect.Sets; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.common.AttributesBuilder; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.sdk.metrics.InstrumentSelector; +import io.opentelemetry.sdk.metrics.ViewBuilder; +import java.lang.reflect.Constructor; import java.util.List; import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; - -import com.google.common.base.Stopwatch; - -import org.apache.commons.lang3.StringUtils; import org.apache.rocketmq.common.BoundaryType; import org.apache.rocketmq.common.MixAll; import org.apache.rocketmq.common.Pair; -import org.apache.rocketmq.common.PopAckConstants; -import org.apache.rocketmq.common.message.MessageExtBrokerInner; import org.apache.rocketmq.common.message.MessageQueue; import org.apache.rocketmq.common.topic.TopicValidator; -import org.apache.rocketmq.logging.org.slf4j.Logger; -import org.apache.rocketmq.logging.org.slf4j.LoggerFactory; import org.apache.rocketmq.store.GetMessageResult; import org.apache.rocketmq.store.GetMessageStatus; import org.apache.rocketmq.store.MessageFilter; import org.apache.rocketmq.store.MessageStore; -import org.apache.rocketmq.store.PutMessageResult; import 
org.apache.rocketmq.store.QueryMessageResult; import org.apache.rocketmq.store.SelectMappedBufferResult; import org.apache.rocketmq.store.plugin.AbstractPluginMessageStore; import org.apache.rocketmq.store.plugin.MessageStorePluginContext; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.common.TieredStoreExecutor; -import org.apache.rocketmq.tieredstore.file.CompositeFlatFile; -import org.apache.rocketmq.tieredstore.file.TieredFlatFileManager; -import org.apache.rocketmq.tieredstore.metadata.TieredMetadataStore; +import org.apache.rocketmq.tieredstore.core.MessageStoreDispatcher; +import org.apache.rocketmq.tieredstore.core.MessageStoreDispatcherImpl; +import org.apache.rocketmq.tieredstore.core.MessageStoreFetcher; +import org.apache.rocketmq.tieredstore.core.MessageStoreFetcherImpl; +import org.apache.rocketmq.tieredstore.core.MessageStoreFilter; +import org.apache.rocketmq.tieredstore.core.MessageStoreTopicFilter; +import org.apache.rocketmq.tieredstore.file.FlatFileStore; +import org.apache.rocketmq.tieredstore.file.FlatMessageFile; +import org.apache.rocketmq.tieredstore.index.IndexService; +import org.apache.rocketmq.tieredstore.index.IndexStoreService; +import org.apache.rocketmq.tieredstore.metadata.MetadataStore; import org.apache.rocketmq.tieredstore.metrics.TieredStoreMetricsConstant; import org.apache.rocketmq.tieredstore.metrics.TieredStoreMetricsManager; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; - -import io.opentelemetry.api.common.Attributes; -import io.opentelemetry.api.common.AttributesBuilder; -import io.opentelemetry.api.metrics.Meter; -import io.opentelemetry.sdk.metrics.InstrumentSelector; -import io.opentelemetry.sdk.metrics.ViewBuilder; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TieredMessageStore extends AbstractPluginMessageStore { - protected static final Logger logger = LoggerFactory.getLogger(TieredStoreUtil.TIERED_STORE_LOGGER_NAME); + protected static final Logger log = LoggerFactory.getLogger(MessageStoreUtil.TIERED_STORE_LOGGER_NAME); protected final String brokerName; - protected final TieredMessageStoreConfig storeConfig; - protected final TieredMetadataStore metadataStore; - - protected final TieredDispatcher dispatcher; - protected final TieredMessageFetcher fetcher; - protected final TieredFlatFileManager flatFileManager; + protected final MessageStore defaultStore; + protected final MessageStoreConfig storeConfig; + protected final MessageStorePluginContext context; + + protected final MetadataStore metadataStore; + protected final MessageStoreExecutor storeExecutor; + protected final IndexService indexService; + protected final FlatFileStore flatFileStore; + protected final MessageStoreFilter topicFilter; + protected final MessageStoreFetcher fetcher; + protected final MessageStoreDispatcher dispatcher; public TieredMessageStore(MessageStorePluginContext context, MessageStore next) { super(context, next); - this.storeConfig = new TieredMessageStoreConfig(); - context.registerConfiguration(storeConfig); - this.brokerName = storeConfig.getBrokerName(); - TieredStoreUtil.addSystemTopic(storeConfig.getBrokerClusterName()); - TieredStoreUtil.addSystemTopic(brokerName); - - TieredStoreExecutor.init(); - this.metadataStore = TieredStoreUtil.getMetadataStore(storeConfig); - this.fetcher = new TieredMessageFetcher(storeConfig); - this.dispatcher = new TieredDispatcher(next, 
storeConfig); - - this.flatFileManager = TieredFlatFileManager.getInstance(storeConfig); + + this.storeConfig = new MessageStoreConfig(); + this.context = context; + this.context.registerConfiguration(this.storeConfig); + this.brokerName = this.storeConfig.getBrokerName(); + this.defaultStore = next; + + this.metadataStore = this.getMetadataStore(this.storeConfig); + this.topicFilter = new MessageStoreTopicFilter(this.storeConfig); + this.storeExecutor = new MessageStoreExecutor(); + this.flatFileStore = new FlatFileStore(this.storeConfig, this.metadataStore, this.storeExecutor); + this.indexService = new IndexStoreService(this.flatFileStore.getFlatFileFactory(), + MessageStoreUtil.getIndexFilePath(this.storeConfig.getBrokerName())); + this.fetcher = new MessageStoreFetcherImpl(this); + this.dispatcher = new MessageStoreDispatcherImpl(this); next.addDispatcher(dispatcher); } @Override public boolean load() { - boolean loadFlatFile = flatFileManager.load(); + boolean loadFlatFile = flatFileStore.load(); boolean loadNextStore = next.load(); boolean result = loadFlatFile && loadNextStore; if (result) { - dispatcher.initScheduleTask(); + indexService.start(); dispatcher.start(); } return result; } - public TieredMessageStoreConfig getStoreConfig() { + public String getBrokerName() { + return brokerName; + } + + public MessageStoreConfig getStoreConfig() { return storeConfig; } + public MessageStore getDefaultStore() { + return defaultStore; + } + + private MetadataStore getMetadataStore(MessageStoreConfig storeConfig) { + try { + Class clazz = + Class.forName(storeConfig.getTieredMetadataServiceProvider()).asSubclass(MetadataStore.class); + Constructor constructor = clazz.getConstructor(MessageStoreConfig.class); + return constructor.newInstance(storeConfig); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public MetadataStore getMetadataStore() { + return metadataStore; + } + + public MessageStoreFilter getTopicFilter() { + return topicFilter; + } + + public MessageStoreExecutor getStoreExecutor() { + return storeExecutor; + } + + public FlatFileStore getFlatFileStore() { + return flatFileStore; + } + + public IndexService getIndexService() { + return indexService; + } + public boolean fetchFromCurrentStore(String topic, int queueId, long offset) { return fetchFromCurrentStore(topic, queueId, offset, 1); } + @SuppressWarnings("all") public boolean fetchFromCurrentStore(String topic, int queueId, long offset, int batchSize) { - TieredMessageStoreConfig.TieredStorageLevel deepStorageLevel = storeConfig.getTieredStorageLevel(); + MessageStoreConfig.TieredStorageLevel storageLevel = storeConfig.getTieredStorageLevel(); - if (deepStorageLevel.check(TieredMessageStoreConfig.TieredStorageLevel.FORCE)) { + if (storageLevel.check(MessageStoreConfig.TieredStorageLevel.FORCE)) { return true; } - if (!deepStorageLevel.isEnable()) { + if (!storageLevel.isEnable()) { return false; } - CompositeFlatFile flatFile = flatFileManager.getFlatFile(new MessageQueue(topic, brokerName, queueId)); + FlatMessageFile flatFile = flatFileStore.getFlatFile(new MessageQueue(topic, brokerName, queueId)); if (flatFile == null) { return false; } @@ -128,12 +177,12 @@ public boolean fetchFromCurrentStore(String topic, int queueId, long offset, int } // determine whether tiered storage path conditions are met - if (deepStorageLevel.check(TieredMessageStoreConfig.TieredStorageLevel.NOT_IN_DISK) + if (storageLevel.check(MessageStoreConfig.TieredStorageLevel.NOT_IN_DISK) && 
!next.checkInStoreByConsumeOffset(topic, queueId, offset)) { return true; } - if (deepStorageLevel.check(TieredMessageStoreConfig.TieredStorageLevel.NOT_IN_MEM) + if (storageLevel.check(MessageStoreConfig.TieredStorageLevel.NOT_IN_MEM) && !next.checkInMemByConsumeOffset(topic, queueId, offset, batchSize)) { return true; } @@ -150,15 +199,17 @@ public GetMessageResult getMessage(String group, String topic, int queueId, long public CompletableFuture getMessageAsync(String group, String topic, int queueId, long offset, int maxMsgNums, MessageFilter messageFilter) { - // For system topic, force reading from local store - if (TieredStoreUtil.isSystemTopic(topic) || PopAckConstants.isStartWithRevivePrefix(topic)) { + // for system topic, force reading from local store + if (topicFilter.filterTopic(topic)) { return next.getMessageAsync(group, topic, queueId, offset, maxMsgNums, messageFilter); } if (fetchFromCurrentStore(topic, queueId, offset, maxMsgNums)) { - logger.trace("GetMessageAsync from current store, topic: {}, queue: {}, offset: {}", topic, queueId, offset); + log.trace("GetMessageAsync from current store, " + + "topic: {}, queue: {}, offset: {}, maxCount: {}", topic, queueId, offset, maxMsgNums); } else { - logger.trace("GetMessageAsync from next store, topic: {}, queue: {}, offset: {}", topic, queueId, offset); + log.trace("GetMessageAsync from remote store, " + + "topic: {}, queue: {}, offset: {}, maxCount: {}", topic, queueId, offset, maxMsgNums); return next.getMessageAsync(group, topic, queueId, offset, maxMsgNums, messageFilter); } @@ -179,7 +230,7 @@ public CompletableFuture getMessageAsync(String group, String if (next.checkInStoreByConsumeOffset(topic, queueId, offset)) { TieredStoreMetricsManager.fallbackTotal.add(1, latencyAttributes); - logger.debug("GetMessageAsync not found, then back to next store, result: {}, " + + log.debug("GetMessageAsync not found, then back to next store, result: {}, " + "topic: {}, queue: {}, queue offset: {}, offset range: {}-{}", result.getStatus(), topic, queueId, offset, result.getMinOffset(), result.getMaxOffset()); return next.getMessage(group, topic, queueId, offset, maxMsgNums, messageFilter); @@ -187,10 +238,12 @@ public CompletableFuture getMessageAsync(String group, String } if (result.getStatus() != GetMessageStatus.FOUND && + result.getStatus() != GetMessageStatus.NO_MESSAGE_IN_QUEUE && result.getStatus() != GetMessageStatus.NO_MATCHED_LOGIC_QUEUE && + result.getStatus() != GetMessageStatus.OFFSET_TOO_SMALL && result.getStatus() != GetMessageStatus.OFFSET_OVERFLOW_ONE && result.getStatus() != GetMessageStatus.OFFSET_OVERFLOW_BADLY) { - logger.warn("GetMessageAsync not found and message is not in next store, result: {}, " + + log.warn("GetMessageAsync not found and message is not in next store, result: {}, " + "topic: {}, queue: {}, queue offset: {}, offset range: {}-{}", result.getStatus(), topic, queueId, offset, result.getMinOffset(), result.getMaxOffset()); } @@ -201,6 +254,10 @@ public CompletableFuture getMessageAsync(String group, String .put(TieredStoreMetricsConstant.LABEL_GROUP, group) .build(); TieredStoreMetricsManager.messagesOutTotal.add(result.getMessageCount(), messagesOutAttributes); + + if (next.getStoreStatsService() != null) { + next.getStoreStatsService().getGetMessageTransferredMsgCount().add(result.getMessageCount()); + } } // Fix min or max offset according next store at last @@ -211,29 +268,24 @@ public CompletableFuture getMessageAsync(String group, String // In general, the local cq offset is slightly 
greater than the commit offset in read message, // so there is no need to update the maximum offset to the local cq offset here, - // otherwise it will cause repeated consumption after next begin offset over commit offset. + // otherwise it will cause repeated consumption after next start offset over commit offset. if (storeConfig.isRecordGetMessageResult()) { - logger.info("GetMessageAsync result, {}, group: {}, topic: {}, queueId: {}, offset: {}, count:{}", + log.info("GetMessageAsync result, {}, group: {}, topic: {}, queueId: {}, offset: {}, count:{}", result, group, topic, queueId, offset, maxMsgNums); } return result; }).exceptionally(e -> { - logger.error("GetMessageAsync from tiered store failed", e); + log.error("GetMessageAsync from tiered store failed", e); return next.getMessage(group, topic, queueId, offset, maxMsgNums, messageFilter); }); } - @Override - public CompletableFuture asyncPutMessage(MessageExtBrokerInner msg) { - return super.asyncPutMessage(msg); - } - @Override public long getMinOffsetInQueue(String topic, int queueId) { long minOffsetInNextStore = next.getMinOffsetInQueue(topic, queueId); - CompositeFlatFile flatFile = flatFileManager.getFlatFile(new MessageQueue(topic, brokerName, queueId)); + FlatMessageFile flatFile = flatFileStore.getFlatFile(new MessageQueue(topic, brokerName, queueId)); if (flatFile == null) { return minOffsetInNextStore; } @@ -262,7 +314,7 @@ public CompletableFuture getEarliestMessageTimeAsync(String topic, int que .build(); TieredStoreMetricsManager.apiLatency.record(stopwatch.elapsed(TimeUnit.MILLISECONDS), latencyAttributes); if (time < 0) { - logger.debug("GetEarliestMessageTimeAsync failed, try to get earliest message time from next store: topic: {}, queue: {}", + log.debug("GetEarliestMessageTimeAsync failed, try to get earliest message time from next store: topic: {}, queue: {}", topic, queueId); return finalNextEarliestMessageTime != Long.MAX_VALUE ? 
finalNextEarliestMessageTime : -1; } @@ -278,12 +330,13 @@ public CompletableFuture getMessageStoreTimeStampAsync(String topic, int q return fetcher.getMessageStoreTimeStampAsync(topic, queueId, consumeQueueOffset) .thenApply(time -> { Attributes latencyAttributes = TieredStoreMetricsManager.newAttributesBuilder() - .put(TieredStoreMetricsConstant.LABEL_OPERATION, TieredStoreMetricsConstant.OPERATION_API_GET_TIME_BY_OFFSET) + .put(TieredStoreMetricsConstant.LABEL_OPERATION, + TieredStoreMetricsConstant.OPERATION_API_GET_TIME_BY_OFFSET) .put(TieredStoreMetricsConstant.LABEL_TOPIC, topic) .build(); TieredStoreMetricsManager.apiLatency.record(stopwatch.elapsed(TimeUnit.MILLISECONDS), latencyAttributes); if (time == -1) { - logger.debug("GetEarliestMessageTimeAsync failed, try to get message time from next store, topic: {}, queue: {}, queue offset: {}", + log.debug("GetEarliestMessageTimeAsync failed, try to get message time from next store, topic: {}, queue: {}, queue offset: {}", topic, queueId, consumeQueueOffset); return next.getMessageStoreTimeStamp(topic, queueId, consumeQueueOffset); } @@ -300,13 +353,8 @@ public long getOffsetInQueueByTime(String topic, int queueId, long timestamp) { @Override public long getOffsetInQueueByTime(String topic, int queueId, long timestamp, BoundaryType boundaryType) { - long earliestTimeInNextStore = next.getEarliestMessageTime(); - if (earliestTimeInNextStore <= 0) { - logger.warn("TieredMessageStore#getOffsetInQueueByTimeAsync: get earliest message time in next store failed: {}", earliestTimeInNextStore); - return next.getOffsetInQueueByTime(topic, queueId, timestamp); - } - boolean isForce = storeConfig.getTieredStorageLevel() == TieredMessageStoreConfig.TieredStorageLevel.FORCE; - if (timestamp < earliestTimeInNextStore || isForce) { + boolean isForce = storeConfig.getTieredStorageLevel() == MessageStoreConfig.TieredStorageLevel.FORCE; + if (timestamp < next.getEarliestMessageTime() || isForce) { Stopwatch stopwatch = Stopwatch.createStarted(); long offsetInTieredStore = fetcher.getOffsetInQueueByTime(topic, queueId, timestamp, boundaryType); Attributes latencyAttributes = TieredStoreMetricsManager.newAttributesBuilder() @@ -314,7 +362,7 @@ public long getOffsetInQueueByTime(String topic, int queueId, long timestamp, Bo .put(TieredStoreMetricsConstant.LABEL_TOPIC, topic) .build(); TieredStoreMetricsManager.apiLatency.record(stopwatch.elapsed(TimeUnit.MILLISECONDS), latencyAttributes); - if (offsetInTieredStore == -1 && !isForce) { + if (offsetInTieredStore == -1L && !isForce) { return next.getOffsetInQueueByTime(topic, queueId, timestamp); } return offsetInTieredStore; @@ -332,9 +380,9 @@ public CompletableFuture queryMessageAsync(String topic, Str int maxNum, long begin, long end) { long earliestTimeInNextStore = next.getEarliestMessageTime(); if (earliestTimeInNextStore <= 0) { - logger.warn("TieredMessageStore#queryMessageAsync: get earliest message time in next store failed: {}", earliestTimeInNextStore); + log.warn("TieredMessageStore#queryMessageAsync: get earliest message time in next store failed: {}", earliestTimeInNextStore); } - boolean isForce = storeConfig.getTieredStorageLevel() == TieredMessageStoreConfig.TieredStorageLevel.FORCE; + boolean isForce = storeConfig.getTieredStorageLevel() == MessageStoreConfig.TieredStorageLevel.FORCE; QueryMessageResult result = end < earliestTimeInNextStore || isForce ? 
new QueryMessageResult() : next.queryMessage(topic, key, maxNum, begin, end); @@ -355,7 +403,7 @@ public CompletableFuture queryMessageAsync(String topic, Str return result; }); } catch (Exception e) { - logger.error("TieredMessageStore#queryMessageAsync: query message in tiered store failed", e); + log.error("TieredMessageStore#queryMessageAsync: query message in tiered store failed", e); return CompletableFuture.completedFuture(result); } } @@ -372,69 +420,61 @@ public List> getMetricsView() { @Override public void initMetrics(Meter meter, Supplier attributesBuilderSupplier) { super.initMetrics(meter, attributesBuilderSupplier); - TieredStoreMetricsManager.init(meter, attributesBuilderSupplier, storeConfig, fetcher, next); + TieredStoreMetricsManager.init(meter, attributesBuilderSupplier, storeConfig, fetcher, flatFileStore, next); } @Override - public void shutdown() { - next.shutdown(); - - dispatcher.shutdown(); - TieredFlatFileManager.getInstance(storeConfig).shutdown(); - TieredStoreExecutor.shutdown(); + public int cleanUnusedTopic(Set retainTopics) { + metadataStore.iterateTopic(topicMetadata -> { + String topic = topicMetadata.getTopic(); + if (retainTopics.contains(topic) || + TopicValidator.isSystemTopic(topic) || + MixAll.isLmq(topic)) { + return; + } + this.deleteTopics(Sets.newHashSet(topicMetadata.getTopic())); + }); + return next.cleanUnusedTopic(retainTopics); } @Override - public void destroy() { - next.destroy(); - - TieredFlatFileManager.getInstance(storeConfig).destroy(); - try { - metadataStore.destroy(); - } catch (Exception e) { - logger.error("TieredMessageStore#destroy: destroy metadata store failed", e); + public int deleteTopics(Set deleteTopics) { + for (String topic : deleteTopics) { + metadataStore.iterateQueue(topic, queueMetadata -> { + flatFileStore.destroyFile(queueMetadata.getQueue()); + }); + metadataStore.deleteTopic(topic); + log.info("MessageStore delete topic success, topicName={}", topic); } + return next.deleteTopics(deleteTopics); } @Override - public int cleanUnusedTopic(Set retainTopics) { - try { - metadataStore.iterateTopic(topicMetadata -> { - String topic = topicMetadata.getTopic(); - if (retainTopics.contains(topic) || - TopicValidator.isSystemTopic(topic) || - MixAll.isLmq(topic)) { - return; - } - this.destroyCompositeFlatFile(topicMetadata.getTopic()); - }); - } catch (Exception e) { - logger.error("TieredMessageStore#cleanUnusedTopic: iterate topic metadata failed", e); + public synchronized void shutdown() { + if (next != null) { + next.shutdown(); + } + if (dispatcher != null) { + dispatcher.shutdown(); + } + if (flatFileStore != null) { + flatFileStore.shutdown(); + } + if (storeExecutor != null) { + storeExecutor.shutdown(); } - return next.cleanUnusedTopic(retainTopics); } @Override - public int deleteTopics(Set deleteTopics) { - for (String topic : deleteTopics) { - this.destroyCompositeFlatFile(topic); + public void destroy() { + if (next != null) { + next.destroy(); } - return next.deleteTopics(deleteTopics); - } - - public void destroyCompositeFlatFile(String topic) { - try { - if (StringUtils.isBlank(topic)) { - return; - } - metadataStore.iterateQueue(topic, queueMetadata -> { - flatFileManager.destroyCompositeFile(queueMetadata.getQueue()); - }); - // delete topic metadata - metadataStore.deleteTopic(topic); - logger.info("Destroy composite flat file in message store, topic={}", topic); - } catch (Exception e) { - logger.error("Destroy composite flat file in message store failed, topic={}", topic, e); + if (flatFileStore 
!= null) { + flatFileStore.destroy(); + } + if (metadataStore != null) { + metadataStore.destroy(); } } } diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/AppendResult.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/AppendResult.java index 4482cb79be2..97cfe4d4247 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/AppendResult.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/AppendResult.java @@ -23,11 +23,6 @@ public enum AppendResult { */ SUCCESS, - /** - * The offset provided for the append operation is incorrect. - */ - OFFSET_INCORRECT, - /** * The buffer used for the append operation is full. */ @@ -38,11 +33,6 @@ public enum AppendResult { */ FILE_FULL, - /** - * There was an I/O error during the append operation. - */ - IO_ERROR, - /** * The file is closed and cannot accept more data. */ diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/FileSegmentType.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/FileSegmentType.java index a370bec00bd..d7b3c9af87b 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/FileSegmentType.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/FileSegmentType.java @@ -16,31 +16,30 @@ */ package org.apache.rocketmq.tieredstore.common; +import java.util.Arrays; + public enum FileSegmentType { + COMMIT_LOG(0), + CONSUME_QUEUE(1), + INDEX(2); - private final int type; + private final int code; - FileSegmentType(int type) { - this.type = type; + FileSegmentType(int code) { - this.code = code; } - public int getType() { - return type; + public int getCode() { + return code; } - public static FileSegmentType valueOf(int type) { - switch (type) { - case 0: - return COMMIT_LOG; - case 1: - return CONSUME_QUEUE; - case 2: - return INDEX; - default: - throw new IllegalStateException("Unexpected value: " + type); - } + public static FileSegmentType valueOf(int fileType) { + return Arrays.stream(FileSegmentType.values()) + .filter(segmentType -> segmentType.getCode() == fileType) + .findFirst() + .orElse(COMMIT_LOG); } } \ No newline at end of file diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/GetMessageResultExt.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/GetMessageResultExt.java index 2e294c1c7dc..b6016f25a37 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/GetMessageResultExt.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/GetMessageResultExt.java @@ -41,6 +41,10 @@ public List getTagCodeList() { return tagCodeList; } + /** + * Because messages fetched from object storage are read sequentially, + * message filtering is applied after the data has been retrieved. + */ public GetMessageResult doFilterMessage(MessageFilter messageFilter) { if (GetMessageStatus.FOUND != super.getStatus() || messageFilter == null) { return this; diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/InFlightRequestFuture.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/InFlightRequestFuture.java deleted file mode 100644 index fb872833a84..00000000000 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/InFlightRequestFuture.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
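One behavioral note on the FileSegmentType rework shown above: the stream-based valueOf no longer throws IllegalStateException for an unrecognized code but falls back to COMMIT_LOG. A short usage sketch of the new lookup, assuming the enum exactly as it appears in this patch; the demo class name is illustrative.

    import org.apache.rocketmq.tieredstore.common.FileSegmentType;

    public class FileSegmentTypeLookupDemo {
        public static void main(String[] args) {
            System.out.println(FileSegmentType.valueOf(0));   // COMMIT_LOG
            System.out.println(FileSegmentType.valueOf(1));   // CONSUME_QUEUE
            System.out.println(FileSegmentType.valueOf(2));   // INDEX
            // Previously an IllegalStateException; now silently maps to the default.
            System.out.println(FileSegmentType.valueOf(99));  // COMMIT_LOG
        }
    }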
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.rocketmq.tieredstore.common; - -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.stream.Collectors; -import javax.annotation.Nonnull; -import org.apache.commons.lang3.tuple.Pair; - -public class InFlightRequestFuture { - - private final long startOffset; - private final List>> futureList; - - public InFlightRequestFuture(long startOffset, @Nonnull List>> futureList) { - this.startOffset = startOffset; - this.futureList = futureList; - } - - public long getStartOffset() { - return startOffset; - } - - public CompletableFuture getFirstFuture() { - return futureList.isEmpty() ? CompletableFuture.completedFuture(-1L) : futureList.get(0).getRight(); - } - - public CompletableFuture getFuture(long queueOffset) { - if (queueOffset < startOffset) { - return CompletableFuture.completedFuture(-1L); - } - long nextRequestOffset = startOffset; - for (Pair> pair : futureList) { - nextRequestOffset += pair.getLeft(); - if (queueOffset < nextRequestOffset) { - return pair.getRight(); - } - } - return CompletableFuture.completedFuture(-1L); - } - - public CompletableFuture getLastFuture() { - return futureList.isEmpty() ? - CompletableFuture.completedFuture(-1L) : futureList.get(futureList.size() - 1).getRight(); - } - - public boolean isFirstDone() { - if (!futureList.isEmpty()) { - return futureList.get(0).getRight().isDone(); - } - return true; - } - - public boolean isAllDone() { - for (Pair> pair : futureList) { - if (!pair.getRight().isDone()) { - return false; - } - } - return true; - } - - public List> getAllFuture() { - return futureList.stream().map(Pair::getValue).collect(Collectors.toList()); - } -} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/InFlightRequestKey.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/InFlightRequestKey.java deleted file mode 100644 index 0e461a83072..00000000000 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/InFlightRequestKey.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.rocketmq.tieredstore.common; - -import com.google.common.base.Objects; - -public class InFlightRequestKey { - - private final String group; - private long offset; - private int batchSize; - private final long requestTime = System.currentTimeMillis(); - - public InFlightRequestKey(String group) { - this.group = group; - } - - public InFlightRequestKey(String group, long offset, int batchSize) { - this.group = group; - this.offset = offset; - this.batchSize = batchSize; - } - - public String getGroup() { - return group; - } - - public long getOffset() { - return offset; - } - - public int getBatchSize() { - return batchSize; - } - - public long getRequestTime() { - return requestTime; - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - InFlightRequestKey key = (InFlightRequestKey) o; - return Objects.equal(group, key.group); - } - - @Override - public int hashCode() { - return Objects.hashCode(group); - } -} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/MessageCacheKey.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/MessageCacheKey.java deleted file mode 100644 index ab06aa64d2e..00000000000 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/MessageCacheKey.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.rocketmq.tieredstore.common; - -import java.util.Objects; -import org.apache.rocketmq.tieredstore.file.CompositeFlatFile; - -public class MessageCacheKey { - - private final CompositeFlatFile flatFile; - private final long offset; - - public MessageCacheKey(CompositeFlatFile flatFile, long offset) { - this.flatFile = flatFile; - this.offset = offset; - } - - public CompositeFlatFile getFlatFile() { - return flatFile; - } - - public long getOffset() { - return offset; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - MessageCacheKey that = (MessageCacheKey) o; - return offset == that.offset && Objects.equals(flatFile, that.flatFile); - } - - @Override - public int hashCode() { - return Objects.hash(flatFile, offset); - } -} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/SelectBufferResultWrapper.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/SelectBufferResultWrapper.java deleted file mode 100644 index 4f9f00a074c..00000000000 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/SelectBufferResultWrapper.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.rocketmq.tieredstore.common; - -import java.util.concurrent.atomic.AtomicInteger; -import org.apache.rocketmq.store.SelectMappedBufferResult; - -public class SelectBufferResultWrapper { - - private final SelectMappedBufferResult result; - private final long offset; - private final long tagCode; - private final AtomicInteger accessCount; - - public SelectBufferResultWrapper(SelectMappedBufferResult result, long offset, long tagCode, boolean used) { - this.result = result; - this.offset = offset; - this.tagCode = tagCode; - this.accessCount = new AtomicInteger(used ? 
1 : 0); - } - - public SelectMappedBufferResult getDuplicateResult() { - - return new SelectMappedBufferResult( - result.getStartOffset(), - result.getByteBuffer().asReadOnlyBuffer(), - result.getSize(), - result.getMappedFile()); - } - - public long getOffset() { - return offset; - } - - public int getBufferSize() { - return this.result.getSize(); - } - - public long getTagCode() { - return tagCode; - } - - public int incrementAndGet() { - return accessCount.incrementAndGet(); - } - - public int getAccessCount() { - return accessCount.get(); - } -} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/TieredStoreExecutor.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/TieredStoreExecutor.java deleted file mode 100644 index 65d586f43dd..00000000000 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/common/TieredStoreExecutor.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.rocketmq.tieredstore.common; - -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import org.apache.rocketmq.common.ThreadFactoryImpl; -import org.apache.rocketmq.common.utils.ThreadUtils; - -public class TieredStoreExecutor { - - public static final int QUEUE_CAPACITY = 10000; - - // Visible for monitor - public static BlockingQueue dispatchThreadPoolQueue; - public static BlockingQueue fetchDataThreadPoolQueue; - public static BlockingQueue compactIndexFileThreadPoolQueue; - - public static ScheduledExecutorService commonScheduledExecutor; - public static ScheduledExecutorService commitExecutor; - public static ScheduledExecutorService cleanExpiredFileExecutor; - - public static ExecutorService dispatchExecutor; - public static ExecutorService fetchDataExecutor; - public static ExecutorService compactIndexFileExecutor; - - public static void init() { - commonScheduledExecutor = ThreadUtils.newScheduledThreadPool( - Math.max(4, Runtime.getRuntime().availableProcessors()), - new ThreadFactoryImpl("TieredCommonExecutor_")); - - commitExecutor = ThreadUtils.newScheduledThreadPool( - Math.max(16, Runtime.getRuntime().availableProcessors() * 4), - new ThreadFactoryImpl("TieredCommitExecutor_")); - - cleanExpiredFileExecutor = ThreadUtils.newScheduledThreadPool( - Math.max(4, Runtime.getRuntime().availableProcessors()), - new ThreadFactoryImpl("TieredCleanFileExecutor_")); - - dispatchThreadPoolQueue = new LinkedBlockingQueue<>(QUEUE_CAPACITY); - dispatchExecutor = ThreadUtils.newThreadPoolExecutor( - Math.max(2, 
Runtime.getRuntime().availableProcessors()), - Math.max(16, Runtime.getRuntime().availableProcessors() * 4), - 1000 * 60, - TimeUnit.MILLISECONDS, - dispatchThreadPoolQueue, - new ThreadFactoryImpl("TieredDispatchExecutor_"), - new ThreadPoolExecutor.DiscardOldestPolicy()); - - fetchDataThreadPoolQueue = new LinkedBlockingQueue<>(QUEUE_CAPACITY); - fetchDataExecutor = ThreadUtils.newThreadPoolExecutor( - Math.max(16, Runtime.getRuntime().availableProcessors() * 4), - Math.max(64, Runtime.getRuntime().availableProcessors() * 8), - 1000 * 60, - TimeUnit.MILLISECONDS, - fetchDataThreadPoolQueue, - new ThreadFactoryImpl("TieredFetchExecutor_")); - - compactIndexFileThreadPoolQueue = new LinkedBlockingQueue<>(QUEUE_CAPACITY); - compactIndexFileExecutor = ThreadUtils.newThreadPoolExecutor( - 1, - 1, - 1000 * 60, - TimeUnit.MILLISECONDS, - compactIndexFileThreadPoolQueue, - new ThreadFactoryImpl("TieredCompactIndexFileExecutor_")); - } - - public static void shutdown() { - shutdownExecutor(dispatchExecutor); - shutdownExecutor(commonScheduledExecutor); - shutdownExecutor(commitExecutor); - shutdownExecutor(cleanExpiredFileExecutor); - shutdownExecutor(fetchDataExecutor); - shutdownExecutor(compactIndexFileExecutor); - } - - private static void shutdownExecutor(ExecutorService executor) { - if (executor != null) { - executor.shutdown(); - try { - if (!executor.awaitTermination(5, TimeUnit.SECONDS)) { - executor.shutdownNow(); - } - } catch (InterruptedException e) { - executor.shutdownNow(); - } - } - } -} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/core/MessageStoreDispatcher.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/core/MessageStoreDispatcher.java new file mode 100644 index 00000000000..e1e142ad236 --- /dev/null +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/core/MessageStoreDispatcher.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
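The removed TieredStoreExecutor above shut its pools down by waiting briefly and then forcing termination; the replacement MessageStoreExecutor is instance-scoped rather than static, and its exact shutdown path is not shown in this hunk. Below is a minimal, self-contained sketch of that shutdown idiom using only JDK types; the pool, the sample task, and the five-second timeout are illustrative only.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class GracefulShutdownSketch {

    // Shut a pool down cooperatively, falling back to shutdownNow() if tasks do not finish in time.
    static void shutdownGracefully(ExecutorService executor, long timeoutSeconds) {
        if (executor == null) {
            return;
        }
        executor.shutdown();
        try {
            if (!executor.awaitTermination(timeoutSeconds, TimeUnit.SECONDS)) {
                executor.shutdownNow();
            }
        } catch (InterruptedException e) {
            executor.shutdownNow();
            Thread.currentThread().interrupt(); // preserve the interrupt status for the caller
        }
    }

    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        pool.submit(() -> System.out.println("tiered store task"));
        shutdownGracefully(pool, 5);
    }
}

Restoring the interrupt status is a small addition over the removed code, which swallowed the InterruptedException after forcing shutdown.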
+ */ + +package org.apache.rocketmq.tieredstore.core; + +import java.util.concurrent.CompletableFuture; +import org.apache.rocketmq.store.CommitLogDispatcher; +import org.apache.rocketmq.tieredstore.file.FlatFileInterface; + +public interface MessageStoreDispatcher extends CommitLogDispatcher { + + void start(); + + void shutdown(); + + CompletableFuture doScheduleDispatch(FlatFileInterface flatFile, boolean force); +} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/core/MessageStoreDispatcherImpl.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/core/MessageStoreDispatcherImpl.java new file mode 100644 index 00000000000..330872ab9cd --- /dev/null +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/core/MessageStoreDispatcherImpl.java @@ -0,0 +1,300 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.rocketmq.tieredstore.core; + +import io.opentelemetry.api.common.Attributes; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; +import org.apache.commons.lang3.StringUtils; +import org.apache.rocketmq.common.ServiceThread; +import org.apache.rocketmq.common.message.MessageConst; +import org.apache.rocketmq.common.message.MessageQueue; +import org.apache.rocketmq.store.DispatchRequest; +import org.apache.rocketmq.store.MessageStore; +import org.apache.rocketmq.store.SelectMappedBufferResult; +import org.apache.rocketmq.store.queue.ConsumeQueueInterface; +import org.apache.rocketmq.store.queue.CqUnit; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; +import org.apache.rocketmq.tieredstore.MessageStoreExecutor; +import org.apache.rocketmq.tieredstore.TieredMessageStore; +import org.apache.rocketmq.tieredstore.common.AppendResult; +import org.apache.rocketmq.tieredstore.common.FileSegmentType; +import org.apache.rocketmq.tieredstore.file.FlatFileInterface; +import org.apache.rocketmq.tieredstore.file.FlatFileStore; +import org.apache.rocketmq.tieredstore.index.IndexService; +import org.apache.rocketmq.tieredstore.metrics.TieredStoreMetricsConstant; +import org.apache.rocketmq.tieredstore.metrics.TieredStoreMetricsManager; +import org.apache.rocketmq.tieredstore.util.MessageFormatUtil; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class MessageStoreDispatcherImpl extends ServiceThread implements MessageStoreDispatcher { + + protected static final Logger log = LoggerFactory.getLogger(MessageStoreUtil.TIERED_STORE_LOGGER_NAME); + + 
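The implementation that follows gates its asynchronous dispatch with a semaphore sized from the pending-request limit and releases the permit when the returned future completes. Below is a minimal sketch of that back-pressure pattern using only JDK types; the class name, the Supplier parameter, and the permit count of 4 are illustrative and are not the actual members of MessageStoreDispatcherImpl.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Semaphore;
import java.util.function.Supplier;

public class BoundedDispatchSketch {

    // Caps the number of dispatch futures in flight; the limit of 4 is illustrative only.
    private final Semaphore semaphore = new Semaphore(4);

    public void dispatchBounded(Supplier<CompletableFuture<Boolean>> dispatchTask) throws InterruptedException {
        semaphore.acquire();
        try {
            // The permit is released when the asynchronous dispatch completes, success or failure.
            dispatchTask.get().whenComplete((ok, error) -> semaphore.release());
        } catch (RuntimeException e) {
            // The supplier failed before handing ownership of the permit to the future.
            semaphore.release();
            throw e;
        }
    }

    public static void main(String[] args) throws InterruptedException {
        BoundedDispatchSketch sketch = new BoundedDispatchSketch();
        sketch.dispatchBounded(() -> CompletableFuture.completedFuture(true));
        System.out.println("dispatch submitted");
    }
}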
protected final String brokerName; + protected final MessageStore defaultStore; + protected final MessageStoreConfig storeConfig; + protected final TieredMessageStore messageStore; + protected final FlatFileStore flatFileStore; + protected final MessageStoreExecutor storeExecutor; + protected final MessageStoreFilter topicFilter; + protected final Semaphore semaphore; + protected final IndexService indexService; + + public MessageStoreDispatcherImpl(TieredMessageStore messageStore) { + this.messageStore = messageStore; + this.storeConfig = messageStore.getStoreConfig(); + this.defaultStore = messageStore.getDefaultStore(); + this.brokerName = storeConfig.getBrokerName(); + this.semaphore = new Semaphore( + this.storeConfig.getTieredStoreMaxPendingLimit() / 4); + this.topicFilter = messageStore.getTopicFilter(); + this.flatFileStore = messageStore.getFlatFileStore(); + this.storeExecutor = messageStore.getStoreExecutor(); + this.indexService = messageStore.getIndexService(); + } + + @Override + public String getServiceName() { + return MessageStoreDispatcher.class.getSimpleName(); + } + + public void dispatchWithSemaphore(FlatFileInterface flatFile) { + try { + if (stopped) { + return; + } + semaphore.acquire(); + this.doScheduleDispatch(flatFile, false) + .whenComplete((future, throwable) -> semaphore.release()); + } catch (InterruptedException e) { + semaphore.release(); + } + } + + @Override + public void dispatch(DispatchRequest request) { + if (stopped || topicFilter != null && topicFilter.filterTopic(request.getTopic())) { + return; + } + flatFileStore.computeIfAbsent( + new MessageQueue(request.getTopic(), brokerName, request.getQueueId())); + } + + @Override + public CompletableFuture doScheduleDispatch(FlatFileInterface flatFile, boolean force) { + if (stopped) { + return CompletableFuture.completedFuture(true); + } + + String topic = flatFile.getMessageQueue().getTopic(); + int queueId = flatFile.getMessageQueue().getQueueId(); + + // For test scenarios, we set the 'force' variable to true to + // ensure that the data in the cache is directly committed successfully. + force = !storeConfig.isTieredStoreGroupCommit() || force; + if (force) { + flatFile.getFileLock().lock(); + } else { + if (!flatFile.getFileLock().tryLock()) { + return CompletableFuture.completedFuture(false); + } + } + + try { + if (topicFilter != null && topicFilter.filterTopic(flatFile.getMessageQueue().getTopic())) { + flatFileStore.destroyFile(flatFile.getMessageQueue()); + return CompletableFuture.completedFuture(false); + } + + long currentOffset = flatFile.getConsumeQueueMaxOffset(); + long commitOffset = flatFile.getConsumeQueueCommitOffset(); + long minOffsetInQueue = defaultStore.getMinOffsetInQueue(topic, queueId); + long maxOffsetInQueue = defaultStore.getMaxOffsetInQueue(topic, queueId); + + // If set to max offset here, some written messages may be lost + if (!flatFile.isFlatFileInit()) { + currentOffset = Math.max(minOffsetInQueue, + maxOffsetInQueue - storeConfig.getTieredStoreGroupCommitSize()); + flatFile.initOffset(currentOffset); + return CompletableFuture.completedFuture(true); + } + + // If the previous commit fails, attempt to trigger a commit directly. 
+ if (commitOffset < currentOffset) { + this.commitAsync(flatFile); + return CompletableFuture.completedFuture(false); + } + + if (currentOffset < minOffsetInQueue) { + log.warn("MessageDispatcher#dispatch, current offset is too small, " + + "topic={}, queueId={}, offset={}-{}, current={}", + topic, queueId, minOffsetInQueue, maxOffsetInQueue, currentOffset); + flatFileStore.destroyFile(flatFile.getMessageQueue()); + flatFileStore.computeIfAbsent(new MessageQueue(topic, brokerName, queueId)); + return CompletableFuture.completedFuture(true); + } + + if (currentOffset > maxOffsetInQueue) { + log.warn("MessageDispatcher#dispatch, current offset is too large, " + + "topic: {}, queueId: {}, offset={}-{}, current={}", + topic, queueId, minOffsetInQueue, maxOffsetInQueue, currentOffset); + return CompletableFuture.completedFuture(false); + } + + long interval = TimeUnit.HOURS.toMillis(storeConfig.getCommitLogRollingInterval()); + if (flatFile.rollingFile(interval)) { + log.info("MessageDispatcher#dispatch, rolling file, " + + "topic: {}, queueId: {}, offset={}-{}, current={}", + topic, queueId, minOffsetInQueue, maxOffsetInQueue, currentOffset); + } + + if (currentOffset == maxOffsetInQueue) { + return CompletableFuture.completedFuture(false); + } + + long bufferSize = 0L; + long groupCommitSize = storeConfig.getTieredStoreGroupCommitSize(); + long groupCommitCount = storeConfig.getTieredStoreGroupCommitCount(); + long targetOffset = Math.min(currentOffset + groupCommitCount, maxOffsetInQueue); + + ConsumeQueueInterface consumeQueue = defaultStore.getConsumeQueue(topic, queueId); + CqUnit cqUnit = consumeQueue.get(currentOffset); + SelectMappedBufferResult message = + defaultStore.selectOneMessageByOffset(cqUnit.getPos(), cqUnit.getSize()); + boolean timeout = MessageFormatUtil.getStoreTimeStamp(message.getByteBuffer()) + + storeConfig.getTieredStoreGroupCommitTimeout() < System.currentTimeMillis(); + boolean bufferFull = maxOffsetInQueue - currentOffset > storeConfig.getTieredStoreGroupCommitCount(); + + if (!timeout && !bufferFull && !force) { + log.debug("MessageDispatcher#dispatch hold, topic={}, queueId={}, offset={}-{}, current={}, remain={}", + topic, queueId, minOffsetInQueue, maxOffsetInQueue, currentOffset, maxOffsetInQueue - currentOffset); + return CompletableFuture.completedFuture(false); + } else { + if (MessageFormatUtil.getStoreTimeStamp(message.getByteBuffer()) + + TimeUnit.MINUTES.toMillis(5) < System.currentTimeMillis()) { + log.warn("MessageDispatcher#dispatch behind too much, topic={}, queueId={}, offset={}-{}, current={}, remain={}", + topic, queueId, minOffsetInQueue, maxOffsetInQueue, currentOffset, maxOffsetInQueue - currentOffset); + } else { + log.info("MessageDispatcher#dispatch, topic={}, queueId={}, offset={}-{}, current={}, remain={}", + topic, queueId, minOffsetInQueue, maxOffsetInQueue, currentOffset, maxOffsetInQueue - currentOffset); + } + } + message.release(); + + long offset = currentOffset; + for (; offset < targetOffset; offset++) { + cqUnit = consumeQueue.get(offset); + bufferSize += cqUnit.getSize(); + if (bufferSize >= groupCommitSize) { + break; + } + message = defaultStore.selectOneMessageByOffset(cqUnit.getPos(), cqUnit.getSize()); + + ByteBuffer byteBuffer = message.getByteBuffer(); + AppendResult result = flatFile.appendCommitLog(message); + if (!AppendResult.SUCCESS.equals(result)) { + break; + } + + long mappedCommitLogOffset = flatFile.getCommitLogMaxOffset() - byteBuffer.remaining(); + Map properties = 
MessageFormatUtil.getProperties(byteBuffer); + + DispatchRequest dispatchRequest = new DispatchRequest(topic, queueId, mappedCommitLogOffset, + cqUnit.getSize(), cqUnit.getTagsCode(), MessageFormatUtil.getStoreTimeStamp(byteBuffer), + cqUnit.getQueueOffset(), properties.getOrDefault(MessageConst.PROPERTY_KEYS, ""), + properties.getOrDefault(MessageConst.PROPERTY_UNIQ_CLIENT_MESSAGE_ID_KEYIDX, ""), + 0, 0, new HashMap<>()); + dispatchRequest.setOffsetId(MessageFormatUtil.getOffsetId(byteBuffer)); + + result = flatFile.appendConsumeQueue(dispatchRequest); + if (!AppendResult.SUCCESS.equals(result)) { + break; + } + } + + // If there are many messages waiting to be uploaded, call the upload logic immediately. + boolean repeat = timeout || maxOffsetInQueue - offset > storeConfig.getTieredStoreGroupCommitCount(); + + if (!flatFile.getDispatchRequestList().isEmpty()) { + Attributes attributes = TieredStoreMetricsManager.newAttributesBuilder() + .put(TieredStoreMetricsConstant.LABEL_TOPIC, topic) + .put(TieredStoreMetricsConstant.LABEL_QUEUE_ID, queueId) + .put(TieredStoreMetricsConstant.LABEL_FILE_TYPE, FileSegmentType.COMMIT_LOG.name().toLowerCase()) + .build(); + TieredStoreMetricsManager.messagesDispatchTotal.add(offset - currentOffset, attributes); + + this.commitAsync(flatFile).whenComplete((unused, throwable) -> { + if (repeat) { + storeExecutor.commonExecutor.submit(() -> dispatchWithSemaphore(flatFile)); + } + } + ); + } + } finally { + flatFile.getFileLock().unlock(); + } + return CompletableFuture.completedFuture(false); + } + + public CompletableFuture commitAsync(FlatFileInterface flatFile) { + return flatFile.commitAsync().thenAcceptAsync(success -> { + if (success) { + if (storeConfig.isMessageIndexEnable()) { + flatFile.getDispatchRequestList().forEach( + request -> constructIndexFile(flatFile.getTopicId(), request)); + } + flatFile.release(); + } + }, MessageStoreExecutor.getInstance().bufferCommitExecutor); + } + + /** + * Building indexes with offsetId is no longer supported because offsetId has changed in tiered storage + */ + public void constructIndexFile(long topicId, DispatchRequest request) { + Set keySet = new HashSet<>(); + if (StringUtils.isNotBlank(request.getUniqKey())) { + keySet.add(request.getUniqKey()); + } + if (StringUtils.isNotBlank(request.getKeys())) { + keySet.addAll(Arrays.asList(request.getKeys().split(MessageConst.KEY_SEPARATOR))); + } + indexService.putKey(request.getTopic(), (int) topicId, request.getQueueId(), keySet, + request.getCommitLogOffset(), request.getMsgSize(), request.getStoreTimestamp()); + } + + @Override + public void run() { + log.info("{} service started", this.getServiceName()); + while (!this.isStopped()) { + flatFileStore.deepCopyFlatFileToList().forEach(this::dispatchWithSemaphore); + this.waitForRunning(Duration.ofSeconds(20).toMillis()); + } + log.info("{} service shutdown", this.getServiceName()); + } +} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/MessageStoreFetcher.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/core/MessageStoreFetcher.java similarity index 98% rename from tieredstore/src/main/java/org/apache/rocketmq/tieredstore/MessageStoreFetcher.java rename to tieredstore/src/main/java/org/apache/rocketmq/tieredstore/core/MessageStoreFetcher.java index 8ae4dc7f9ef..8e2e8bdef59 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/MessageStoreFetcher.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/core/MessageStoreFetcher.java @@ -15,13 
+15,13 @@ * limitations under the License. */ -package org.apache.rocketmq.tieredstore; +package org.apache.rocketmq.tieredstore.core; import java.util.concurrent.CompletableFuture; +import org.apache.rocketmq.common.BoundaryType; import org.apache.rocketmq.store.GetMessageResult; import org.apache.rocketmq.store.MessageFilter; import org.apache.rocketmq.store.QueryMessageResult; -import org.apache.rocketmq.common.BoundaryType; public interface MessageStoreFetcher { diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/core/MessageStoreFetcherImpl.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/core/MessageStoreFetcherImpl.java new file mode 100644 index 00000000000..2ffad2e3f4c --- /dev/null +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/core/MessageStoreFetcherImpl.java @@ -0,0 +1,427 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.rocketmq.tieredstore.core; + +import com.github.benmanes.caffeine.cache.Cache; +import com.github.benmanes.caffeine.cache.Caffeine; +import com.github.benmanes.caffeine.cache.Scheduler; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import org.apache.rocketmq.common.BoundaryType; +import org.apache.rocketmq.common.message.MessageQueue; +import org.apache.rocketmq.store.GetMessageResult; +import org.apache.rocketmq.store.GetMessageStatus; +import org.apache.rocketmq.store.MessageFilter; +import org.apache.rocketmq.store.QueryMessageResult; +import org.apache.rocketmq.store.SelectMappedBufferResult; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; +import org.apache.rocketmq.tieredstore.TieredMessageStore; +import org.apache.rocketmq.tieredstore.common.GetMessageResultExt; +import org.apache.rocketmq.tieredstore.common.SelectBufferResult; +import org.apache.rocketmq.tieredstore.exception.TieredStoreException; +import org.apache.rocketmq.tieredstore.file.FlatFileStore; +import org.apache.rocketmq.tieredstore.file.FlatMessageFile; +import org.apache.rocketmq.tieredstore.index.IndexItem; +import org.apache.rocketmq.tieredstore.metadata.MetadataStore; +import org.apache.rocketmq.tieredstore.metadata.entity.TopicMetadata; +import org.apache.rocketmq.tieredstore.util.MessageFormatUtil; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class MessageStoreFetcherImpl implements MessageStoreFetcher { + + private static final Logger log = LoggerFactory.getLogger(MessageStoreUtil.TIERED_STORE_LOGGER_NAME); + + private static final String CACHE_KEY_FORMAT = "%s@%d@%d"; + + private final String brokerName; + private final MetadataStore 
metadataStore; + private final MessageStoreConfig storeConfig; + private final TieredMessageStore messageStore; + private final FlatFileStore flatFileStore; + private final long memoryMaxSize; + private final Cache fetcherCache; + + public MessageStoreFetcherImpl(TieredMessageStore messageStore) { + this.storeConfig = messageStore.getStoreConfig(); + this.brokerName = storeConfig.getBrokerName(); + this.flatFileStore = messageStore.getFlatFileStore(); + this.messageStore = messageStore; + this.metadataStore = flatFileStore.getMetadataStore(); + this.memoryMaxSize = + (long) (Runtime.getRuntime().maxMemory() * storeConfig.getReadAheadCacheSizeThresholdRate()); + this.fetcherCache = this.initCache(storeConfig); + log.info("MessageStoreFetcher init success, brokerName={}", storeConfig.getBrokerName()); + } + + private Cache initCache(MessageStoreConfig storeConfig) { + + return Caffeine.newBuilder() + .scheduler(Scheduler.systemScheduler()) + .expireAfterWrite(storeConfig.getReadAheadCacheExpireDuration(), TimeUnit.MILLISECONDS) + .maximumWeight(memoryMaxSize) + // Using the buffer size of messages to calculate memory usage + .weigher((String key, SelectBufferResult buffer) -> buffer.getSize()) + .recordStats() + .build(); + } + + public Cache getFetcherCache() { + return fetcherCache; + } + + protected void putMessageToCache(FlatMessageFile flatFile, long offset, SelectBufferResult result) { + MessageQueue mq = flatFile.getMessageQueue(); + this.fetcherCache.put(String.format(CACHE_KEY_FORMAT, mq.getTopic(), mq.getQueueId(), offset), result); + } + + protected SelectBufferResult getMessageFromCache(FlatMessageFile flatFile, long offset) { + MessageQueue mq = flatFile.getMessageQueue(); + SelectBufferResult buffer = this.fetcherCache.getIfPresent( + String.format(CACHE_KEY_FORMAT, mq.getTopic(), mq.getQueueId(), offset)); + // return duplicate buffer here + return buffer == null ? null : new SelectBufferResult( + buffer.getByteBuffer().asReadOnlyBuffer(), buffer.getStartOffset(), buffer.getSize(), buffer.getTagCode()); + } + + protected GetMessageResultExt getMessageFromCache(FlatMessageFile flatFile, long offset, int maxCount) { + GetMessageResultExt result = new GetMessageResultExt(); + for (long i = offset; i < offset + maxCount; i++) { + SelectBufferResult buffer = getMessageFromCache(flatFile, i); + if (buffer == null) { + break; + } + SelectMappedBufferResult bufferResult = new SelectMappedBufferResult( + buffer.getStartOffset(), buffer.getByteBuffer(), buffer.getSize(), null); + result.addMessageExt(bufferResult, i, buffer.getTagCode()); + } + result.setStatus(result.getMessageCount() > 0 ? 
+ GetMessageStatus.FOUND : GetMessageStatus.OFFSET_OVERFLOW_ONE); + result.setMinOffset(flatFile.getConsumeQueueMinOffset()); + result.setMaxOffset(flatFile.getConsumeQueueCommitOffset()); + result.setNextBeginOffset(offset + result.getMessageCount()); + return result; + } + + protected CompletableFuture fetchMessageThenPutToCache( + FlatMessageFile flatFile, long queueOffset, int batchSize) { + + MessageQueue mq = flatFile.getMessageQueue(); + return this.getMessageFromTieredStoreAsync(flatFile, queueOffset, batchSize) + .thenApply(result -> { + if (result.getStatus() == GetMessageStatus.OFFSET_OVERFLOW_ONE || + result.getStatus() == GetMessageStatus.OFFSET_OVERFLOW_BADLY) { + return -1L; + } + if (result.getStatus() != GetMessageStatus.FOUND) { + log.warn("MessageFetcher prefetch message then put to cache failed, result={}, " + + "topic={}, queue={}, queue offset={}, batch size={}", + result.getStatus(), mq.getTopic(), mq.getQueueId(), queueOffset, batchSize); + return -1L; + } + List offsetList = result.getMessageQueueOffset(); + List tagCodeList = result.getTagCodeList(); + List msgList = result.getMessageMapedList(); + for (int i = 0; i < offsetList.size(); i++) { + SelectMappedBufferResult msg = msgList.get(i); + SelectBufferResult bufferResult = new SelectBufferResult( + msg.getByteBuffer(), msg.getStartOffset(), msg.getSize(), tagCodeList.get(i)); + this.putMessageToCache(flatFile, queueOffset + i, bufferResult); + } + return offsetList.get(offsetList.size() - 1); + }); + } + + public CompletableFuture getMessageFromCacheAsync( + FlatMessageFile flatFile, String group, long queueOffset, int maxCount) { + + MessageQueue mq = flatFile.getMessageQueue(); + GetMessageResultExt result = getMessageFromCache(flatFile, queueOffset, maxCount); + + if (GetMessageStatus.FOUND.equals(result.getStatus())) { + log.debug("MessageFetcher cache hit, group={}, topic={}, queueId={}, offset={}, maxCount={}, resultSize={}, lag={}", + group, mq.getTopic(), mq.getQueueId(), queueOffset, maxCount, + result.getMessageCount(), result.getMaxOffset() - result.getNextBeginOffset()); + return CompletableFuture.completedFuture(result); + } + + // If cache miss, pull messages immediately + log.debug("MessageFetcher cache miss, group={}, topic={}, queueId={}, offset={}, maxCount={}, lag={}", + group, mq.getTopic(), mq.getQueueId(), queueOffset, maxCount, result.getMaxOffset() - result.getNextBeginOffset()); + + return fetchMessageThenPutToCache(flatFile, queueOffset, storeConfig.getReadAheadMessageCountThreshold()) + .thenApply(maxOffset -> getMessageFromCache(flatFile, queueOffset, maxCount)); + } + + public CompletableFuture getMessageFromTieredStoreAsync( + FlatMessageFile flatFile, long queueOffset, int batchSize) { + + GetMessageResultExt result = new GetMessageResultExt(); + result.setMinOffset(flatFile.getConsumeQueueMinOffset()); + result.setMaxOffset(flatFile.getConsumeQueueCommitOffset()); + + if (queueOffset < result.getMinOffset()) { + result.setStatus(GetMessageStatus.OFFSET_TOO_SMALL); + result.setNextBeginOffset(result.getMinOffset()); + return CompletableFuture.completedFuture(result); + } else if (queueOffset == result.getMaxOffset()) { + result.setStatus(GetMessageStatus.OFFSET_OVERFLOW_ONE); + result.setNextBeginOffset(queueOffset); + return CompletableFuture.completedFuture(result); + } else if (queueOffset > result.getMaxOffset()) { + result.setStatus(GetMessageStatus.OFFSET_OVERFLOW_BADLY); + result.setNextBeginOffset(result.getMaxOffset()); + return CompletableFuture.completedFuture(result); 
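The branch above maps a requested consume queue offset onto the readable window whose lower bound is the minimum offset and whose upper bound is the commit offset, that is, the next position to be written. Below is a condensed restatement of that mapping as a pure function; the class and method names are illustrative, and the real code only reports FOUND after messages have actually been read.

import org.apache.rocketmq.store.GetMessageStatus;

public class OffsetWindowSketch {

    // Classify a requested consume queue offset against the readable window
    // [minOffset, commitOffset), where commitOffset is the next offset to be written.
    static GetMessageStatus classify(long queueOffset, long minOffset, long commitOffset) {
        if (queueOffset < minOffset) {
            return GetMessageStatus.OFFSET_TOO_SMALL;
        }
        if (queueOffset == commitOffset) {
            return GetMessageStatus.OFFSET_OVERFLOW_ONE;
        }
        if (queueOffset > commitOffset) {
            return GetMessageStatus.OFFSET_OVERFLOW_BADLY;
        }
        // In range; the actual fetcher proceeds to read and only then sets FOUND.
        return GetMessageStatus.FOUND;
    }

    public static void main(String[] args) {
        System.out.println(classify(5L, 0L, 10L));  // FOUND
        System.out.println(classify(10L, 0L, 10L)); // OFFSET_OVERFLOW_ONE
        System.out.println(classify(42L, 0L, 10L)); // OFFSET_OVERFLOW_BADLY
    }
}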
+ } + + if (queueOffset < result.getMaxOffset()) { + batchSize = Math.min(batchSize, (int) Math.min( + result.getMaxOffset() - queueOffset, storeConfig.getReadAheadMessageCountThreshold())); + } + + CompletableFuture readConsumeQueueFuture; + try { + readConsumeQueueFuture = flatFile.getConsumeQueueAsync(queueOffset, batchSize); + } catch (TieredStoreException e) { + switch (e.getErrorCode()) { + case ILLEGAL_PARAM: + case ILLEGAL_OFFSET: + default: + result.setStatus(GetMessageStatus.OFFSET_FOUND_NULL); + result.setNextBeginOffset(queueOffset); + return CompletableFuture.completedFuture(result); + } + } + + int finalBatchSize = batchSize; + CompletableFuture readCommitLogFuture = readConsumeQueueFuture.thenCompose(cqBuffer -> { + + long firstCommitLogOffset = MessageFormatUtil.getCommitLogOffsetFromItem(cqBuffer); + cqBuffer.position(cqBuffer.remaining() - MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE); + long lastCommitLogOffset = MessageFormatUtil.getCommitLogOffsetFromItem(cqBuffer); + if (lastCommitLogOffset < firstCommitLogOffset) { + log.error("MessageFetcher#getMessageFromTieredStoreAsync, last offset is smaller than first offset, " + + "topic={} queueId={}, offset={}, firstOffset={}, lastOffset={}", + flatFile.getMessageQueue().getTopic(), flatFile.getMessageQueue().getQueueId(), queueOffset, + firstCommitLogOffset, lastCommitLogOffset); + return CompletableFuture.completedFuture(ByteBuffer.allocate(0)); + } + + // Get at least one message + // Reducing the length limit of cq to prevent OOM + long length = lastCommitLogOffset - firstCommitLogOffset + MessageFormatUtil.getSizeFromItem(cqBuffer); + while (cqBuffer.limit() > MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE && + length > storeConfig.getReadAheadMessageSizeThreshold()) { + cqBuffer.limit(cqBuffer.position()); + cqBuffer.position(cqBuffer.limit() - MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE); + length = MessageFormatUtil.getCommitLogOffsetFromItem(cqBuffer) + - firstCommitLogOffset + MessageFormatUtil.getSizeFromItem(cqBuffer); + } + int messageCount = cqBuffer.position() / MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE + 1; + + log.info("MessageFetcher#getMessageFromTieredStoreAsync, " + + "topic={}, queueId={}, broker offset={}-{}, offset={}, expect={}, actually={}, lag={}", + flatFile.getMessageQueue().getTopic(), flatFile.getMessageQueue().getQueueId(), + result.getMinOffset(), result.getMaxOffset(), queueOffset, finalBatchSize, + messageCount, result.getMaxOffset() - queueOffset); + + return flatFile.getCommitLogAsync(firstCommitLogOffset, (int) length); + }); + + return readConsumeQueueFuture.thenCombine(readCommitLogFuture, (cqBuffer, msgBuffer) -> { + List bufferList = MessageFormatUtil.splitMessageBuffer(cqBuffer, msgBuffer); + int requestSize = cqBuffer.remaining() / MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE; + + // not use buffer list size to calculate next offset to prevent split error + if (bufferList.isEmpty()) { + result.setStatus(GetMessageStatus.NO_MATCHED_MESSAGE); + result.setNextBeginOffset(queueOffset + requestSize); + } else { + result.setStatus(GetMessageStatus.FOUND); + result.setNextBeginOffset(queueOffset + requestSize); + + for (SelectBufferResult bufferResult : bufferList) { + ByteBuffer slice = bufferResult.getByteBuffer().slice(); + slice.limit(bufferResult.getSize()); + SelectMappedBufferResult msg = new SelectMappedBufferResult(bufferResult.getStartOffset(), + bufferResult.getByteBuffer(), bufferResult.getSize(), null); + result.addMessageExt(msg, MessageFormatUtil.getQueueOffset(slice), 
bufferResult.getTagCode()); + } + } + return result; + }).exceptionally(e -> { + MessageQueue mq = flatFile.getMessageQueue(); + log.warn("MessageFetcher#getMessageFromTieredStoreAsync failed, " + + "topic={} queueId={}, offset={}, batchSize={}", mq.getTopic(), mq.getQueueId(), queueOffset, finalBatchSize, e); + result.setStatus(GetMessageStatus.OFFSET_FOUND_NULL); + result.setNextBeginOffset(queueOffset); + return result; + }); + } + + @Override + public CompletableFuture getMessageAsync( + String group, String topic, int queueId, long queueOffset, int maxCount, final MessageFilter messageFilter) { + + GetMessageResult result = new GetMessageResult(); + FlatMessageFile flatFile = flatFileStore.getFlatFile(new MessageQueue(topic, brokerName, queueId)); + + if (flatFile == null) { + result.setNextBeginOffset(queueOffset); + result.setStatus(GetMessageStatus.NO_MATCHED_LOGIC_QUEUE); + return CompletableFuture.completedFuture(result); + } + + // Max queue offset means next message put position + result.setMinOffset(flatFile.getConsumeQueueMinOffset()); + result.setMaxOffset(flatFile.getConsumeQueueCommitOffset()); + + // Fill result according file offset. + // Offset range | Result | Fix to + // (-oo, 0] | no message | current offset + // (0, min) | too small | min offset + // [min, max) | correct | + // [max, max] | overflow one | max offset + // (max, +oo) | overflow badly | max offset + + if (result.getMaxOffset() <= 0) { + result.setStatus(GetMessageStatus.NO_MESSAGE_IN_QUEUE); + result.setNextBeginOffset(queueOffset); + return CompletableFuture.completedFuture(result); + } else if (queueOffset < result.getMinOffset()) { + result.setStatus(GetMessageStatus.OFFSET_TOO_SMALL); + result.setNextBeginOffset(result.getMinOffset()); + return CompletableFuture.completedFuture(result); + } else if (queueOffset == result.getMaxOffset()) { + result.setStatus(GetMessageStatus.OFFSET_OVERFLOW_ONE); + result.setNextBeginOffset(result.getMaxOffset()); + return CompletableFuture.completedFuture(result); + } else if (queueOffset > result.getMaxOffset()) { + result.setStatus(GetMessageStatus.OFFSET_OVERFLOW_BADLY); + result.setNextBeginOffset(result.getMaxOffset()); + return CompletableFuture.completedFuture(result); + } + + boolean cacheBusy = fetcherCache.estimatedSize() > memoryMaxSize * 0.8; + if (storeConfig.isReadAheadCacheEnable() && !cacheBusy) { + return getMessageFromCacheAsync(flatFile, group, queueOffset, maxCount) + .thenApply(messageResultExt -> messageResultExt.doFilterMessage(messageFilter)); + } else { + return getMessageFromTieredStoreAsync(flatFile, queueOffset, maxCount) + .thenApply(messageResultExt -> messageResultExt.doFilterMessage(messageFilter)); + } + } + + @Override + public CompletableFuture getEarliestMessageTimeAsync(String topic, int queueId) { + FlatMessageFile flatFile = flatFileStore.getFlatFile(new MessageQueue(topic, brokerName, queueId)); + if (flatFile == null) { + return CompletableFuture.completedFuture(-1L); + } + + // read from timestamp to timestamp + length + int length = MessageFormatUtil.STORE_TIMESTAMP_POSITION + 8; + return flatFile.getCommitLogAsync(flatFile.getCommitLogMinOffset(), length) + .thenApply(MessageFormatUtil::getStoreTimeStamp); + } + + @Override + public CompletableFuture getMessageStoreTimeStampAsync(String topic, int queueId, long queueOffset) { + FlatMessageFile flatFile = flatFileStore.getFlatFile(new MessageQueue(topic, brokerName, queueId)); + if (flatFile == null) { + return CompletableFuture.completedFuture(-1L); + } + + return 
flatFile.getConsumeQueueAsync(queueOffset) + .thenComposeAsync(cqItem -> { + long commitLogOffset = MessageFormatUtil.getCommitLogOffsetFromItem(cqItem); + int size = MessageFormatUtil.getSizeFromItem(cqItem); + return flatFile.getCommitLogAsync(commitLogOffset, size); + }, messageStore.getStoreExecutor().bufferFetchExecutor) + .thenApply(MessageFormatUtil::getStoreTimeStamp) + .exceptionally(e -> { + log.error("MessageStoreFetcherImpl#getMessageStoreTimeStampAsync: " + + "get or decode message failed, topic={}, queue={}, offset={}", topic, queueId, queueOffset, e); + return -1L; + }); + } + + @Override + public long getOffsetInQueueByTime(String topic, int queueId, long timestamp, BoundaryType type) { + FlatMessageFile flatFile = flatFileStore.getFlatFile(new MessageQueue(topic, brokerName, queueId)); + if (flatFile == null) { + return -1L; + } + return flatFile.getQueueOffsetByTimeAsync(timestamp, type).join(); + } + + @Override + public CompletableFuture queryMessageAsync( + String topic, String key, int maxCount, long begin, long end) { + + long topicId; + try { + TopicMetadata topicMetadata = metadataStore.getTopic(topic); + if (topicMetadata == null) { + log.info("MessageFetcher#queryMessageAsync, topic metadata not found, topic={}", topic); + return CompletableFuture.completedFuture(new QueryMessageResult()); + } + topicId = topicMetadata.getTopicId(); + } catch (Exception e) { + log.error("MessageFetcher#queryMessageAsync, get topic id failed, topic={}", topic, e); + return CompletableFuture.completedFuture(new QueryMessageResult()); + } + + CompletableFuture> future = + messageStore.getIndexService().queryAsync(topic, key, maxCount, begin, end); + + return future.thenCompose(indexItemList -> { + QueryMessageResult result = new QueryMessageResult(); + List> futureList = new ArrayList<>(maxCount); + for (IndexItem indexItem : indexItemList) { + if (topicId != indexItem.getTopicId()) { + continue; + } + FlatMessageFile flatFile = + flatFileStore.getFlatFile(new MessageQueue(topic, brokerName, indexItem.getQueueId())); + if (flatFile == null) { + continue; + } + CompletableFuture getMessageFuture = flatFile + .getCommitLogAsync(indexItem.getOffset(), indexItem.getSize()) + .thenAccept(messageBuffer -> result.addMessage( + new SelectMappedBufferResult( + indexItem.getOffset(), messageBuffer, indexItem.getSize(), null))); + futureList.add(getMessageFuture); + if (futureList.size() >= maxCount) { + break; + } + } + return CompletableFuture.allOf(futureList.toArray(new CompletableFuture[0])).thenApply(v -> result); + }).whenComplete((result, throwable) -> { + if (result != null) { + log.info("MessageFetcher#queryMessageAsync, " + + "query result={}, topic={}, topicId={}, key={}, maxCount={}, timestamp={}-{}", + result.getMessageBufferList().size(), topic, topicId, key, maxCount, begin, end); + } + }); + } +} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/TieredStoreTopicFilter.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/core/MessageStoreFilter.java similarity index 90% rename from tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/TieredStoreTopicFilter.java rename to tieredstore/src/main/java/org/apache/rocketmq/tieredstore/core/MessageStoreFilter.java index f983ed6e961..524761fd2b1 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/TieredStoreTopicFilter.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/core/MessageStoreFilter.java @@ -15,9 +15,9 @@ * limitations under 
the License. */ -package org.apache.rocketmq.tieredstore.provider; +package org.apache.rocketmq.tieredstore.core; -public interface TieredStoreTopicFilter { +public interface MessageStoreFilter { boolean filterTopic(String topicName); diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/TieredStoreTopicBlackListFilter.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/core/MessageStoreTopicFilter.java similarity index 63% rename from tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/TieredStoreTopicBlackListFilter.java rename to tieredstore/src/main/java/org/apache/rocketmq/tieredstore/core/MessageStoreTopicFilter.java index f8bf165bc11..b64f163eb23 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/TieredStoreTopicBlackListFilter.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/core/MessageStoreTopicFilter.java @@ -15,19 +15,24 @@ * limitations under the License. */ -package org.apache.rocketmq.tieredstore.provider; +package org.apache.rocketmq.tieredstore.core; import java.util.HashSet; import java.util.Set; import org.apache.commons.lang3.StringUtils; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; +import org.apache.rocketmq.common.MixAll; +import org.apache.rocketmq.common.PopAckConstants; +import org.apache.rocketmq.common.topic.TopicValidator; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; -public class TieredStoreTopicBlackListFilter implements TieredStoreTopicFilter { +public class MessageStoreTopicFilter implements MessageStoreFilter { private final Set topicBlackSet; - public TieredStoreTopicBlackListFilter() { + public MessageStoreTopicFilter(MessageStoreConfig storeConfig) { this.topicBlackSet = new HashSet<>(); + this.topicBlackSet.add(storeConfig.getBrokerClusterName()); + this.topicBlackSet.add(storeConfig.getBrokerName()); } @Override @@ -35,7 +40,10 @@ public boolean filterTopic(String topicName) { if (StringUtils.isBlank(topicName)) { return true; } - return TieredStoreUtil.isSystemTopic(topicName) || topicBlackSet.contains(topicName); + return TopicValidator.isSystemTopic(topicName) || + PopAckConstants.isStartWithRevivePrefix(topicName) || + this.topicBlackSet.contains(topicName) || + MixAll.isLmq(topicName); } @Override diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/exception/TieredStoreException.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/exception/TieredStoreException.java index 1c85181329c..3841643299b 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/exception/TieredStoreException.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/exception/TieredStoreException.java @@ -18,28 +18,25 @@ public class TieredStoreException extends RuntimeException { - private TieredStoreErrorCode errorCode; - private long position = -1; - + private final TieredStoreErrorCode errorCode; private String requestId; + private long position = -1L; public TieredStoreException(TieredStoreErrorCode errorCode, String errorMessage) { super(errorMessage); this.errorCode = errorCode; } - public TieredStoreException(TieredStoreErrorCode errorCode, String errorMessage, String requestId) { - super(errorMessage); - this.errorCode = errorCode; - this.requestId = requestId; - } - public TieredStoreErrorCode getErrorCode() { return errorCode; } - public void setErrorCode(TieredStoreErrorCode errorCode) { - this.errorCode = errorCode; + public String getRequestId() { + return requestId; + } 
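With the error code now final and a request id settable through the accessor below, a storage provider can attach backend context to a failure before rethrowing it. A small usage sketch assuming the ILLEGAL_OFFSET code that appears elsewhere in this patch; the request id string and the position value are invented for illustration.

import org.apache.rocketmq.tieredstore.exception.TieredStoreErrorCode;
import org.apache.rocketmq.tieredstore.exception.TieredStoreException;

public class ExceptionContextSketch {

    // Build an exception carrying backend context; the request id and failing
    // position are illustrative values, not output of any real backend.
    static TieredStoreException readFailure(long position) {
        TieredStoreException exception = new TieredStoreException(
            TieredStoreErrorCode.ILLEGAL_OFFSET, "requested position is outside the segment");
        exception.setRequestId("req-123");
        exception.setPosition(position);
        return exception;
    }

    public static void main(String[] args) {
        // toString() appends " requestId: ..." and ", position: ..." when they are set.
        System.out.println(readFailure(4096L));
    }
}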
+ + public void setRequestId(String requestId) { + this.requestId = requestId; } public long getPosition() { @@ -52,13 +49,13 @@ public void setPosition(long position) { @Override public String toString() { - String errStr = super.toString(); + StringBuilder errorStringBuilder = new StringBuilder(super.toString()); if (requestId != null) { - errStr += " requestId: " + requestId; + errorStringBuilder.append(" requestId: ").append(requestId); } - if (position != -1) { - errStr += ", position: " + position; + if (position != -1L) { + errorStringBuilder.append(", position: ").append(position); } - return errStr; + return errorStringBuilder.toString(); } } diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/CompositeFlatFile.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/CompositeFlatFile.java deleted file mode 100644 index 5ad3a6ff320..00000000000 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/CompositeFlatFile.java +++ /dev/null @@ -1,495 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.rocketmq.tieredstore.file; - -import com.github.benmanes.caffeine.cache.Cache; -import com.github.benmanes.caffeine.cache.Caffeine; -import com.github.benmanes.caffeine.cache.RemovalCause; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Comparator; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.locks.ReentrantLock; -import org.apache.commons.lang3.StringUtils; -import org.apache.commons.lang3.tuple.Pair; -import org.apache.rocketmq.logging.org.slf4j.Logger; -import org.apache.rocketmq.logging.org.slf4j.LoggerFactory; -import org.apache.rocketmq.store.DispatchRequest; -import org.apache.rocketmq.tieredstore.common.AppendResult; -import org.apache.rocketmq.tieredstore.common.FileSegmentType; -import org.apache.rocketmq.tieredstore.common.InFlightRequestFuture; -import org.apache.rocketmq.tieredstore.common.InFlightRequestKey; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.metadata.TieredMetadataStore; -import org.apache.rocketmq.tieredstore.util.CQItemBufferUtil; -import org.apache.rocketmq.tieredstore.util.MessageBufferUtil; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; -import org.apache.rocketmq.common.BoundaryType; - -public class CompositeFlatFile implements CompositeAccess { - - protected static final Logger LOGGER = LoggerFactory.getLogger(TieredStoreUtil.TIERED_STORE_LOGGER_NAME); - - protected volatile boolean closed = false; - protected int readAheadFactor; - - /** - * Dispatch offset represents the offset of the messages that have been - * dispatched to the current chunk, indicating the progress of the message distribution. - * It's consume queue current offset. - */ - protected final AtomicLong dispatchOffset; - - protected final ReentrantLock compositeFlatFileLock; - protected final TieredMessageStoreConfig storeConfig; - protected final TieredMetadataStore metadataStore; - - protected final String filePath; - protected final TieredCommitLog commitLog; - protected final TieredConsumeQueue consumeQueue; - protected final Cache groupOffsetCache; - protected final ConcurrentMap inFlightRequestMap; - - public CompositeFlatFile(TieredFileAllocator fileQueueFactory, String filePath) { - this.filePath = filePath; - this.storeConfig = fileQueueFactory.getStoreConfig(); - this.readAheadFactor = this.storeConfig.getReadAheadMinFactor(); - this.metadataStore = TieredStoreUtil.getMetadataStore(this.storeConfig); - this.compositeFlatFileLock = new ReentrantLock(); - this.inFlightRequestMap = new ConcurrentHashMap<>(); - this.commitLog = new TieredCommitLog(fileQueueFactory, filePath); - this.consumeQueue = new TieredConsumeQueue(fileQueueFactory, filePath); - this.dispatchOffset = new AtomicLong( - this.consumeQueue.isInitialized() ? 
this.getConsumeQueueCommitOffset() : -1L); - this.groupOffsetCache = this.initOffsetCache(); - } - - private Cache initOffsetCache() { - return Caffeine.newBuilder() - .expireAfterWrite(2, TimeUnit.MINUTES) - .removalListener((key, value, cause) -> { - if (cause.equals(RemovalCause.EXPIRED)) { - inFlightRequestMap.remove(new InFlightRequestKey((String) key)); - } - }).build(); - } - - public boolean isClosed() { - return closed; - } - - public ReentrantLock getCompositeFlatFileLock() { - return compositeFlatFileLock; - } - - public long getCommitLogMinOffset() { - return commitLog.getMinOffset(); - } - - public long getCommitLogMaxOffset() { - return commitLog.getMaxOffset(); - } - - public long getCommitLogBeginTimestamp() { - return commitLog.getBeginTimestamp(); - } - - @Override - public long getCommitLogDispatchCommitOffset() { - return commitLog.getDispatchCommitOffset(); - } - - public long getConsumeQueueBaseOffset() { - return consumeQueue.getBaseOffset(); - } - - public long getConsumeQueueMinOffset() { - long cqOffset = consumeQueue.getMinOffset() / TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE; - long effectiveOffset = this.commitLog.getMinConsumeQueueOffset(); - return Math.max(cqOffset, effectiveOffset); - } - - public long getConsumeQueueCommitOffset() { - return consumeQueue.getCommitOffset() / TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE; - } - - public long getConsumeQueueMaxOffset() { - return consumeQueue.getMaxOffset() / TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE; - } - - public long getConsumeQueueEndTimestamp() { - return consumeQueue.getEndTimestamp(); - } - - public long getDispatchOffset() { - return dispatchOffset.get(); - } - - @Override - public CompletableFuture getMessageAsync(long queueOffset) { - return getConsumeQueueAsync(queueOffset).thenComposeAsync(cqBuffer -> { - long commitLogOffset = CQItemBufferUtil.getCommitLogOffset(cqBuffer); - int length = CQItemBufferUtil.getSize(cqBuffer); - return getCommitLogAsync(commitLogOffset, length); - }); - } - - @Override - public long getOffsetInConsumeQueueByTime(long timestamp, BoundaryType boundaryType) { - Pair pair = consumeQueue.getQueueOffsetInFileByTime(timestamp, boundaryType); - long minQueueOffset = pair.getLeft(); - long maxQueueOffset = pair.getRight(); - - if (maxQueueOffset == -1 || maxQueueOffset < minQueueOffset) { - return -1L; - } - - long low = minQueueOffset; - long high = maxQueueOffset; - - long offset = 0; - - // Handle the following corner cases first: - // 1. store time of (high) < timestamp - // 2. 
store time of (low) > timestamp - long storeTime; - // Handle case 1 - ByteBuffer message = getMessageAsync(maxQueueOffset).join(); - storeTime = MessageBufferUtil.getStoreTimeStamp(message); - if (storeTime < timestamp) { - switch (boundaryType) { - case LOWER: - return maxQueueOffset + 1; - case UPPER: - return maxQueueOffset; - default: - LOGGER.warn("CompositeFlatFile#getQueueOffsetByTime: unknown boundary boundaryType"); - break; - } - } - - // Handle case 2 - message = getMessageAsync(minQueueOffset).join(); - storeTime = MessageBufferUtil.getStoreTimeStamp(message); - if (storeTime > timestamp) { - switch (boundaryType) { - case LOWER: - return minQueueOffset; - case UPPER: - return 0L; - default: - LOGGER.warn("CompositeFlatFile#getQueueOffsetByTime: unknown boundary boundaryType"); - break; - } - } - - // Perform binary search - long midOffset = -1; - long targetOffset = -1; - long leftOffset = -1; - long rightOffset = -1; - while (high >= low) { - midOffset = (low + high) / 2; - message = getMessageAsync(midOffset).join(); - storeTime = MessageBufferUtil.getStoreTimeStamp(message); - if (storeTime == timestamp) { - targetOffset = midOffset; - break; - } else if (storeTime > timestamp) { - high = midOffset - 1; - rightOffset = midOffset; - } else { - low = midOffset + 1; - leftOffset = midOffset; - } - } - - if (targetOffset != -1) { - // We just found ONE matched record. These next to it might also share the same store-timestamp. - offset = targetOffset; - long previousAttempt = targetOffset; - switch (boundaryType) { - case LOWER: - while (true) { - long attempt = previousAttempt - 1; - if (attempt < minQueueOffset) { - break; - } - message = getMessageAsync(attempt).join(); - storeTime = MessageBufferUtil.getStoreTimeStamp(message); - if (storeTime == timestamp) { - previousAttempt = attempt; - continue; - } - break; - } - offset = previousAttempt; - break; - case UPPER: - while (true) { - long attempt = previousAttempt + 1; - if (attempt > maxQueueOffset) { - break; - } - - message = getMessageAsync(attempt).join(); - storeTime = MessageBufferUtil.getStoreTimeStamp(message); - if (storeTime == timestamp) { - previousAttempt = attempt; - continue; - } - break; - } - offset = previousAttempt; - break; - default: - LOGGER.warn("CompositeFlatFile#getQueueOffsetByTime: unknown boundary boundaryType"); - break; - } - } else { - // Given timestamp does not have any message records. But we have a range enclosing the - // timestamp. - /* - * Consider the follow case: t2 has no consume queue entry and we are searching offset of - * t2 for lower and upper boundaries. - * -------------------------- - * timestamp Consume Queue - * t1 1 - * t1 2 - * t1 3 - * t3 4 - * t3 5 - * -------------------------- - * Now, we return 3 as upper boundary of t2 and 4 as its lower boundary. It looks - * contradictory at first sight, but it does make sense when performing range queries. 
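The comment above pins down the boundary convention when the target timestamp falls between two runs of messages: UPPER resolves to the last offset at or before the timestamp, LOWER to the first offset at or after it. Below is a self-contained binary-search sketch over an in-memory timestamp array that reproduces this convention; the array, base offset, and class name are illustrative, the real code reads the consume queue asynchronously, and edge handling at the ends of the range is simplified here.

import org.apache.rocketmq.common.BoundaryType;

public class TimestampSearchSketch {

    // Timestamps are sorted ascending, one entry per consume queue offset starting at baseOffset.
    // LOWER: first offset whose store time is >= timestamp (baseOffset + length if none).
    // UPPER: last offset whose store time is <= timestamp (-1 if none; the original code differs at this edge).
    static long searchOffsetByTime(long[] storeTimestamps, long baseOffset, long timestamp, BoundaryType type) {
        int n = storeTimestamps.length;
        int low = 0;
        int high = n;
        while (low < high) {                       // first index with store time >= timestamp
            int mid = (low + high) >>> 1;
            if (storeTimestamps[mid] >= timestamp) {
                high = mid;
            } else {
                low = mid + 1;
            }
        }
        int firstAtOrAfter = low;
        low = 0;
        high = n;
        while (low < high) {                       // first index with store time > timestamp
            int mid = (low + high) >>> 1;
            if (storeTimestamps[mid] > timestamp) {
                high = mid;
            } else {
                low = mid + 1;
            }
        }
        int lastAtOrBefore = low - 1;
        return BoundaryType.LOWER.equals(type)
            ? baseOffset + firstAtOrAfter
            : (lastAtOrBefore < 0 ? -1L : baseOffset + lastAtOrBefore);
    }

    public static void main(String[] args) {
        // Offsets 1-3 carry store time 100, offsets 4-5 carry store time 300; time 200 has no entry.
        long[] storeTimestamps = {100L, 100L, 100L, 300L, 300L};
        System.out.println(searchOffsetByTime(storeTimestamps, 1L, 200L, BoundaryType.LOWER)); // 4
        System.out.println(searchOffsetByTime(storeTimestamps, 1L, 200L, BoundaryType.UPPER)); // 3
    }
}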
- */ - switch (boundaryType) { - case LOWER: { - offset = rightOffset; - break; - } - - case UPPER: { - offset = leftOffset; - break; - } - default: { - LOGGER.warn("CompositeFlatFile#getQueueOffsetByTime: unknown boundary boundaryType"); - break; - } - } - } - return offset; - } - - @Override - public void initOffset(long offset) { - if (consumeQueue.isInitialized()) { - dispatchOffset.set(this.getConsumeQueueCommitOffset()); - } else { - consumeQueue.setBaseOffset(offset * TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE); - dispatchOffset.set(offset); - } - } - - @Override - public AppendResult appendCommitLog(ByteBuffer message) { - return appendCommitLog(message, false); - } - - @Override - public AppendResult appendCommitLog(ByteBuffer message, boolean commit) { - if (closed) { - return AppendResult.FILE_CLOSED; - } - - AppendResult result = commitLog.append(message, commit); - if (result == AppendResult.SUCCESS) { - dispatchOffset.incrementAndGet(); - } - return result; - } - - @Override - public AppendResult appendConsumeQueue(DispatchRequest request) { - return appendConsumeQueue(request, false); - } - - @Override - public AppendResult appendConsumeQueue(DispatchRequest request, boolean commit) { - if (closed) { - return AppendResult.FILE_CLOSED; - } - - if (request.getConsumeQueueOffset() != getConsumeQueueMaxOffset()) { - return AppendResult.OFFSET_INCORRECT; - } - - return consumeQueue.append(request.getCommitLogOffset(), - request.getMsgSize(), request.getTagsCode(), request.getStoreTimestamp(), commit); - } - - @Override - public CompletableFuture getCommitLogAsync(long offset, int length) { - return commitLog.readAsync(offset, length); - } - - @Override - public CompletableFuture getConsumeQueueAsync(long queueOffset) { - return getConsumeQueueAsync(queueOffset, 1); - } - - @Override - public CompletableFuture getConsumeQueueAsync(long queueOffset, int count) { - return consumeQueue.readAsync(queueOffset * TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE, - count * TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE); - } - - @Override - public void commitCommitLog() { - commitLog.commit(true); - } - - @Override - public void commitConsumeQueue() { - consumeQueue.commit(true); - } - - @Override - public void cleanExpiredFile(long expireTimestamp) { - commitLog.cleanExpiredFile(expireTimestamp); - consumeQueue.cleanExpiredFile(expireTimestamp); - } - - @Override - public void destroyExpiredFile() { - commitLog.destroyExpiredFile(); - consumeQueue.destroyExpiredFile(); - } - - @Override - public void commit(boolean sync) { - commitLog.commit(sync); - consumeQueue.commit(sync); - } - - public void increaseReadAheadFactor() { - readAheadFactor = Math.min(readAheadFactor + 1, storeConfig.getReadAheadMaxFactor()); - } - - public void decreaseReadAheadFactor() { - readAheadFactor = Math.max(readAheadFactor - 1, storeConfig.getReadAheadMinFactor()); - } - - public void setNotReadAhead() { - readAheadFactor = 1; - } - - public int getReadAheadFactor() { - return readAheadFactor; - } - - public void recordGroupAccess(String group, long offset) { - groupOffsetCache.put(group, offset); - } - - public long getActiveGroupCount(long minOffset, long maxOffset) { - return groupOffsetCache.asMap() - .values() - .stream() - .filter(offset -> offset >= minOffset && offset <= maxOffset) - .count(); - } - - public long getActiveGroupCount() { - return groupOffsetCache.estimatedSize(); - } - - public InFlightRequestFuture getInflightRequest(long offset, int batchSize) { - Optional optional = 
inFlightRequestMap.entrySet() - .stream() - .filter(entry -> { - InFlightRequestKey key = entry.getKey(); - return Math.max(key.getOffset(), offset) <= Math.min(key.getOffset() + key.getBatchSize(), offset + batchSize); - }) - .max(Comparator.comparing(entry -> entry.getKey().getRequestTime())) - .map(Map.Entry::getValue); - return optional.orElseGet(() -> new InFlightRequestFuture(Long.MAX_VALUE, new ArrayList<>())); - } - - public InFlightRequestFuture getInflightRequest(String group, long offset, int batchSize) { - InFlightRequestFuture future = inFlightRequestMap.get(new InFlightRequestKey(group)); - if (future != null && !future.isAllDone()) { - return future; - } - return getInflightRequest(offset, batchSize); - } - - public void putInflightRequest(String group, long offset, int requestMsgCount, - List>> futureList) { - InFlightRequestKey key = new InFlightRequestKey(group, offset, requestMsgCount); - inFlightRequestMap.remove(key); - inFlightRequestMap.putIfAbsent(key, new InFlightRequestFuture(offset, futureList)); - } - - @Override - public int hashCode() { - return filePath.hashCode(); - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - return StringUtils.equals(filePath, ((CompositeFlatFile) obj).filePath); - } - - public void shutdown() { - closed = true; - commitLog.commit(true); - consumeQueue.commit(true); - } - - public void destroy() { - try { - closed = true; - compositeFlatFileLock.lock(); - commitLog.destroy(); - consumeQueue.destroy(); - metadataStore.deleteFileSegment(filePath, FileSegmentType.COMMIT_LOG); - metadataStore.deleteFileSegment(filePath, FileSegmentType.CONSUME_QUEUE); - } catch (Exception e) { - LOGGER.error("CompositeFlatFile#destroy: delete file failed", e); - } finally { - compositeFlatFileLock.unlock(); - } - } -} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/CompositeQueueFlatFile.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/CompositeQueueFlatFile.java deleted file mode 100644 index 67d2cf06462..00000000000 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/CompositeQueueFlatFile.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.rocketmq.tieredstore.file; - -import java.util.Arrays; -import java.util.HashSet; -import java.util.Set; -import org.apache.commons.lang3.StringUtils; -import org.apache.rocketmq.common.message.MessageConst; -import org.apache.rocketmq.common.message.MessageQueue; -import org.apache.rocketmq.store.DispatchRequest; -import org.apache.rocketmq.tieredstore.common.AppendResult; -import org.apache.rocketmq.tieredstore.index.IndexService; -import org.apache.rocketmq.tieredstore.metadata.QueueMetadata; -import org.apache.rocketmq.tieredstore.metadata.TopicMetadata; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; - -public class CompositeQueueFlatFile extends CompositeFlatFile { - - private final MessageQueue messageQueue; - private long topicSequenceNumber; - private QueueMetadata queueMetadata; - private final IndexService indexStoreService; - - public CompositeQueueFlatFile(TieredFileAllocator fileQueueFactory, MessageQueue messageQueue) { - super(fileQueueFactory, TieredStoreUtil.toPath(messageQueue)); - this.messageQueue = messageQueue; - this.recoverQueueMetadata(); - this.indexStoreService = TieredFlatFileManager.getTieredIndexService(storeConfig); - } - - @Override - public void initOffset(long offset) { - if (!consumeQueue.isInitialized()) { - queueMetadata.setMinOffset(offset); - queueMetadata.setMaxOffset(offset); - metadataStore.updateQueue(queueMetadata); - } - super.initOffset(offset); - } - - public void recoverQueueMetadata() { - TopicMetadata topicMetadata = this.metadataStore.getTopic(messageQueue.getTopic()); - if (topicMetadata == null) { - topicMetadata = this.metadataStore.addTopic(messageQueue.getTopic(), -1L); - } - this.topicSequenceNumber = topicMetadata.getTopicId(); - - queueMetadata = this.metadataStore.getQueue(messageQueue); - if (queueMetadata == null) { - queueMetadata = this.metadataStore.addQueue(messageQueue, -1); - } - if (queueMetadata.getMaxOffset() < queueMetadata.getMinOffset()) { - queueMetadata.setMaxOffset(queueMetadata.getMinOffset()); - } - } - - public void flushMetadata() { - try { - queueMetadata.setMinOffset(super.getConsumeQueueMinOffset()); - queueMetadata.setMaxOffset(super.getConsumeQueueMaxOffset()); - metadataStore.updateQueue(queueMetadata); - } catch (Exception e) { - LOGGER.error("CompositeFlatFile#flushMetadata error, topic: {}, queue: {}", - messageQueue.getTopic(), messageQueue.getQueueId(), e); - } - } - - /** - * Building indexes with offsetId is no longer supported because offsetId has changed in tiered storage - */ - public AppendResult appendIndexFile(DispatchRequest request) { - if (closed) { - return AppendResult.FILE_CLOSED; - } - - Set keySet = new HashSet<>( - Arrays.asList(request.getKeys().split(MessageConst.KEY_SEPARATOR))); - if (StringUtils.isNotBlank(request.getUniqKey())) { - keySet.add(request.getUniqKey()); - } - - return indexStoreService.putKey( - messageQueue.getTopic(), (int) topicSequenceNumber, messageQueue.getQueueId(), keySet, - request.getCommitLogOffset(), request.getMsgSize(), request.getStoreTimestamp()); - } - - public MessageQueue getMessageQueue() { - return messageQueue; - } - - @Override - public void shutdown() { - super.shutdown(); - this.flushMetadata(); - } - - @Override - public void destroy() { - super.destroy(); - metadataStore.deleteQueue(messageQueue); - } -} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/FlatAppendFile.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/FlatAppendFile.java new file mode 
100644 index 00000000000..d0484137982 --- /dev/null +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/FlatAppendFile.java @@ -0,0 +1,269 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.rocketmq.tieredstore.file; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.stream.Collectors; +import org.apache.rocketmq.tieredstore.common.AppendResult; +import org.apache.rocketmq.tieredstore.common.FileSegmentType; +import org.apache.rocketmq.tieredstore.metadata.MetadataStore; +import org.apache.rocketmq.tieredstore.metadata.entity.FileSegmentMetadata; +import org.apache.rocketmq.tieredstore.provider.FileSegment; +import org.apache.rocketmq.tieredstore.provider.FileSegmentFactory; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class FlatAppendFile { + + protected static final Logger log = LoggerFactory.getLogger(MessageStoreUtil.TIERED_STORE_LOGGER_NAME); + public static final long GET_FILE_SIZE_ERROR = -1L; + + protected final String filePath; + protected final FileSegmentType fileType; + protected final MetadataStore metadataStore; + protected final FileSegmentFactory fileSegmentFactory; + protected final ReentrantReadWriteLock fileSegmentLock; + protected final CopyOnWriteArrayList fileSegmentTable; + + protected FlatAppendFile(FileSegmentFactory fileSegmentFactory, FileSegmentType fileType, String filePath) { + + this.fileType = fileType; + this.filePath = filePath; + this.metadataStore = fileSegmentFactory.getMetadataStore(); + this.fileSegmentFactory = fileSegmentFactory; + this.fileSegmentLock = new ReentrantReadWriteLock(); + this.fileSegmentTable = new CopyOnWriteArrayList<>(); + this.recover(); + this.recoverFileSize(); + } + + public void recover() { + List fileSegmentList = new ArrayList<>(); + this.metadataStore.iterateFileSegment(this.filePath, this.fileType, metadata -> { + FileSegment fileSegment = this.fileSegmentFactory.createSegment( + this.fileType, metadata.getPath(), metadata.getBaseOffset()); + fileSegment.initPosition(metadata.getSize()); + fileSegment.setMinTimestamp(metadata.getBeginTimestamp()); + fileSegment.setMaxTimestamp(metadata.getEndTimestamp()); + fileSegmentList.add(fileSegment); + }); + this.fileSegmentTable.addAll(fileSegmentList.stream().sorted().collect(Collectors.toList())); + } + + public void recoverFileSize() { + if (fileSegmentTable.isEmpty() || FileSegmentType.INDEX.equals(fileType)) { + return; + } + FileSegment fileSegment = fileSegmentTable.get(fileSegmentTable.size() - 1); 
+ long fileSize = fileSegment.getSize(); + if (fileSize == GET_FILE_SIZE_ERROR) { + log.warn("FlatAppendFile get last file size error, filePath: {}", this.filePath); + return; + } + if (fileSegment.getCommitPosition() != fileSize) { + fileSegment.initPosition(fileSize); + flushFileSegmentMeta(fileSegment); + log.warn("FlatAppendFile last file size not correct, filePath: {}", this.filePath); + } + } + + public void initOffset(long offset) { + if (this.fileSegmentTable.isEmpty()) { + FileSegment fileSegment = fileSegmentFactory.createSegment(fileType, filePath, offset); + this.flushFileSegmentMeta(fileSegment); + this.fileSegmentTable.add(fileSegment); + } + } + + public void flushFileSegmentMeta(FileSegment fileSegment) { + FileSegmentMetadata metadata = this.metadataStore.getFileSegment( + this.filePath, fileSegment.getFileType(), fileSegment.getBaseOffset()); + if (metadata == null) { + metadata = new FileSegmentMetadata( + this.filePath, fileSegment.getBaseOffset(), fileSegment.getFileType().getCode()); + metadata.setCreateTimestamp(System.currentTimeMillis()); + } + metadata.setSize(fileSegment.getCommitPosition()); + metadata.setBeginTimestamp(fileSegment.getMinTimestamp()); + metadata.setEndTimestamp(fileSegment.getMaxTimestamp()); + this.metadataStore.updateFileSegment(metadata); + } + + public String getFilePath() { + return filePath; + } + + public FileSegmentType getFileType() { + return fileType; + } + + public List getFileSegmentList() { + return fileSegmentTable; + } + + public long getMinOffset() { + List list = this.fileSegmentTable; + return list.isEmpty() ? GET_FILE_SIZE_ERROR : list.get(0).getBaseOffset(); + } + + public long getCommitOffset() { + List list = this.fileSegmentTable; + return list.isEmpty() ? GET_FILE_SIZE_ERROR : list.get(list.size() - 1).getCommitOffset(); + } + + public long getAppendOffset() { + List list = this.fileSegmentTable; + return list.isEmpty() ? GET_FILE_SIZE_ERROR : list.get(list.size() - 1).getAppendOffset(); + } + + public long getMinTimestamp() { + List list = this.fileSegmentTable; + return list.isEmpty() ? GET_FILE_SIZE_ERROR : list.get(0).getMinTimestamp(); + } + + public long getMaxTimestamp() { + List list = this.fileSegmentTable; + return list.isEmpty() ? 
GET_FILE_SIZE_ERROR : list.get(list.size() - 1).getMaxTimestamp(); + } + + public FileSegment rollingNewFile(long offset) { + FileSegment fileSegment; + fileSegmentLock.writeLock().lock(); + try { + fileSegment = this.fileSegmentFactory.createSegment(this.fileType, this.filePath, offset); + this.flushFileSegmentMeta(fileSegment); + this.fileSegmentTable.add(fileSegment); + } finally { + fileSegmentLock.writeLock().unlock(); + } + return fileSegment; + } + + public FileSegment getFileToWrite() { + List fileSegmentList = this.fileSegmentTable; + if (fileSegmentList.isEmpty()) { + throw new IllegalStateException("Need to set base offset before create file segment"); + } else { + return fileSegmentList.get(fileSegmentList.size() - 1); + } + } + + public AppendResult append(ByteBuffer buffer, long timestamp) { + AppendResult result; + fileSegmentLock.writeLock().lock(); + try { + FileSegment fileSegment = this.getFileToWrite(); + result = fileSegment.append(buffer, timestamp); + if (result == AppendResult.FILE_FULL) { + fileSegment.commitAsync().join(); + return this.rollingNewFile(this.getAppendOffset()).append(buffer, timestamp); + } + } finally { + fileSegmentLock.writeLock().unlock(); + } + return result; + } + + public CompletableFuture commitAsync() { + List fileSegmentsList = this.fileSegmentTable; + if (fileSegmentsList.isEmpty()) { + return CompletableFuture.completedFuture(true); + } + FileSegment fileSegment = fileSegmentsList.get(fileSegmentsList.size() - 1); + return fileSegment.commitAsync().thenApply(success -> { + if (success) { + this.flushFileSegmentMeta(fileSegment); + } + return success; + }); + } + + public CompletableFuture readAsync(long offset, int length) { + List fileSegmentList = this.fileSegmentTable; + int index = fileSegmentList.size() - 1; + for (; index >= 0; index--) { + if (fileSegmentList.get(index).getBaseOffset() <= offset) { + break; + } + } + + FileSegment fileSegment1 = fileSegmentList.get(index); + FileSegment fileSegment2 = offset + length > fileSegment1.getCommitOffset() && + fileSegmentList.size() > index + 1 ? 
fileSegmentList.get(index + 1) : null; + + if (fileSegment2 == null) { + return fileSegment1.readAsync(offset - fileSegment1.getBaseOffset(), length); + } + + int segment1Length = (int) (fileSegment1.getCommitOffset() - offset); + return fileSegment1.readAsync(offset - fileSegment1.getBaseOffset(), segment1Length) + .thenCombine(fileSegment2.readAsync(0, length - segment1Length), + (buffer1, buffer2) -> { + ByteBuffer buffer = ByteBuffer.allocate(buffer1.remaining() + buffer2.remaining()); + buffer.put(buffer1).put(buffer2); + buffer.flip(); + return buffer; + }); + } + + public void shutdown() { + fileSegmentLock.writeLock().lock(); + try { + fileSegmentTable.forEach(FileSegment::close); + } finally { + fileSegmentLock.writeLock().unlock(); + } + } + + public void destroyExpiredFile(long expireTimestamp) { + fileSegmentLock.writeLock().lock(); + try { + while (!fileSegmentTable.isEmpty()) { + + // first remove expired file from fileSegmentTable + // then close and delete expired file + FileSegment fileSegment = fileSegmentTable.get(0); + + if (fileSegment.getMaxTimestamp() != Long.MAX_VALUE && + fileSegment.getMaxTimestamp() > expireTimestamp) { + log.debug("FileSegment has not expired, filePath={}, fileType={}, " + + "offset={}, expireTimestamp={}, maxTimestamp={}", filePath, fileType, + fileSegment.getBaseOffset(), expireTimestamp, fileSegment.getMaxTimestamp()); + break; + } + + fileSegment.destroyFile(); + if (!fileSegment.exists()) { + fileSegmentTable.remove(0); + metadataStore.deleteFileSegment(filePath, fileType, fileSegment.getBaseOffset()); + } + } + } finally { + fileSegmentLock.writeLock().unlock(); + } + } + + public void destroy() { + this.destroyExpiredFile(Long.MAX_VALUE); + } +} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/FlatCommitLogFile.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/FlatCommitLogFile.java new file mode 100644 index 00000000000..8a319ed3899 --- /dev/null +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/FlatCommitLogFile.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.rocketmq.tieredstore.file; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.atomic.AtomicLong; +import org.apache.rocketmq.tieredstore.common.FileSegmentType; +import org.apache.rocketmq.tieredstore.provider.FileSegmentFactory; +import org.apache.rocketmq.tieredstore.util.MessageFormatUtil; + +public class FlatCommitLogFile extends FlatAppendFile { + + private static final long GET_OFFSET_ERROR = -1L; + + private final AtomicLong firstOffset = new AtomicLong(GET_OFFSET_ERROR); + + public FlatCommitLogFile(FileSegmentFactory fileSegmentFactory, String filePath) { + super(fileSegmentFactory, FileSegmentType.COMMIT_LOG, filePath); + this.initOffset(0L); + } + + public boolean tryRollingFile(long interval) { + long timestamp = this.getFileToWrite().getMinTimestamp(); + if (timestamp != Long.MAX_VALUE && + timestamp + interval < System.currentTimeMillis()) { + this.rollingNewFile(this.getAppendOffset()); + return true; + } + return false; + } + + public long getMinOffsetFromFile() { + return firstOffset.get() == GET_OFFSET_ERROR ? + this.getMinOffsetFromFileAsync().join() : firstOffset.get(); + } + + public CompletableFuture getMinOffsetFromFileAsync() { + int length = MessageFormatUtil.QUEUE_OFFSET_POSITION + Long.BYTES; + if (this.fileSegmentTable.isEmpty() || + this.getCommitOffset() - this.getMinOffset() < length) { + return CompletableFuture.completedFuture(GET_OFFSET_ERROR); + } + return this.readAsync(this.getMinOffset(), length) + .thenApply(buffer -> { + firstOffset.set(MessageFormatUtil.getQueueOffset(buffer)); + return firstOffset.get(); + }); + } +} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/util/CQItemBufferUtil.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/FlatConsumeQueueFile.java similarity index 64% rename from tieredstore/src/main/java/org/apache/rocketmq/tieredstore/util/CQItemBufferUtil.java rename to tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/FlatConsumeQueueFile.java index 2acc133d830..caad4749b5d 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/util/CQItemBufferUtil.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/FlatConsumeQueueFile.java @@ -14,20 +14,14 @@ * See the License for the specific language governing permissions and * limitations under the License. 
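A similarly hedged sketch of the FlatCommitLogFile additions above (not part of the diff; the factory instance and the 24-hour interval are illustrative): tryRollingFile() rolls the active segment once its earliest store timestamp is older than the given interval, and getMinOffsetFromFileAsync() lazily reads the queue offset of the first message still kept in the tiered commit log, returning -1 while no complete message header is available.

import java.util.concurrent.TimeUnit;
import org.apache.rocketmq.tieredstore.file.FlatCommitLogFile;
import org.apache.rocketmq.tieredstore.file.FlatFileFactory;

public class FlatCommitLogFileSketch {

    // The factory is assumed to be built as in the FlatAppendFile sketch above.
    public static void rollAndInspect(FlatFileFactory factory) {
        FlatCommitLogFile commitLog = factory.createFlatFileForCommitLog("TopicTest/0");

        // Roll to a new segment when the active one already holds data older than one day.
        boolean rolled = commitLog.tryRollingFile(TimeUnit.HOURS.toMillis(24));

        // Queue offset of the first message persisted in tiered storage, or -1 if none yet.
        long minQueueOffset = commitLog.getMinOffsetFromFileAsync().join();
        System.out.printf("rolled=%s, minQueueOffset=%d%n", rolled, minQueueOffset);
    }
}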
*/ -package org.apache.rocketmq.tieredstore.util; +package org.apache.rocketmq.tieredstore.file; -import java.nio.ByteBuffer; +import org.apache.rocketmq.tieredstore.common.FileSegmentType; +import org.apache.rocketmq.tieredstore.provider.FileSegmentFactory; -public class CQItemBufferUtil { - public static long getCommitLogOffset(ByteBuffer cqItem) { - return cqItem.getLong(cqItem.position()); - } - - public static int getSize(ByteBuffer cqItem) { - return cqItem.getInt(cqItem.position() + 8); - } +public class FlatConsumeQueueFile extends FlatAppendFile { - public static long getTagCode(ByteBuffer cqItem) { - return cqItem.getLong(cqItem.position() + 12); + public FlatConsumeQueueFile(FileSegmentFactory fileSegmentFactory, String filePath) { + super(fileSegmentFactory, FileSegmentType.CONSUME_QUEUE, filePath); } } diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/FlatFileFactory.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/FlatFileFactory.java new file mode 100644 index 00000000000..ccaa58e4c22 --- /dev/null +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/FlatFileFactory.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.rocketmq.tieredstore.file; + +import org.apache.rocketmq.tieredstore.MessageStoreConfig; +import org.apache.rocketmq.tieredstore.common.FileSegmentType; +import org.apache.rocketmq.tieredstore.metadata.MetadataStore; +import org.apache.rocketmq.tieredstore.provider.FileSegmentFactory; + +public class FlatFileFactory { + + private final MetadataStore metadataStore; + private final MessageStoreConfig storeConfig; + private final FileSegmentFactory fileSegmentFactory; + + public FlatFileFactory(MetadataStore metadataStore, MessageStoreConfig storeConfig) { + this.metadataStore = metadataStore; + this.storeConfig = storeConfig; + this.fileSegmentFactory = new FileSegmentFactory(metadataStore, storeConfig); + } + + public MessageStoreConfig getStoreConfig() { + return storeConfig; + } + + public MetadataStore getMetadataStore() { + return metadataStore; + } + + public FlatCommitLogFile createFlatFileForCommitLog(String filePath) { + return new FlatCommitLogFile(this.fileSegmentFactory, filePath); + } + + public FlatConsumeQueueFile createFlatFileForConsumeQueue(String filePath) { + return new FlatConsumeQueueFile(this.fileSegmentFactory, filePath); + } + + public FlatAppendFile createFlatFileForIndexFile(String filePath) { + return new FlatAppendFile(this.fileSegmentFactory, FileSegmentType.INDEX, filePath); + } +} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/CompositeAccess.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/FlatFileInterface.java similarity index 67% rename from tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/CompositeAccess.java rename to tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/FlatFileInterface.java index 3d962e40d65..773f3cbecac 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/CompositeAccess.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/FlatFileInterface.java @@ -17,36 +17,38 @@ package org.apache.rocketmq.tieredstore.file; import java.nio.ByteBuffer; +import java.util.List; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.locks.Lock; +import org.apache.rocketmq.common.BoundaryType; +import org.apache.rocketmq.common.message.MessageQueue; import org.apache.rocketmq.store.DispatchRequest; +import org.apache.rocketmq.store.SelectMappedBufferResult; import org.apache.rocketmq.tieredstore.common.AppendResult; -import org.apache.rocketmq.common.BoundaryType; -interface CompositeAccess { +public interface FlatFileInterface { + + long getTopicId(); + + Lock getFileLock(); + + MessageQueue getMessageQueue(); + + boolean isFlatFileInit(); - /** - * Initializes the offset for the flat file. - * Will only affect the distribution site if the file has already been initialized. 
- * - * @param offset init offset for consume queue - */ void initOffset(long offset); - /** - * Appends a message to the commit log file, but does not commit it immediately - * - * @param message the message to append - * @return append result - */ - AppendResult appendCommitLog(ByteBuffer message); + boolean rollingFile(long interval); /** * Appends a message to the commit log file * - * @param message the message to append + * @param message the message to append * @return append result */ - AppendResult appendCommitLog(ByteBuffer message, boolean commit); + AppendResult appendCommitLog(ByteBuffer message); + + AppendResult appendCommitLog(SelectMappedBufferResult message); /** * Append message to consume queue file, but does not commit it immediately @@ -56,29 +58,32 @@ interface CompositeAccess { */ AppendResult appendConsumeQueue(DispatchRequest request); - /** - * Append message to consume queue file - * - * @param request the dispatch request - * @param commit whether to commit - * @return append result - */ - AppendResult appendConsumeQueue(DispatchRequest request, boolean commit); + List getDispatchRequestList(); - /** - * Persist commit log file - */ - void commitCommitLog(); + void release(); - /** - * Persist the consume queue file - */ - void commitConsumeQueue(); + long getMinStoreTimestamp(); + + long getMaxStoreTimestamp(); + + long getFirstMessageOffset(); + + long getCommitLogMinOffset(); + + long getCommitLogMaxOffset(); + + long getCommitLogCommitOffset(); + + long getConsumeQueueMinOffset(); + + long getConsumeQueueMaxOffset(); + + long getConsumeQueueCommitOffset(); /** * Persist commit log file and consume queue file */ - void commit(boolean sync); + CompletableFuture commitAsync(); /** * Asynchronously retrieves the message at the specified consume queue offset * @@ -89,7 +94,7 @@ interface CompositeAccess { CompletableFuture getMessageAsync(long consumeQueueOffset); /** - * Get message from commitlog file at specified offset and length + * Get message from commitLog file at specified offset and length * * @param offset the offset * @param length the length @@ -114,13 +119,6 @@ interface CompositeAccess { */ CompletableFuture getConsumeQueueAsync(long consumeQueueOffset, int count); - /** - * Return the consensus queue site corresponding to the confirmed site in the commitlog - * - * @return the maximum offset - */ - long getCommitLogDispatchCommitOffset(); /** * Gets the offset in the consume queue by timestamp and boundary type * * @param timestamp search time * @param boundaryType lower or upper to decide boundary * @return Returns the offset of the message */ - long getOffsetInConsumeQueueByTime(long timestamp, BoundaryType boundaryType); + CompletableFuture getQueueOffsetByTimeAsync(long timestamp, BoundaryType boundaryType); /** - * Mark some commit log and consume file sealed and expired - * - * @param expireTimestamp expire timestamp, usually several days before the current time + * Shutdown process */ - void cleanExpiredFile(long expireTimestamp); + void shutdown(); /** * Destroys expired files */ - void destroyExpiredFile(); - - /** - * Shutdown process - */ - void shutdown(); + void destroyExpiredFile(long timestamp); /** * Delete file diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/FlatFileStore.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/FlatFileStore.java new file mode 100644 index 00000000000..0d7044a5447 --- /dev/null +++
b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/FlatFileStore.java @@ -0,0 +1,163 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.rocketmq.tieredstore.file; + +import com.google.common.base.Stopwatch; +import com.google.common.collect.ImmutableList; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import org.apache.rocketmq.common.constant.LoggerName; +import org.apache.rocketmq.common.message.MessageQueue; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; +import org.apache.rocketmq.tieredstore.MessageStoreExecutor; +import org.apache.rocketmq.tieredstore.metadata.MetadataStore; +import org.apache.rocketmq.tieredstore.metadata.entity.TopicMetadata; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class FlatFileStore { + + private static final Logger log = LoggerFactory.getLogger(MessageStoreUtil.TIERED_STORE_LOGGER_NAME); + + private final MetadataStore metadataStore; + private final MessageStoreConfig storeConfig; + private final MessageStoreExecutor executor; + private final FlatFileFactory flatFileFactory; + private final ConcurrentMap flatFileConcurrentMap; + + public FlatFileStore(MessageStoreConfig storeConfig, MetadataStore metadataStore, MessageStoreExecutor executor) { + this.storeConfig = storeConfig; + this.metadataStore = metadataStore; + this.executor = executor; + this.flatFileFactory = new FlatFileFactory(metadataStore, storeConfig); + this.flatFileConcurrentMap = new ConcurrentHashMap<>(); + } + + public boolean load() { + Stopwatch stopwatch = Stopwatch.createStarted(); + try { + this.flatFileConcurrentMap.clear(); + this.recover(); + this.executor.commonExecutor.scheduleWithFixedDelay(() -> { + long expiredTimeStamp = System.currentTimeMillis() - + TimeUnit.HOURS.toMillis(storeConfig.getTieredStoreFileReservedTime()); + for (FlatMessageFile flatFile : deepCopyFlatFileToList()) { + flatFile.destroyExpiredFile(expiredTimeStamp); + if (flatFile.consumeQueue.fileSegmentTable.isEmpty()) { + this.destroyFile(flatFile.getMessageQueue()); + } + } + }, 60, 60, TimeUnit.SECONDS); + log.info("FlatFileStore recover finished, total cost={}ms", stopwatch.elapsed(TimeUnit.MILLISECONDS)); + } catch (Exception e) { + long costTime = stopwatch.elapsed(TimeUnit.MILLISECONDS); + log.info("FlatFileStore recover error, total cost={}ms", costTime); + LoggerFactory.getLogger(LoggerName.BROKER_LOGGER_NAME) + .error("FlatFileStore recover error, 
total cost={}ms", costTime, e); + return false; + } + return true; + } + + public void recover() { + Semaphore semaphore = new Semaphore(storeConfig.getTieredStoreMaxPendingLimit() / 4); + List> futures = new ArrayList<>(); + metadataStore.iterateTopic(topicMetadata -> { + semaphore.acquireUninterruptibly(); + futures.add(this.recoverAsync(topicMetadata) + .whenComplete((unused, throwable) -> { + if (throwable != null) { + log.error("FlatFileStore recover file error, topic={}", topicMetadata.getTopic(), throwable); + } + semaphore.release(); + })); + }); + CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join(); + } + + public CompletableFuture recoverAsync(TopicMetadata topicMetadata) { + return CompletableFuture.runAsync(() -> { + Stopwatch stopwatch = Stopwatch.createStarted(); + AtomicLong queueCount = new AtomicLong(); + metadataStore.iterateQueue(topicMetadata.getTopic(), queueMetadata -> { + FlatMessageFile flatFile = this.computeIfAbsent(new MessageQueue( + topicMetadata.getTopic(), storeConfig.getBrokerName(), queueMetadata.getQueue().getQueueId())); + queueCount.incrementAndGet(); + log.debug("FlatFileStore recover file, topicId={}, topic={}, queueId={}, cost={}ms", + flatFile.getTopicId(), flatFile.getMessageQueue().getTopic(), + flatFile.getMessageQueue().getQueueId(), stopwatch.elapsed(TimeUnit.MILLISECONDS)); + }); + log.info("FlatFileStore recover file, topic={}, total={}, cost={}ms", + topicMetadata.getTopic(), queueCount.get(), stopwatch.elapsed(TimeUnit.MILLISECONDS)); + }, executor.bufferCommitExecutor); + } + + public MetadataStore getMetadataStore() { + return metadataStore; + } + + public MessageStoreConfig getStoreConfig() { + return storeConfig; + } + + public FlatFileFactory getFlatFileFactory() { + return flatFileFactory; + } + + public FlatMessageFile computeIfAbsent(MessageQueue messageQueue) { + return flatFileConcurrentMap.computeIfAbsent(messageQueue, + mq -> new FlatMessageFile(flatFileFactory, mq.getTopic(), mq.getQueueId())); + } + + public FlatMessageFile getFlatFile(MessageQueue messageQueue) { + return flatFileConcurrentMap.get(messageQueue); + } + + public ImmutableList deepCopyFlatFileToList() { + return ImmutableList.copyOf(flatFileConcurrentMap.values()); + } + + public void shutdown() { + flatFileConcurrentMap.values().forEach(FlatMessageFile::shutdown); + } + + public void destroyFile(MessageQueue mq) { + if (mq == null) { + return; + } + + FlatMessageFile flatFile = flatFileConcurrentMap.remove(mq); + if (flatFile != null) { + flatFile.shutdown(); + flatFile.destroy(); + } + log.info("FlatFileStore destroy file, topic={}, queueId={}", mq.getTopic(), mq.getQueueId()); + } + + public void destroy() { + this.shutdown(); + flatFileConcurrentMap.values().forEach(FlatMessageFile::destroy); + flatFileConcurrentMap.clear(); + } +} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/FlatMessageFile.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/FlatMessageFile.java new file mode 100644 index 00000000000..7123332410c --- /dev/null +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/FlatMessageFile.java @@ -0,0 +1,386 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.rocketmq.tieredstore.file; + +import com.alibaba.fastjson.JSON; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import org.apache.commons.lang3.StringUtils; +import org.apache.rocketmq.common.BoundaryType; +import org.apache.rocketmq.common.message.MessageQueue; +import org.apache.rocketmq.store.DispatchRequest; +import org.apache.rocketmq.store.SelectMappedBufferResult; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; +import org.apache.rocketmq.tieredstore.common.AppendResult; +import org.apache.rocketmq.tieredstore.metadata.MetadataStore; +import org.apache.rocketmq.tieredstore.metadata.entity.QueueMetadata; +import org.apache.rocketmq.tieredstore.metadata.entity.TopicMetadata; +import org.apache.rocketmq.tieredstore.util.MessageFormatUtil; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class FlatMessageFile implements FlatFileInterface { + + protected static final Logger log = LoggerFactory.getLogger(MessageStoreUtil.TIERED_STORE_LOGGER_NAME); + protected volatile boolean closed = false; + + protected TopicMetadata topicMetadata; + protected QueueMetadata queueMetadata; + + protected final String filePath; + protected final ReentrantLock fileLock; + protected final MessageStoreConfig storeConfig; + protected final MetadataStore metadataStore; + protected final FlatCommitLogFile commitLog; + protected final FlatConsumeQueueFile consumeQueue; + protected final AtomicLong lastDestroyTime; + + protected final List bufferResultList; + protected final List dispatchRequestList; + protected final ConcurrentMap> inFlightRequestMap; + + public FlatMessageFile(FlatFileFactory fileFactory, String topic, int queueId) { + this(fileFactory, MessageStoreUtil.toFilePath( + new MessageQueue(topic, fileFactory.getStoreConfig().getBrokerName(), queueId))); + this.topicMetadata = this.recoverTopicMetadata(topic); + this.queueMetadata = this.recoverQueueMetadata(topic, queueId); + } + + public FlatMessageFile(FlatFileFactory fileFactory, String filePath) { + this.filePath = filePath; + this.fileLock = new ReentrantLock(false); + this.storeConfig = fileFactory.getStoreConfig(); + this.metadataStore = fileFactory.getMetadataStore(); + this.commitLog = fileFactory.createFlatFileForCommitLog(filePath); + this.consumeQueue = fileFactory.createFlatFileForConsumeQueue(filePath); + this.lastDestroyTime = new AtomicLong(); + this.bufferResultList = new ArrayList<>(); + this.dispatchRequestList = new ArrayList<>(); + this.inFlightRequestMap = new ConcurrentHashMap<>(); + } + + @Override + public long getTopicId() { 
+ return topicMetadata.getTopicId(); + } + + @Override + public MessageQueue getMessageQueue() { + return queueMetadata != null ? queueMetadata.getQueue() : null; + } + + @Override + public boolean isFlatFileInit() { + return !this.consumeQueue.fileSegmentTable.isEmpty(); + } + + public TopicMetadata recoverTopicMetadata(String topic) { + TopicMetadata topicMetadata = this.metadataStore.getTopic(topic); + if (topicMetadata == null) { + topicMetadata = this.metadataStore.addTopic(topic, -1L); + } + return topicMetadata; + } + + public QueueMetadata recoverQueueMetadata(String topic, int queueId) { + MessageQueue mq = new MessageQueue(topic, storeConfig.getBrokerName(), queueId); + QueueMetadata queueMetadata = this.metadataStore.getQueue(mq); + if (queueMetadata == null) { + queueMetadata = this.metadataStore.addQueue(mq, -1L); + } + return queueMetadata; + } + + public void flushMetadata() { + if (queueMetadata != null) { + queueMetadata.setMinOffset(this.getConsumeQueueMinOffset()); + queueMetadata.setMaxOffset(this.getConsumeQueueCommitOffset()); + queueMetadata.setUpdateTimestamp(System.currentTimeMillis()); + metadataStore.updateQueue(queueMetadata); + } + } + + @Override + public Lock getFileLock() { + return this.fileLock; + } + + @Override + public boolean rollingFile(long interval) { + return this.commitLog.tryRollingFile(interval); + } + + @Override + public void initOffset(long offset) { + fileLock.lock(); + try { + this.commitLog.initOffset(0L); + this.consumeQueue.initOffset(offset * MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE); + } finally { + fileLock.unlock(); + } + } + + @Override + public AppendResult appendCommitLog(ByteBuffer message) { + if (closed) { + return AppendResult.FILE_CLOSED; + } + return commitLog.append(message, MessageFormatUtil.getStoreTimeStamp(message)); + } + + @Override + public AppendResult appendCommitLog(SelectMappedBufferResult message) { + if (closed) { + return AppendResult.FILE_CLOSED; + } + this.bufferResultList.add(message); + return this.appendCommitLog(message.getByteBuffer()); + } + + @Override + public AppendResult appendConsumeQueue(DispatchRequest request) { + if (closed) { + return AppendResult.FILE_CLOSED; + } + + ByteBuffer buffer = ByteBuffer.allocate(MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE); + buffer.putLong(request.getCommitLogOffset()); + buffer.putInt(request.getMsgSize()); + buffer.putLong(request.getTagsCode()); + buffer.flip(); + + this.dispatchRequestList.add(request); + return consumeQueue.append(buffer, request.getStoreTimestamp()); + } + + @Override + public List getDispatchRequestList() { + return dispatchRequestList; + } + + @Override + public void release() { + for (SelectMappedBufferResult bufferResult : bufferResultList) { + bufferResult.release(); + } + + if (queueMetadata != null) { + log.trace("FlatMessageFile release, topic={}, queueId={}, bufferSize={}, requestListSize={}", + queueMetadata.getQueue().getTopic(), queueMetadata.getQueue().getQueueId(), + bufferResultList.size(), dispatchRequestList.size()); + } + + bufferResultList.clear(); + dispatchRequestList.clear(); + } + + @Override + public long getMinStoreTimestamp() { + return commitLog.getMinTimestamp(); + } + + @Override + public long getMaxStoreTimestamp() { + return commitLog.getMaxTimestamp(); + } + + @Override + public long getFirstMessageOffset() { + return commitLog.getMinOffsetFromFile(); + } + + @Override + public long getCommitLogMinOffset() { + return commitLog.getMinOffset(); + } + + @Override + public long getCommitLogMaxOffset() { + return 
commitLog.getAppendOffset(); + } + + @Override + public long getCommitLogCommitOffset() { + return commitLog.getCommitOffset(); + } + + @Override + public long getConsumeQueueMinOffset() { + long cqOffset = consumeQueue.getMinOffset() / MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE; + long effectiveOffset = this.commitLog.getMinOffsetFromFile(); + return Math.max(cqOffset, effectiveOffset); + } + + @Override + public long getConsumeQueueMaxOffset() { + return consumeQueue.getAppendOffset() / MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE; + } + + @Override + public long getConsumeQueueCommitOffset() { + return consumeQueue.getCommitOffset() / MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE; + } + + @Override + public CompletableFuture commitAsync() { + return this.commitLog.commitAsync() + .thenCompose(result -> { + if (result) { + return consumeQueue.commitAsync(); + } + return CompletableFuture.completedFuture(false); + }); + } + + @Override + public CompletableFuture getMessageAsync(long queueOffset) { + return getConsumeQueueAsync(queueOffset).thenCompose(cqBuffer -> { + long commitLogOffset = MessageFormatUtil.getCommitLogOffsetFromItem(cqBuffer); + int length = MessageFormatUtil.getSizeFromItem(cqBuffer); + return getCommitLogAsync(commitLogOffset, length); + }); + } + + @Override + public CompletableFuture getCommitLogAsync(long offset, int length) { + return commitLog.readAsync(offset, length); + } + + @Override + public CompletableFuture getConsumeQueueAsync(long queueOffset) { + return this.getConsumeQueueAsync(queueOffset, 1); + } + + @Override + public CompletableFuture getConsumeQueueAsync(long queueOffset, int count) { + return consumeQueue.readAsync( + queueOffset * MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE, + count * MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE); + } + + @Override + public CompletableFuture getQueueOffsetByTimeAsync(long timestamp, BoundaryType boundaryType) { + long cqMin = getConsumeQueueMinOffset(); + long cqMax = getConsumeQueueCommitOffset() - 1; + if (cqMax == -1 || cqMax < cqMin) { + return CompletableFuture.completedFuture(cqMin); + } + + long minOffset = cqMin; + long maxOffset = cqMax; + List queryLog = new ArrayList<>(); + while (minOffset < maxOffset) { + long middle = minOffset + (maxOffset - minOffset) / 2; + ByteBuffer buffer = this.getMessageAsync(middle).join(); + long storeTime = MessageFormatUtil.getStoreTimeStamp(buffer); + queryLog.add(String.format( + "(range=%d-%d, middle=%d, timestamp=%d)", minOffset, maxOffset, middle, storeTime)); + if (storeTime == timestamp) { + minOffset = middle; + break; + } else if (storeTime < timestamp) { + minOffset = middle + 1; + } else { + maxOffset = middle - 1; + } + } + + long offset = minOffset; + while (true) { + long next = boundaryType == BoundaryType.LOWER ? 
offset - 1 : offset + 1; + if (next < cqMin || next > cqMax) { + break; + } + ByteBuffer buffer = this.getMessageAsync(next).join(); + long storeTime = MessageFormatUtil.getStoreTimeStamp(buffer); + if (storeTime == timestamp) { + offset = next; + continue; + } + break; + } + + log.info("FlatMessageFile getQueueOffsetByTimeAsync, filePath={}, timestamp={}, result={}, log={}", + filePath, timestamp, offset, JSON.toJSONString(queryLog)); + return CompletableFuture.completedFuture(offset); + } + + @Override + public int hashCode() { + return filePath.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + return StringUtils.equals(filePath, ((FlatMessageFile) obj).filePath); + } + + @Override + public void shutdown() { + closed = true; + fileLock.lock(); + try { + commitLog.shutdown(); + consumeQueue.shutdown(); + } finally { + fileLock.unlock(); + } + } + + @Override + public void destroyExpiredFile(long timestamp) { + fileLock.lock(); + try { + commitLog.destroyExpiredFile(timestamp); + consumeQueue.destroyExpiredFile(timestamp); + } finally { + fileLock.unlock(); + } + } + + public void destroy() { + this.shutdown(); + fileLock.lock(); + try { + commitLog.destroyExpiredFile(Long.MAX_VALUE); + consumeQueue.destroyExpiredFile(Long.MAX_VALUE); + if (queueMetadata != null) { + metadataStore.deleteQueue(queueMetadata.getQueue()); + } + } finally { + fileLock.unlock(); + } + } +} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/TieredCommitLog.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/TieredCommitLog.java deleted file mode 100644 index 0e5f79132f0..00000000000 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/TieredCommitLog.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
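A condensed write-and-read sketch for the FlatMessageFile above (not part of the diff): flatFile is assumed to come from FlatFileStore#computeIfAbsent, message is an encoded commit-log entry and request its dispatch entry, and the blocking join() calls stand in for the asynchronous handling done by the real dispatcher and fetcher.

import java.nio.ByteBuffer;
import org.apache.rocketmq.common.BoundaryType;
import org.apache.rocketmq.store.DispatchRequest;
import org.apache.rocketmq.tieredstore.common.AppendResult;
import org.apache.rocketmq.tieredstore.file.FlatMessageFile;
import org.apache.rocketmq.tieredstore.util.MessageFormatUtil;

public class FlatMessageFileSketch {

    public static void dispatchAndFetch(FlatMessageFile flatFile, ByteBuffer message, DispatchRequest request) {
        if (!flatFile.isFlatFileInit()) {
            flatFile.initOffset(request.getConsumeQueueOffset()); // align the consume queue base offset once
        }

        AppendResult logResult = flatFile.appendCommitLog(message);
        AppendResult cqResult = flatFile.appendConsumeQueue(request);
        if (logResult == AppendResult.SUCCESS && cqResult == AppendResult.SUCCESS) {
            flatFile.commitAsync().join(); // commit log is uploaded first, then the consume queue
        }
        flatFile.release(); // drop cached buffers and dispatch requests after the commit attempt

        // Fetch back by queue offset, or translate a store timestamp into a queue offset.
        ByteBuffer stored = flatFile.getMessageAsync(request.getConsumeQueueOffset()).join();
        long offsetByTime = flatFile.getQueueOffsetByTimeAsync(
            MessageFormatUtil.getStoreTimeStamp(stored), BoundaryType.LOWER).join();
        System.out.printf("stored=%d bytes, offsetByTime=%d%n", stored.remaining(), offsetByTime);
    }
}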
- */ -package org.apache.rocketmq.tieredstore.file; - -import com.google.common.annotations.VisibleForTesting; -import java.nio.ByteBuffer; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; -import org.apache.rocketmq.logging.org.slf4j.Logger; -import org.apache.rocketmq.logging.org.slf4j.LoggerFactory; -import org.apache.rocketmq.tieredstore.common.AppendResult; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.provider.TieredFileSegment; -import org.apache.rocketmq.tieredstore.util.MessageBufferUtil; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; - -public class TieredCommitLog { - - private static final Logger log = LoggerFactory.getLogger(TieredStoreUtil.TIERED_STORE_LOGGER_NAME); - private static final Long NOT_EXIST_MIN_OFFSET = -1L; - - /** - * item size: int, 4 bytes - * magic code: int, 4 bytes - * max store timestamp: long, 8 bytes - */ - public static final int CODA_SIZE = 4 + 8 + 4; - public static final int BLANK_MAGIC_CODE = 0xBBCCDDEE ^ 1880681586 + 8; - - private final TieredMessageStoreConfig storeConfig; - private final TieredFlatFile flatFile; - private final AtomicLong minConsumeQueueOffset; - - public TieredCommitLog(TieredFileAllocator fileQueueFactory, String filePath) { - this.storeConfig = fileQueueFactory.getStoreConfig(); - this.flatFile = fileQueueFactory.createFlatFileForCommitLog(filePath); - this.minConsumeQueueOffset = new AtomicLong(NOT_EXIST_MIN_OFFSET); - this.correctMinOffsetAsync(); - } - - @VisibleForTesting - public TieredFlatFile getFlatFile() { - return flatFile; - } - - public long getMinOffset() { - return flatFile.getMinOffset(); - } - - public long getCommitOffset() { - return flatFile.getCommitOffset(); - } - - public long getMinConsumeQueueOffset() { - return minConsumeQueueOffset.get() != NOT_EXIST_MIN_OFFSET ? minConsumeQueueOffset.get() : correctMinOffset(); - } - - public long getDispatchCommitOffset() { - return flatFile.getDispatchCommitOffset(); - } - - public long getMaxOffset() { - return flatFile.getMaxOffset(); - } - - public long getBeginTimestamp() { - TieredFileSegment firstIndexFile = flatFile.getFileByIndex(0); - if (firstIndexFile == null) { - return -1L; - } - long beginTimestamp = firstIndexFile.getMinTimestamp(); - return beginTimestamp != Long.MAX_VALUE ? 
beginTimestamp : -1; - } - - public long getEndTimestamp() { - return flatFile.getFileToWrite().getMaxTimestamp(); - } - - public long correctMinOffset() { - try { - return correctMinOffsetAsync().get(); - } catch (Exception e) { - log.error("Correct min offset failed in clean expired file", e); - } - return NOT_EXIST_MIN_OFFSET; - } - - public synchronized CompletableFuture correctMinOffsetAsync() { - if (flatFile.getFileSegmentCount() == 0) { - this.minConsumeQueueOffset.set(NOT_EXIST_MIN_OFFSET); - return CompletableFuture.completedFuture(NOT_EXIST_MIN_OFFSET); - } - - // queue offset field length is 8 - int length = MessageBufferUtil.QUEUE_OFFSET_POSITION + 8; - if (flatFile.getCommitOffset() - flatFile.getMinOffset() < length) { - this.minConsumeQueueOffset.set(NOT_EXIST_MIN_OFFSET); - return CompletableFuture.completedFuture(NOT_EXIST_MIN_OFFSET); - } - - try { - return this.flatFile.readAsync(this.flatFile.getMinOffset(), length) - .thenApply(buffer -> { - long offset = MessageBufferUtil.getQueueOffset(buffer); - minConsumeQueueOffset.set(offset); - log.debug("Correct commitlog min cq offset success, " + - "filePath={}, min cq offset={}, commitlog range={}-{}", - flatFile.getFilePath(), offset, flatFile.getMinOffset(), flatFile.getCommitOffset()); - return offset; - }) - .exceptionally(throwable -> { - log.warn("Correct commitlog min cq offset error, filePath={}, range={}-{}", - flatFile.getFilePath(), flatFile.getMinOffset(), flatFile.getCommitOffset(), throwable); - return minConsumeQueueOffset.get(); - }); - } catch (Exception e) { - log.error("Correct commitlog min cq offset error, filePath={}", flatFile.getFilePath(), e); - } - return CompletableFuture.completedFuture(minConsumeQueueOffset.get()); - } - - public AppendResult append(ByteBuffer byteBuf) { - return flatFile.append(byteBuf, MessageBufferUtil.getStoreTimeStamp(byteBuf)); - } - - public AppendResult append(ByteBuffer byteBuf, boolean commit) { - return flatFile.append(byteBuf, MessageBufferUtil.getStoreTimeStamp(byteBuf), commit); - } - - public CompletableFuture readAsync(long offset, int length) { - return flatFile.readAsync(offset, length); - } - - public void commit(boolean sync) { - flatFile.commit(sync); - } - - public void cleanExpiredFile(long expireTimestamp) { - if (flatFile.cleanExpiredFile(expireTimestamp) > 0) { - correctMinOffset(); - } - } - - public void destroyExpiredFile() { - flatFile.destroyExpiredFile(); - if (flatFile.getFileSegmentCount() == 0) { - return; - } - TieredFileSegment fileSegment = flatFile.getFileToWrite(); - try { - if (System.currentTimeMillis() - fileSegment.getMaxTimestamp() > - TimeUnit.HOURS.toMillis(storeConfig.getCommitLogRollingInterval()) - && fileSegment.getAppendPosition() > storeConfig.getCommitLogRollingMinimumSize()) { - flatFile.rollingNewFile(); - } - } catch (Exception e) { - log.error("Rolling to next file failed", e); - } - } - - public void destroy() { - flatFile.destroy(); - } -} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/TieredConsumeQueue.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/TieredConsumeQueue.java deleted file mode 100644 index 6953db032d6..00000000000 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/TieredConsumeQueue.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.rocketmq.tieredstore.file; - -import com.google.common.annotations.VisibleForTesting; -import java.nio.ByteBuffer; -import java.util.concurrent.CompletableFuture; -import org.apache.commons.lang3.tuple.Pair; -import org.apache.rocketmq.common.BoundaryType; -import org.apache.rocketmq.tieredstore.common.AppendResult; -import org.apache.rocketmq.tieredstore.provider.TieredFileSegment; - -public class TieredConsumeQueue { - - /** - * commit log offset: long, 8 bytes - * message size: int, 4 bytes - * tag hash code: long, 8 bytes - */ - public static final int CONSUME_QUEUE_STORE_UNIT_SIZE = 8 + 4 + 8; - - private final TieredFlatFile flatFile; - - public TieredConsumeQueue(TieredFileAllocator fileQueueFactory, String filePath) { - this.flatFile = fileQueueFactory.createFlatFileForConsumeQueue(filePath); - } - - public boolean isInitialized() { - return flatFile.getBaseOffset() != -1; - } - - @VisibleForTesting - public TieredFlatFile getFlatFile() { - return flatFile; - } - - public long getBaseOffset() { - return flatFile.getBaseOffset(); - } - - public void setBaseOffset(long baseOffset) { - flatFile.setBaseOffset(baseOffset); - } - - public long getMinOffset() { - return flatFile.getMinOffset(); - } - - public long getCommitOffset() { - return flatFile.getCommitOffset(); - } - - public long getMaxOffset() { - return flatFile.getMaxOffset(); - } - - public long getEndTimestamp() { - return flatFile.getFileToWrite().getMaxTimestamp(); - } - - public AppendResult append(final long offset, final int size, final long tagsCode, long timeStamp) { - return append(offset, size, tagsCode, timeStamp, false); - } - - public AppendResult append(final long offset, final int size, final long tagsCode, long timeStamp, boolean commit) { - ByteBuffer cqItem = ByteBuffer.allocate(CONSUME_QUEUE_STORE_UNIT_SIZE); - cqItem.putLong(offset); - cqItem.putInt(size); - cqItem.putLong(tagsCode); - cqItem.flip(); - return flatFile.append(cqItem, timeStamp, commit); - } - - public CompletableFuture readAsync(long offset, int length) { - return flatFile.readAsync(offset, length); - } - - public void commit(boolean sync) { - flatFile.commit(sync); - } - - public void cleanExpiredFile(long expireTimestamp) { - flatFile.cleanExpiredFile(expireTimestamp); - } - - public void destroyExpiredFile() { - flatFile.destroyExpiredFile(); - } - - protected Pair getQueueOffsetInFileByTime(long timestamp, BoundaryType boundaryType) { - TieredFileSegment fileSegment = flatFile.getFileByTime(timestamp, boundaryType); - if (fileSegment == null) { - return Pair.of(-1L, -1L); - } - return Pair.of(fileSegment.getBaseOffset() / TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE, - fileSegment.getCommitOffset() / TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE - 1); - } - - public void destroy() { - flatFile.destroy(); - } -} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/TieredFileAllocator.java 
b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/TieredFileAllocator.java deleted file mode 100644 index 51a88e57256..00000000000 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/TieredFileAllocator.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.rocketmq.tieredstore.file; - -import org.apache.rocketmq.tieredstore.common.FileSegmentType; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.provider.FileSegmentAllocator; - -public class TieredFileAllocator { - - private final FileSegmentAllocator fileSegmentAllocator; - private final TieredMessageStoreConfig storeConfig; - - public TieredFileAllocator(TieredMessageStoreConfig storeConfig) - throws ClassNotFoundException, NoSuchMethodException { - - this.storeConfig = storeConfig; - this.fileSegmentAllocator = new FileSegmentAllocator(storeConfig); - } - - public TieredMessageStoreConfig getStoreConfig() { - return storeConfig; - } - - public TieredFlatFile createFlatFileForCommitLog(String filePath) { - TieredFlatFile tieredFlatFile = - new TieredFlatFile(fileSegmentAllocator, FileSegmentType.COMMIT_LOG, filePath); - if (tieredFlatFile.getBaseOffset() == -1L) { - tieredFlatFile.setBaseOffset(0L); - } - return tieredFlatFile; - } - - public TieredFlatFile createFlatFileForConsumeQueue(String filePath) { - return new TieredFlatFile(fileSegmentAllocator, FileSegmentType.CONSUME_QUEUE, filePath); - } - - public TieredFlatFile createFlatFileForIndexFile(String filePath) { - return new TieredFlatFile(fileSegmentAllocator, FileSegmentType.INDEX, filePath); - } -} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/TieredFlatFile.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/TieredFlatFile.java deleted file mode 100644 index a41d562d108..00000000000 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/TieredFlatFile.java +++ /dev/null @@ -1,590 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.rocketmq.tieredstore.file; - -import com.google.common.annotations.VisibleForTesting; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Set; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.stream.Collectors; -import javax.annotation.Nullable; -import org.apache.rocketmq.common.BoundaryType; -import org.apache.rocketmq.logging.org.slf4j.Logger; -import org.apache.rocketmq.logging.org.slf4j.LoggerFactory; -import org.apache.rocketmq.tieredstore.common.AppendResult; -import org.apache.rocketmq.tieredstore.common.FileSegmentType; -import org.apache.rocketmq.tieredstore.exception.TieredStoreErrorCode; -import org.apache.rocketmq.tieredstore.exception.TieredStoreException; -import org.apache.rocketmq.tieredstore.metadata.FileSegmentMetadata; -import org.apache.rocketmq.tieredstore.metadata.TieredMetadataStore; -import org.apache.rocketmq.tieredstore.provider.FileSegmentAllocator; -import org.apache.rocketmq.tieredstore.provider.TieredFileSegment; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; - -public class TieredFlatFile { - - private static final Logger logger = LoggerFactory.getLogger(TieredStoreUtil.TIERED_STORE_LOGGER_NAME); - - private final String filePath; - private final FileSegmentType fileType; - private final TieredMetadataStore tieredMetadataStore; - - private volatile long baseOffset = -1L; - private final FileSegmentAllocator fileSegmentAllocator; - private final List fileSegmentList; - private final List needCommitFileSegmentList; - private final ReentrantReadWriteLock fileSegmentLock; - - public TieredFlatFile(FileSegmentAllocator fileSegmentAllocator, - FileSegmentType fileType, String filePath) { - - this.fileType = fileType; - this.filePath = filePath; - this.fileSegmentList = new LinkedList<>(); - this.fileSegmentLock = new ReentrantReadWriteLock(); - this.fileSegmentAllocator = fileSegmentAllocator; - this.needCommitFileSegmentList = new CopyOnWriteArrayList<>(); - this.tieredMetadataStore = TieredStoreUtil.getMetadataStore(fileSegmentAllocator.getStoreConfig()); - this.recoverMetadata(); - - if (fileType != FileSegmentType.INDEX) { - checkAndFixFileSize(); - } - } - - public long getBaseOffset() { - return baseOffset; - } - - public void setBaseOffset(long baseOffset) { - if (fileSegmentList.size() > 0) { - throw new IllegalStateException("Can not set base offset after file segment has been created"); - } - this.baseOffset = baseOffset; - } - - public long getMinOffset() { - fileSegmentLock.readLock().lock(); - try { - if (fileSegmentList.isEmpty()) { - return baseOffset; - } - return fileSegmentList.get(0).getBaseOffset(); - } finally { - fileSegmentLock.readLock().unlock(); - } - } - - public long getCommitOffset() { - fileSegmentLock.readLock().lock(); - try { - if (fileSegmentList.isEmpty()) { - return baseOffset; - } - return fileSegmentList.get(fileSegmentList.size() - 1).getCommitOffset(); - } finally { - fileSegmentLock.readLock().unlock(); - } - } - - public long getMaxOffset() { - fileSegmentLock.readLock().lock(); - try { - if (fileSegmentList.isEmpty()) { - return baseOffset; - } - return 
fileSegmentList.get(fileSegmentList.size() - 1).getMaxOffset(); - } finally { - fileSegmentLock.readLock().unlock(); - } - } - - public long getDispatchCommitOffset() { - fileSegmentLock.readLock().lock(); - try { - if (fileSegmentList.isEmpty()) { - return 0; - } - return fileSegmentList.get(fileSegmentList.size() - 1).getDispatchCommitOffset(); - } finally { - fileSegmentLock.readLock().unlock(); - } - } - - public String getFilePath() { - return filePath; - } - - public FileSegmentType getFileType() { - return fileType; - } - - public List getFileSegmentList() { - return fileSegmentList; - } - - protected void recoverMetadata() { - fileSegmentList.clear(); - needCommitFileSegmentList.clear(); - - tieredMetadataStore.iterateFileSegment(filePath, fileType, metadata -> { - if (metadata.getStatus() == FileSegmentMetadata.STATUS_DELETED) { - return; - } - - TieredFileSegment segment = this.newSegment(fileType, metadata.getBaseOffset(), false); - segment.initPosition(metadata.getSize()); - segment.setMinTimestamp(metadata.getBeginTimestamp()); - segment.setMaxTimestamp(metadata.getEndTimestamp()); - if (metadata.getStatus() == FileSegmentMetadata.STATUS_SEALED) { - segment.setFull(false); - } - - // TODO check coda/size - fileSegmentList.add(segment); - }); - - if (!fileSegmentList.isEmpty()) { - fileSegmentList.sort(Comparator.comparingLong(TieredFileSegment::getBaseOffset)); - baseOffset = fileSegmentList.get(0).getBaseOffset(); - needCommitFileSegmentList.addAll( - fileSegmentList.stream().filter(segment -> !segment.isFull()).collect(Collectors.toList())); - } - } - - /** - * FileQueue Status: Sealed | Sealed | Sealed | Not sealed, Allow appended && Not Full - */ - public void updateFileSegment(TieredFileSegment fileSegment) { - - FileSegmentMetadata metadata = tieredMetadataStore.getFileSegment( - this.filePath, fileSegment.getFileType(), fileSegment.getBaseOffset()); - - // Note: file segment path may not the same as file base path, use base path here. 
- if (metadata == null) { - metadata = new FileSegmentMetadata( - this.filePath, fileSegment.getBaseOffset(), fileSegment.getFileType().getType()); - metadata.setCreateTimestamp(System.currentTimeMillis()); - } - - metadata.setSize(fileSegment.getCommitPosition()); - metadata.setBeginTimestamp(fileSegment.getMinTimestamp()); - metadata.setEndTimestamp(fileSegment.getMaxTimestamp()); - - if (fileSegment.isFull() && !fileSegment.needCommit()) { - if (metadata.getStatus() == FileSegmentMetadata.STATUS_NEW) { - metadata.markSealed(); - } - } - - if (fileSegment.isClosed()) { - metadata.setStatus(FileSegmentMetadata.STATUS_DELETED); - } - - this.tieredMetadataStore.updateFileSegment(metadata); - } - - private void checkAndFixFileSize() { - for (int i = 1; i < fileSegmentList.size(); i++) { - TieredFileSegment pre = fileSegmentList.get(i - 1); - TieredFileSegment cur = fileSegmentList.get(i); - if (pre.getCommitOffset() != cur.getBaseOffset()) { - logger.warn("TieredFlatFile#checkAndFixFileSize: file segment has incorrect size: " + - "filePath:{}, file type: {}, base offset: {}", filePath, fileType, pre.getBaseOffset()); - try { - long actualSize = pre.getSize(); - if (pre.getBaseOffset() + actualSize != cur.getBaseOffset()) { - logger.error("[Bug]TieredFlatFile#checkAndFixFileSize: " + - "file segment has incorrect size and can not fix: " + - "filePath:{}, file type: {}, base offset: {}, actual size: {}, next file offset: {}", - filePath, fileType, pre.getBaseOffset(), actualSize, cur.getBaseOffset()); - continue; - } - pre.initPosition(actualSize); - this.updateFileSegment(pre); - } catch (Exception e) { - logger.error("TieredFlatFile#checkAndFixFileSize: " + - "fix file segment size failed: filePath: {}, file type: {}, base offset: {}", - filePath, fileType, pre.getBaseOffset()); - } - } - } - - if (!fileSegmentList.isEmpty()) { - TieredFileSegment lastFile = fileSegmentList.get(fileSegmentList.size() - 1); - long lastFileSize = lastFile.getSize(); - if (lastFile.getCommitPosition() != lastFileSize) { - logger.warn("TieredFlatFile#checkAndFixFileSize: fix last file {} size: origin: {}, actual: {}", - lastFile.getPath(), lastFile.getCommitOffset() - lastFile.getBaseOffset(), lastFileSize); - lastFile.initPosition(lastFileSize); - this.updateFileSegment(lastFile); - } - } - } - - private TieredFileSegment newSegment(FileSegmentType fileType, long baseOffset, boolean createMetadata) { - TieredFileSegment segment = null; - try { - segment = fileSegmentAllocator.createSegment(fileType, filePath, baseOffset); - if (fileType != FileSegmentType.INDEX) { - segment.createFile(); - } - if (createMetadata) { - this.updateFileSegment(segment); - } - } catch (Exception e) { - logger.error("create file segment failed: filePath:{}, file type: {}, base offset: {}", - filePath, fileType, baseOffset, e); - } - return segment; - } - - public void rollingNewFile() { - TieredFileSegment segment = getFileToWrite(); - segment.setFull(); - // create new segment - getFileToWrite(); - } - - public int getFileSegmentCount() { - return fileSegmentList.size(); - } - - @Nullable - public TieredFileSegment getFileByIndex(int index) { - fileSegmentLock.readLock().lock(); - try { - if (index < fileSegmentList.size()) { - return fileSegmentList.get(index); - } - return null; - } finally { - fileSegmentLock.readLock().unlock(); - } - } - - protected TieredFileSegment getFileToWrite() { - if (baseOffset == -1) { - throw new IllegalStateException("need to set base offset before create file segment"); - } - 
fileSegmentLock.readLock().lock(); - try { - if (!fileSegmentList.isEmpty()) { - TieredFileSegment fileSegment = fileSegmentList.get(fileSegmentList.size() - 1); - if (!fileSegment.isFull()) { - return fileSegment; - } - } - } finally { - fileSegmentLock.readLock().unlock(); - } - // Create new file segment - fileSegmentLock.writeLock().lock(); - try { - long offset = baseOffset; - if (!fileSegmentList.isEmpty()) { - TieredFileSegment segment = fileSegmentList.get(fileSegmentList.size() - 1); - if (!segment.isFull()) { - return segment; - } - if (segment.commit()) { - try { - this.updateFileSegment(segment); - } catch (Exception e) { - return segment; - } - } else { - return segment; - } - - offset = segment.getMaxOffset(); - } - TieredFileSegment fileSegment = this.newSegment(fileType, offset, true); - fileSegmentList.add(fileSegment); - needCommitFileSegmentList.add(fileSegment); - Collections.sort(fileSegmentList); - logger.debug("Create a new file segment: baseOffset: {}, file: {}, file type: {}", - offset, fileSegment.getPath(), fileType); - return fileSegment; - } finally { - fileSegmentLock.writeLock().unlock(); - } - } - - @Nullable - protected TieredFileSegment getFileByTime(long timestamp, BoundaryType boundaryType) { - fileSegmentLock.readLock().lock(); - try { - List segmentList = fileSegmentList.stream() - .sorted(boundaryType == BoundaryType.UPPER ? Comparator.comparingLong(TieredFileSegment::getMaxTimestamp) : Comparator.comparingLong(TieredFileSegment::getMinTimestamp)) - .filter(segment -> boundaryType == BoundaryType.UPPER ? segment.getMaxTimestamp() >= timestamp : segment.getMinTimestamp() <= timestamp) - .collect(Collectors.toList()); - if (!segmentList.isEmpty()) { - return boundaryType == BoundaryType.UPPER ? segmentList.get(0) : segmentList.get(segmentList.size() - 1); - } - if (fileSegmentList.isEmpty()) { - return null; - } - return boundaryType == BoundaryType.UPPER ? fileSegmentList.get(fileSegmentList.size() - 1) : fileSegmentList.get(0); - } finally { - fileSegmentLock.readLock().unlock(); - } - } - - public List getFileListByTime(long beginTime, long endTime) { - fileSegmentLock.readLock().lock(); - try { - return fileSegmentList.stream() - .filter(segment -> Math.max(beginTime, segment.getMinTimestamp()) <= Math.min(endTime, segment.getMaxTimestamp())) - .collect(Collectors.toList()); - } finally { - fileSegmentLock.readLock().unlock(); - } - } - - protected int getSegmentIndexByOffset(long offset) { - fileSegmentLock.readLock().lock(); - try { - if (fileSegmentList.size() == 0) { - return -1; - } - - int left = 0; - int right = fileSegmentList.size() - 1; - int mid = (left + right) / 2; - - long firstSegmentOffset = fileSegmentList.get(left).getBaseOffset(); - long lastSegmentOffset = fileSegmentList.get(right).getCommitOffset(); - long midSegmentOffset = fileSegmentList.get(mid).getBaseOffset(); - - if (offset < firstSegmentOffset || offset > lastSegmentOffset) { - return -1; - } - - while (left < right - 1) { - if (offset == midSegmentOffset) { - return mid; - } - if (offset < midSegmentOffset) { - right = mid; - } else { - left = mid; - } - mid = (left + right) / 2; - midSegmentOffset = fileSegmentList.get(mid).getBaseOffset(); - } - return offset < fileSegmentList.get(right).getBaseOffset() ? 
mid : right; - } finally { - fileSegmentLock.readLock().unlock(); - } - } - - public AppendResult append(ByteBuffer byteBuf) { - return append(byteBuf, Long.MAX_VALUE, false); - } - - public AppendResult append(ByteBuffer byteBuf, long timeStamp) { - return append(byteBuf, timeStamp, false); - } - - public AppendResult append(ByteBuffer byteBuf, long timeStamp, boolean commit) { - TieredFileSegment fileSegment = getFileToWrite(); - AppendResult result = fileSegment.append(byteBuf, timeStamp); - if (commit && result == AppendResult.BUFFER_FULL && fileSegment.commit()) { - result = fileSegment.append(byteBuf, timeStamp); - } - if (result == AppendResult.FILE_FULL) { - // write to new file - return getFileToWrite().append(byteBuf, timeStamp); - } - return result; - } - - public int cleanExpiredFile(long expireTimestamp) { - Set needToDeleteSet = new HashSet<>(); - try { - tieredMetadataStore.iterateFileSegment(filePath, fileType, metadata -> { - if (metadata.getEndTimestamp() < expireTimestamp) { - needToDeleteSet.add(metadata.getBaseOffset()); - } - }); - } catch (Exception e) { - logger.error("Clean expired file, filePath: {}, file type: {}, expire timestamp: {}", - filePath, fileType, expireTimestamp); - } - - if (needToDeleteSet.isEmpty()) { - return 0; - } - - fileSegmentLock.writeLock().lock(); - try { - for (int i = 0; i < fileSegmentList.size(); i++) { - TieredFileSegment fileSegment = fileSegmentList.get(i); - try { - if (needToDeleteSet.contains(fileSegment.getBaseOffset())) { - fileSegment.close(); - fileSegmentList.remove(fileSegment); - needCommitFileSegmentList.remove(fileSegment); - i--; - this.updateFileSegment(fileSegment); - logger.debug("Clean expired file, filePath: {}", fileSegment.getPath()); - } else { - break; - } - } catch (Exception e) { - logger.error("Clean expired file failed: filePath: {}, file type: {}, expire timestamp: {}", - fileSegment.getPath(), fileSegment.getFileType(), expireTimestamp, e); - } - } - if (fileSegmentList.size() > 0) { - baseOffset = fileSegmentList.get(0).getBaseOffset(); - } else if (fileType == FileSegmentType.CONSUME_QUEUE) { - baseOffset = -1; - } else { - baseOffset = 0; - } - } finally { - fileSegmentLock.writeLock().unlock(); - } - return needToDeleteSet.size(); - } - - @VisibleForTesting - protected List getNeedCommitFileSegmentList() { - return needCommitFileSegmentList; - } - - public void destroyExpiredFile() { - try { - tieredMetadataStore.iterateFileSegment(filePath, fileType, metadata -> { - if (metadata.getStatus() == FileSegmentMetadata.STATUS_DELETED) { - try { - TieredFileSegment fileSegment = - this.newSegment(fileType, metadata.getBaseOffset(), false); - fileSegment.destroyFile(); - if (!fileSegment.exists()) { - tieredMetadataStore.deleteFileSegment(filePath, fileType, metadata.getBaseOffset()); - } - } catch (Exception e) { - logger.error("Destroyed expired file failed, file path: {}, file type: {}", - filePath, fileType, e); - } - } - }); - } catch (Exception e) { - logger.error("Destroyed expired file, file path: {}, file type: {}", filePath, fileType); - } - } - - public void commit(boolean sync) { - ArrayList> futureList = new ArrayList<>(); - try { - for (TieredFileSegment segment : needCommitFileSegmentList) { - if (segment.isClosed()) { - continue; - } - futureList.add(segment - .commitAsync() - .thenAccept(success -> { - try { - this.updateFileSegment(segment); - } catch (Exception e) { - // TODO handle update segment metadata failed exception - logger.error("Update file segment metadata failed: " + - "file 
path: {}, file type: {}, base offset: {}", - filePath, fileType, segment.getBaseOffset(), e); - } - if (segment.isFull() && !segment.needCommit()) { - needCommitFileSegmentList.remove(segment); - } - }) - ); - } - } catch (Exception e) { - logger.error("Commit file segment failed: topic: {}, queue: {}, file type: {}", filePath, fileType, e); - } - if (sync) { - CompletableFuture.allOf(futureList.toArray(new CompletableFuture[0])).join(); - } - } - - public CompletableFuture readAsync(long offset, int length) { - int index = getSegmentIndexByOffset(offset); - if (index == -1) { - String errorMsg = String.format("TieredFlatFile#readAsync: offset is illegal, " + - "file path: %s, file type: %s, start: %d, length: %d, file num: %d", - filePath, fileType, offset, length, fileSegmentList.size()); - logger.error(errorMsg); - throw new TieredStoreException(TieredStoreErrorCode.ILLEGAL_OFFSET, errorMsg); - } - TieredFileSegment fileSegment1; - TieredFileSegment fileSegment2 = null; - fileSegmentLock.readLock().lock(); - try { - fileSegment1 = fileSegmentList.get(index); - if (offset + length > fileSegment1.getCommitOffset()) { - if (fileSegmentList.size() > index + 1) { - fileSegment2 = fileSegmentList.get(index + 1); - } - } - } finally { - fileSegmentLock.readLock().unlock(); - } - if (fileSegment2 == null) { - return fileSegment1.readAsync(offset - fileSegment1.getBaseOffset(), length); - } - int segment1Length = (int) (fileSegment1.getCommitOffset() - offset); - return fileSegment1.readAsync(offset - fileSegment1.getBaseOffset(), segment1Length) - .thenCombine(fileSegment2.readAsync(0, length - segment1Length), (buffer1, buffer2) -> { - ByteBuffer compositeBuffer = ByteBuffer.allocate(buffer1.remaining() + buffer2.remaining()); - compositeBuffer.put(buffer1).put(buffer2); - compositeBuffer.flip(); - return compositeBuffer; - }); - } - - public void destroy() { - fileSegmentLock.writeLock().lock(); - try { - for (TieredFileSegment fileSegment : fileSegmentList) { - fileSegment.close(); - try { - this.updateFileSegment(fileSegment); - } catch (Exception e) { - logger.error("TieredFlatFile#destroy: mark file segment: {} is deleted failed", fileSegment.getPath(), e); - } - fileSegment.destroyFile(); - if (!fileSegment.exists()) { - tieredMetadataStore.deleteFileSegment(filePath, fileType, fileSegment.getBaseOffset()); - } - } - fileSegmentList.clear(); - needCommitFileSegmentList.clear(); - } finally { - fileSegmentLock.writeLock().unlock(); - } - } -} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/TieredFlatFileManager.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/TieredFlatFileManager.java deleted file mode 100644 index ffe0836f126..00000000000 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/file/TieredFlatFileManager.java +++ /dev/null @@ -1,300 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.rocketmq.tieredstore.file; - -import com.google.common.base.Stopwatch; -import com.google.common.collect.ImmutableList; -import java.util.ArrayList; -import java.util.List; -import java.util.Random; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; -import javax.annotation.Nullable; -import org.apache.rocketmq.common.constant.LoggerName; -import org.apache.rocketmq.common.message.MessageQueue; -import org.apache.rocketmq.logging.org.slf4j.Logger; -import org.apache.rocketmq.logging.org.slf4j.LoggerFactory; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.common.TieredStoreExecutor; -import org.apache.rocketmq.tieredstore.index.IndexService; -import org.apache.rocketmq.tieredstore.index.IndexStoreService; -import org.apache.rocketmq.tieredstore.metadata.TieredMetadataStore; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; - -public class TieredFlatFileManager { - - private static final Logger BROKER_LOG = LoggerFactory.getLogger(LoggerName.BROKER_LOGGER_NAME); - private static final Logger logger = LoggerFactory.getLogger(TieredStoreUtil.TIERED_STORE_LOGGER_NAME); - - private static volatile TieredFlatFileManager instance; - private static volatile IndexStoreService indexStoreService; - - private final TieredMetadataStore metadataStore; - private final TieredMessageStoreConfig storeConfig; - private final TieredFileAllocator tieredFileAllocator; - private final ConcurrentMap flatFileConcurrentMap; - - public TieredFlatFileManager(TieredMessageStoreConfig storeConfig) - throws ClassNotFoundException, NoSuchMethodException { - - this.storeConfig = storeConfig; - this.metadataStore = TieredStoreUtil.getMetadataStore(storeConfig); - this.tieredFileAllocator = new TieredFileAllocator(storeConfig); - this.flatFileConcurrentMap = new ConcurrentHashMap<>(); - this.doScheduleTask(); - } - - public static TieredFlatFileManager getInstance(TieredMessageStoreConfig storeConfig) { - if (storeConfig == null || instance != null) { - return instance; - } - synchronized (TieredFlatFileManager.class) { - if (instance == null) { - try { - instance = new TieredFlatFileManager(storeConfig); - } catch (Exception e) { - logger.error("Construct FlatFileManager instance error", e); - } - } - } - return instance; - } - - public static IndexService getTieredIndexService(TieredMessageStoreConfig storeConfig) { - if (storeConfig == null) { - return indexStoreService; - } - - if (indexStoreService == null) { - synchronized (TieredFlatFileManager.class) { - if (indexStoreService == null) { - try { - String filePath = TieredStoreUtil.toPath(new MessageQueue( - TieredStoreUtil.RMQ_SYS_TIERED_STORE_INDEX_TOPIC, storeConfig.getBrokerName(), 0)); - indexStoreService = new IndexStoreService(new TieredFileAllocator(storeConfig), filePath); - indexStoreService.start(); - } catch (Exception e) { - 
logger.error("Construct FlatFileManager indexFile error", e); - } - } - } - } - return indexStoreService; - } - - public void doCommit() { - Random random = new Random(); - for (CompositeQueueFlatFile flatFile : deepCopyFlatFileToList()) { - int delay = random.nextInt(storeConfig.getMaxCommitJitter()); - TieredStoreExecutor.commitExecutor.schedule(() -> { - try { - flatFile.commitCommitLog(); - } catch (Throwable e) { - MessageQueue mq = flatFile.getMessageQueue(); - logger.error("Commit commitLog periodically failed: topic: {}, queue: {}", - mq.getTopic(), mq.getQueueId(), e); - } - }, delay, TimeUnit.MILLISECONDS); - TieredStoreExecutor.commitExecutor.schedule(() -> { - try { - flatFile.commitConsumeQueue(); - } catch (Throwable e) { - MessageQueue mq = flatFile.getMessageQueue(); - logger.error("Commit consumeQueue periodically failed: topic: {}, queue: {}", - mq.getTopic(), mq.getQueueId(), e); - } - }, delay, TimeUnit.MILLISECONDS); - } - } - - public void doCleanExpiredFile() { - long expiredTimeStamp = System.currentTimeMillis() - - TimeUnit.HOURS.toMillis(storeConfig.getTieredStoreFileReservedTime()); - for (CompositeQueueFlatFile flatFile : deepCopyFlatFileToList()) { - TieredStoreExecutor.cleanExpiredFileExecutor.submit(() -> { - try { - flatFile.getCompositeFlatFileLock().lock(); - flatFile.cleanExpiredFile(expiredTimeStamp); - flatFile.destroyExpiredFile(); - } catch (Throwable t) { - logger.error("Do Clean expired file error, topic={}, queueId={}", - flatFile.getMessageQueue().getTopic(), flatFile.getMessageQueue().getQueueId(), t); - } finally { - flatFile.getCompositeFlatFileLock().unlock(); - } - }); - } - } - - private void doScheduleTask() { - TieredStoreExecutor.commonScheduledExecutor.scheduleWithFixedDelay(() -> { - try { - doCommit(); - } catch (Throwable e) { - logger.error("Commit flat file periodically failed: ", e); - } - }, 60, 60, TimeUnit.SECONDS); - - TieredStoreExecutor.commonScheduledExecutor.scheduleWithFixedDelay(() -> { - try { - doCleanExpiredFile(); - } catch (Throwable e) { - logger.error("Clean expired flat file failed: ", e); - } - }, 30, 30, TimeUnit.SECONDS); - } - - public boolean load() { - Stopwatch stopwatch = Stopwatch.createStarted(); - try { - flatFileConcurrentMap.clear(); - this.recoverSequenceNumber(); - this.recoverTieredFlatFile(); - logger.info("Message store recover end, total cost={}ms", stopwatch.elapsed(TimeUnit.MILLISECONDS)); - } catch (Exception e) { - long costTime = stopwatch.elapsed(TimeUnit.MILLISECONDS); - logger.info("Message store recover error, total cost={}ms", costTime); - BROKER_LOG.error("Message store recover error, total cost={}ms", costTime, e); - return false; - } - return true; - } - - public void recoverSequenceNumber() { - AtomicLong topicSequenceNumber = new AtomicLong(); - metadataStore.iterateTopic(topicMetadata -> { - if (topicMetadata != null && topicMetadata.getTopicId() > 0) { - topicSequenceNumber.set(Math.max(topicSequenceNumber.get(), topicMetadata.getTopicId())); - } - }); - metadataStore.setTopicSequenceNumber(topicSequenceNumber.incrementAndGet()); - } - - public void recoverTieredFlatFile() { - Semaphore semaphore = new Semaphore((int) (TieredStoreExecutor.QUEUE_CAPACITY * 0.75)); - List> futures = new ArrayList<>(); - metadataStore.iterateTopic(topicMetadata -> { - try { - semaphore.acquire(); - CompletableFuture future = CompletableFuture.runAsync(() -> { - try { - Stopwatch subWatch = Stopwatch.createStarted(); - if (topicMetadata.getStatus() != 0) { - return; - } - AtomicLong queueCount = new 
AtomicLong(); - metadataStore.iterateQueue(topicMetadata.getTopic(), queueMetadata -> { - this.getOrCreateFlatFileIfAbsent(new MessageQueue(topicMetadata.getTopic(), - storeConfig.getBrokerName(), queueMetadata.getQueue().getQueueId())); - queueCount.incrementAndGet(); - }); - - if (queueCount.get() == 0L) { - metadataStore.deleteTopic(topicMetadata.getTopic()); - } else { - logger.info("Recover TopicFlatFile, topic: {}, queueCount: {}, cost: {}ms", - topicMetadata.getTopic(), queueCount.get(), subWatch.elapsed(TimeUnit.MILLISECONDS)); - } - } catch (Exception e) { - logger.error("Recover TopicFlatFile error, topic: {}", topicMetadata.getTopic(), e); - } finally { - semaphore.release(); - } - }, TieredStoreExecutor.commitExecutor); - futures.add(future); - } catch (Exception e) { - throw new RuntimeException(e); - } - }); - CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join(); - } - - public void cleanup() { - flatFileConcurrentMap.clear(); - cleanStaticReference(); - } - - private static void cleanStaticReference() { - instance = null; - indexStoreService = null; - } - - @Nullable - public CompositeQueueFlatFile getOrCreateFlatFileIfAbsent(MessageQueue messageQueue) { - return flatFileConcurrentMap.computeIfAbsent(messageQueue, mq -> { - try { - logger.debug("Create new TopicFlatFile, topic: {}, queueId: {}", - messageQueue.getTopic(), messageQueue.getQueueId()); - return new CompositeQueueFlatFile(tieredFileAllocator, mq); - } catch (Exception e) { - logger.debug("Create new TopicFlatFile failed, topic: {}, queueId: {}", - messageQueue.getTopic(), messageQueue.getQueueId(), e); - } - return null; - }); - } - - public CompositeQueueFlatFile getFlatFile(MessageQueue messageQueue) { - return flatFileConcurrentMap.get(messageQueue); - } - - public ImmutableList deepCopyFlatFileToList() { - return ImmutableList.copyOf(flatFileConcurrentMap.values()); - } - - public void shutdown() { - if (indexStoreService != null) { - indexStoreService.shutdown(); - } - for (CompositeFlatFile flatFile : deepCopyFlatFileToList()) { - flatFile.shutdown(); - } - } - - public void destroy() { - if (indexStoreService != null) { - indexStoreService.destroy(); - } - ImmutableList flatFileList = deepCopyFlatFileToList(); - cleanup(); - for (CompositeFlatFile flatFile : flatFileList) { - flatFile.destroy(); - } - } - - public void destroyCompositeFile(MessageQueue mq) { - if (mq == null) { - return; - } - - // delete memory reference - CompositeQueueFlatFile flatFile = flatFileConcurrentMap.remove(mq); - if (flatFile != null) { - MessageQueue messageQueue = flatFile.getMessageQueue(); - logger.info("TieredFlatFileManager#destroyCompositeFile: " + - "try to destroy composite flat file: topic: {}, queueId: {}", - messageQueue.getTopic(), messageQueue.getQueueId()); - - // delete queue metadata - flatFile.destroy(); - } - } -} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/index/IndexFile.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/index/IndexFile.java index d131b9b53ea..63d1193d6a9 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/index/IndexFile.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/index/IndexFile.java @@ -29,6 +29,8 @@ enum IndexStatusEnum { long getTimestamp(); + long getEndTimestamp(); + IndexStatusEnum getFileStatus(); ByteBuffer doCompaction(); diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/index/IndexService.java 
b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/index/IndexService.java index d4eb854a2e8..70c36c88042 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/index/IndexService.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/index/IndexService.java @@ -24,6 +24,8 @@ public interface IndexService { + void start(); + /** * Puts a key into the index. * diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/index/IndexStoreFile.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/index/IndexStoreFile.java index def5c8f2d06..180399332e4 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/index/IndexStoreFile.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/index/IndexStoreFile.java @@ -35,15 +35,16 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; import org.apache.commons.lang3.StringUtils; -import org.apache.rocketmq.logging.org.slf4j.Logger; -import org.apache.rocketmq.logging.org.slf4j.LoggerFactory; import org.apache.rocketmq.store.logfile.DefaultMappedFile; import org.apache.rocketmq.store.logfile.MappedFile; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; +import org.apache.rocketmq.tieredstore.MessageStoreExecutor; import org.apache.rocketmq.tieredstore.common.AppendResult; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.common.TieredStoreExecutor; -import org.apache.rocketmq.tieredstore.provider.TieredFileSegment; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; +import org.apache.rocketmq.tieredstore.provider.FileSegment; +import org.apache.rocketmq.tieredstore.provider.PosixFileSegment; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.apache.rocketmq.tieredstore.index.IndexFile.IndexStatusEnum.SEALED; import static org.apache.rocketmq.tieredstore.index.IndexFile.IndexStatusEnum.UNSEALED; @@ -57,7 +58,7 @@ */ public class IndexStoreFile implements IndexFile { - private static final Logger log = LoggerFactory.getLogger(TieredStoreUtil.TIERED_STORE_LOGGER_NAME); + private static final Logger log = LoggerFactory.getLogger(MessageStoreUtil.TIERED_STORE_LOGGER_NAME); /** * header format: @@ -93,9 +94,9 @@ public class IndexStoreFile implements IndexFile { private MappedFile mappedFile; private ByteBuffer byteBuffer; private MappedFile compactMappedFile; - private TieredFileSegment fileSegment; + private FileSegment fileSegment; - public IndexStoreFile(TieredMessageStoreConfig storeConfig, long timestamp) throws IOException { + public IndexStoreFile(MessageStoreConfig storeConfig, long timestamp) throws IOException { this.hashSlotMaxCount = storeConfig.getTieredStoreIndexFileMaxHashSlotNum(); this.indexItemMaxCount = storeConfig.getTieredStoreIndexFileMaxIndexNum(); this.fileStatus = new AtomicReference<>(UNSEALED); @@ -112,7 +113,7 @@ public IndexStoreFile(TieredMessageStoreConfig storeConfig, long timestamp) thro this.flushNewMetadata(byteBuffer, indexItemMaxCount == this.indexItemCount.get() + 1); } - public IndexStoreFile(TieredMessageStoreConfig storeConfig, TieredFileSegment fileSegment) { + public IndexStoreFile(MessageStoreConfig storeConfig, FileSegment fileSegment) { this.fileSegment = fileSegment; this.fileStatus = new AtomicReference<>(UPLOAD); this.fileReadWriteLock = new ReentrantReadWriteLock(); @@ -130,6 +131,7 @@ public long 
getTimestamp() { return this.beginTimestamp.get(); } + @Override public long getEndTimestamp() { return this.endTimestamp.get(); } @@ -176,6 +178,11 @@ protected int getItemPosition(int itemIndex) { return INDEX_HEADER_SIZE + hashSlotMaxCount * HASH_SLOT_SIZE + itemIndex * IndexItem.INDEX_ITEM_SIZE; } + @Override + public void start() { + + } + @Override public AppendResult putKey( String topic, int topicId, int queueId, Set keySet, long offset, int size, long timestamp) { @@ -301,7 +308,7 @@ protected CompletableFuture> queryAsyncFromUnsealedFile( mappedFile.release(); } return result; - }, TieredStoreExecutor.fetchDataExecutor); + }, MessageStoreExecutor.getInstance().bufferFetchExecutor); } protected CompletableFuture> queryAsyncFromSegmentFile( @@ -455,6 +462,9 @@ public void shutdown() { try { fileReadWriteLock.writeLock().lock(); this.fileStatus.set(IndexStatusEnum.SHUTDOWN); + if (this.fileSegment != null && this.fileSegment instanceof PosixFileSegment) { + ((PosixFileSegment) this.fileSegment).close(); + } if (this.mappedFile != null) { this.mappedFile.shutdown(TimeUnit.SECONDS.toMillis(10)); } @@ -483,7 +493,7 @@ public void destroy() { if (this.compactMappedFile != null) { this.compactMappedFile.destroy(TimeUnit.SECONDS.toMillis(10)); } - log.info("IndexStoreService destroy local file, timestamp: {}, status: {}", this.getTimestamp(), fileStatus.get()); + log.debug("IndexStoreService destroy local file, timestamp: {}, status: {}", this.getTimestamp(), fileStatus.get()); break; case UPLOAD: log.warn("[BUG] IndexStoreService destroy remote file, timestamp: {}", this.getTimestamp()); diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/index/IndexStoreService.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/index/IndexStoreService.java index e99ea0de182..9e53d97b98c 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/index/IndexStoreService.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/index/IndexStoreService.java @@ -38,20 +38,20 @@ import org.apache.commons.lang3.StringUtils; import org.apache.rocketmq.common.ServiceThread; import org.apache.rocketmq.common.UtilAll; -import org.apache.rocketmq.logging.org.slf4j.Logger; -import org.apache.rocketmq.logging.org.slf4j.LoggerFactory; import org.apache.rocketmq.store.logfile.DefaultMappedFile; import org.apache.rocketmq.store.logfile.MappedFile; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; import org.apache.rocketmq.tieredstore.common.AppendResult; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.file.TieredFileAllocator; -import org.apache.rocketmq.tieredstore.file.TieredFlatFile; -import org.apache.rocketmq.tieredstore.provider.TieredFileSegment; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; +import org.apache.rocketmq.tieredstore.file.FlatAppendFile; +import org.apache.rocketmq.tieredstore.file.FlatFileFactory; +import org.apache.rocketmq.tieredstore.provider.FileSegment; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class IndexStoreService extends ServiceThread implements IndexService { - private static final Logger log = LoggerFactory.getLogger(TieredStoreUtil.TIERED_STORE_LOGGER_NAME); + private static final Logger log = LoggerFactory.getLogger(MessageStoreUtil.TIERED_STORE_LOGGER_NAME); public static final String FILE_DIRECTORY_NAME = "tiered_index_file"; public static final String 
FILE_COMPACTED_DIRECTORY_NAME = "compacting"; @@ -60,20 +60,20 @@ public class IndexStoreService extends ServiceThread implements IndexService { * File status in table example: * upload, upload, upload, sealed, sealed, unsealed */ - private final TieredMessageStoreConfig storeConfig; + private final MessageStoreConfig storeConfig; private final ConcurrentSkipListMap timeStoreTable; private final ReadWriteLock readWriteLock; private final AtomicLong compactTimestamp; private final String filePath; - private final TieredFileAllocator fileAllocator; + private final FlatFileFactory fileAllocator; private IndexFile currentWriteFile; - private TieredFlatFile flatFile; + private FlatAppendFile flatAppendFile; - public IndexStoreService(TieredFileAllocator fileAllocator, String filePath) { - this.storeConfig = fileAllocator.getStoreConfig(); + public IndexStoreService(FlatFileFactory flatFileFactory, String filePath) { + this.storeConfig = flatFileFactory.getStoreConfig(); this.filePath = filePath; - this.fileAllocator = fileAllocator; + this.fileAllocator = flatFileFactory; this.timeStoreTable = new ConcurrentSkipListMap<>(); this.compactTimestamp = new AtomicLong(0L); this.readWriteLock = new ReentrantReadWriteLock(); @@ -139,22 +139,20 @@ private void recover() { this.setCompactTimestamp(this.timeStoreTable.firstKey() - 1); // recover remote - this.flatFile = fileAllocator.createFlatFileForIndexFile(filePath); - if (this.flatFile.getBaseOffset() == -1) { - this.flatFile.setBaseOffset(0); - } + this.flatAppendFile = fileAllocator.createFlatFileForIndexFile(filePath); - for (TieredFileSegment fileSegment : flatFile.getFileSegmentList()) { + for (FileSegment fileSegment : flatAppendFile.getFileSegmentList()) { IndexFile indexFile = new IndexStoreFile(storeConfig, fileSegment); IndexFile localFile = timeStoreTable.get(indexFile.getTimestamp()); if (localFile != null) { localFile.destroy(); } timeStoreTable.put(indexFile.getTimestamp(), indexFile); - log.info("IndexStoreService recover load remote file, timestamp: {}", indexFile.getTimestamp()); + log.info("IndexStoreService recover load remote file, timestamp: {}, end timestamp: {}", + indexFile.getTimestamp(), indexFile.getEndTimestamp()); } - log.info("IndexStoreService recover finished, entrySize: {}, cost: {}ms, directory: {}", + log.info("IndexStoreService recover finished, total: {}, cost: {}ms, directory: {}", timeStoreTable.size(), stopwatch.elapsed(TimeUnit.MILLISECONDS), dir.getAbsolutePath()); } @@ -201,7 +199,8 @@ public AppendResult putKey( if (AppendResult.SUCCESS.equals(result)) { return AppendResult.SUCCESS; } else if (AppendResult.FILE_FULL.equals(result)) { - this.createNewIndexFile(timestamp); + // use current time to ensure the order of file + this.createNewIndexFile(System.currentTimeMillis()); } } @@ -253,51 +252,67 @@ public CompletableFuture> queryAsync( return future; } - public void doCompactThenUploadFile(IndexFile indexFile) { + public boolean doCompactThenUploadFile(IndexFile indexFile) { if (IndexFile.IndexStatusEnum.UPLOAD.equals(indexFile.getFileStatus())) { log.error("IndexStoreService file status not correct, so skip, timestamp: {}, status: {}", indexFile.getTimestamp(), indexFile.getFileStatus()); indexFile.destroy(); - return; + return true; } Stopwatch stopwatch = Stopwatch.createStarted(); - ByteBuffer byteBuffer = indexFile.doCompaction(); - if (byteBuffer == null) { - log.error("IndexStoreService found compaction buffer is null, timestamp: {}", indexFile.getTimestamp()); - return; + if 
(flatAppendFile.getCommitOffset() == flatAppendFile.getAppendOffset()) { + ByteBuffer byteBuffer = indexFile.doCompaction(); + if (byteBuffer == null) { + log.error("IndexStoreService found compaction buffer is null, timestamp: {}", indexFile.getTimestamp()); + return false; + } + flatAppendFile.rollingNewFile(Math.max(0L, flatAppendFile.getAppendOffset())); + flatAppendFile.append(byteBuffer, indexFile.getTimestamp()); + flatAppendFile.getFileToWrite().setMinTimestamp(indexFile.getTimestamp()); + flatAppendFile.getFileToWrite().setMaxTimestamp(indexFile.getEndTimestamp()); } - flatFile.append(byteBuffer); - flatFile.commit(true); - - TieredFileSegment fileSegment = flatFile.getFileByIndex(flatFile.getFileSegmentCount() - 1); - if (fileSegment == null || fileSegment.getMinTimestamp() != indexFile.getTimestamp()) { - log.warn("IndexStoreService submit compacted file to server failed, timestamp: {}", indexFile.getTimestamp()); - return; + boolean result = flatAppendFile.commitAsync().join(); + + List fileSegmentList = flatAppendFile.getFileSegmentList(); + FileSegment fileSegment = fileSegmentList.get(fileSegmentList.size() - 1); + if (!result || fileSegment == null || fileSegment.getMinTimestamp() != indexFile.getTimestamp()) { + log.warn("IndexStoreService upload compacted file error, timestamp: {}", indexFile.getTimestamp()); + return false; + } else { + log.info("IndexStoreService upload compacted file success, timestamp: {}", indexFile.getTimestamp()); } + readWriteLock.writeLock().lock(); try { - readWriteLock.writeLock().lock(); IndexFile storeFile = new IndexStoreFile(storeConfig, fileSegment); - timeStoreTable.put(indexFile.getTimestamp(), storeFile); + timeStoreTable.put(storeFile.getTimestamp(), storeFile); indexFile.destroy(); } catch (Exception e) { - log.error("IndexStoreService switch file failed, timestamp: {}, cost: {}ms", + log.error("IndexStoreService rolling file error, timestamp: {}, cost: {}ms", indexFile.getTimestamp(), stopwatch.elapsed(TimeUnit.MILLISECONDS), e); } finally { readWriteLock.writeLock().unlock(); } + return true; } public void destroyExpiredFile(long expireTimestamp) { - flatFile.cleanExpiredFile(expireTimestamp); - flatFile.destroyExpiredFile(); + // delete file in time store table + readWriteLock.writeLock().lock(); + try { + timeStoreTable.entrySet().removeIf(entry -> + entry.getKey() < expireTimestamp && + IndexFile.IndexStatusEnum.UPLOAD.equals(entry.getValue().getFileStatus())); + flatAppendFile.destroyExpiredFile(expireTimestamp); + } finally { + readWriteLock.writeLock().unlock(); + } } public void destroy() { + readWriteLock.writeLock().lock(); try { - readWriteLock.writeLock().lock(); - // delete local store file for (Map.Entry entry : timeStoreTable.entrySet()) { IndexFile indexFile = entry.getValue(); @@ -306,10 +321,9 @@ public void destroy() { } indexFile.destroy(); } - // delete remote - if (flatFile != null) { - flatFile.destroy(); + if (flatAppendFile != null) { + flatAppendFile.destroy(); } } catch (Exception e) { log.error("IndexStoreService destroy all file error", e); @@ -325,48 +339,50 @@ public String getServiceName() { public void setCompactTimestamp(long timestamp) { this.compactTimestamp.set(timestamp); - log.info("IndexStoreService compact timestamp has been set to: {}", timestamp); + log.debug("IndexStoreService set compact timestamp to: {}", timestamp); } protected IndexFile getNextSealedFile() { + Map.Entry entry = + this.timeStoreTable.higherEntry(this.compactTimestamp.get()); + if (entry != null && entry.getKey() < 
this.timeStoreTable.lastKey()) { + return entry.getValue(); + } + return null; + } + + @Override + public void shutdown() { + super.shutdown(); + readWriteLock.writeLock().lock(); try { - Map.Entry entry = - this.timeStoreTable.higherEntry(this.compactTimestamp.get()); - if (entry != null && entry.getKey() < this.timeStoreTable.lastKey()) { - return entry.getValue(); + for (Map.Entry entry : timeStoreTable.entrySet()) { + entry.getValue().shutdown(); } - } catch (Throwable e) { - log.error("Error occurred in " + getServiceName(), e); + this.timeStoreTable.clear(); + } catch (Exception e) { + log.error("IndexStoreService shutdown error", e); + } finally { + readWriteLock.writeLock().unlock(); } - return null; } @Override public void run() { - log.info(this.getServiceName() + " service started"); while (!this.isStopped()) { long expireTimestamp = System.currentTimeMillis() - TimeUnit.HOURS.toMillis(storeConfig.getTieredStoreFileReservedTime()); this.destroyExpiredFile(expireTimestamp); IndexFile indexFile = this.getNextSealedFile(); - if (indexFile == null) { - this.waitForRunning(TimeUnit.SECONDS.toMillis(10)); - continue; + if (indexFile != null) { + if (this.doCompactThenUploadFile(indexFile)) { + this.setCompactTimestamp(indexFile.getTimestamp()); + continue; + } } - this.doCompactThenUploadFile(indexFile); - this.setCompactTimestamp(indexFile.getTimestamp()); + this.waitForRunning(TimeUnit.SECONDS.toMillis(10)); } log.info(this.getServiceName() + " service shutdown"); } - - @Override - public void shutdown() { - super.shutdown(); - for (Map.Entry entry : timeStoreTable.entrySet()) { - entry.getValue().shutdown(); - } - this.timeStoreTable.clear(); - log.info("IndexStoreService shutdown gracefully"); - } } diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/TieredMetadataManager.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/DefaultMetadataStore.java similarity index 73% rename from tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/TieredMetadataManager.java rename to tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/DefaultMetadataStore.java index f091020241a..630276a97f6 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/TieredMetadataManager.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/DefaultMetadataStore.java @@ -29,27 +29,31 @@ import java.util.function.Consumer; import org.apache.rocketmq.common.ConfigManager; import org.apache.rocketmq.common.message.MessageQueue; +import org.apache.rocketmq.remoting.protocol.RemotingSerializable; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; import org.apache.rocketmq.tieredstore.common.FileSegmentType; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; +import org.apache.rocketmq.tieredstore.metadata.entity.FileSegmentMetadata; +import org.apache.rocketmq.tieredstore.metadata.entity.QueueMetadata; +import org.apache.rocketmq.tieredstore.metadata.entity.TopicMetadata; -public class TieredMetadataManager extends ConfigManager implements TieredMetadataStore { +public class DefaultMetadataStore extends ConfigManager implements MetadataStore { private static final int DEFAULT_CAPACITY = 1024; private static final String DEFAULT_CONFIG_NAME = "config"; private static final String DEFAULT_FILE_NAME = "tieredStoreMetadata.json"; private final AtomicLong topicSequenceNumber; - private final TieredMessageStoreConfig storeConfig; + private final MessageStoreConfig 
storeConfig; private final ConcurrentMap topicMetadataTable; private final ConcurrentMap> queueMetadataTable; - // Declare concurrent mapping tables to store file segment metadata for different types of files + // Declare concurrent mapping tables to store file segment metadata // Key: filePath -> Value: private final ConcurrentMap> commitLogFileSegmentTable; private final ConcurrentMap> consumeQueueFileSegmentTable; private final ConcurrentMap> indexFileSegmentTable; - public TieredMetadataManager(TieredMessageStoreConfig storeConfig) { + public DefaultMetadataStore(MessageStoreConfig storeConfig) { this.storeConfig = storeConfig; this.topicSequenceNumber = new AtomicLong(-1L); this.topicMetadataTable = new ConcurrentHashMap<>(DEFAULT_CAPACITY); @@ -89,6 +93,11 @@ public String configFilePath() { return Paths.get(storeConfig.getStorePathRootDir(), DEFAULT_CONFIG_NAME, DEFAULT_FILE_NAME).toString(); } + @Override + public boolean load() { + return super.load(); + } + @Override public void decode(String jsonString) { if (jsonString != null) { @@ -109,11 +118,6 @@ public void decode(String jsonString) { } } - @Override - public void setTopicSequenceNumber(long topicSequenceNumber) { - this.topicSequenceNumber.set(topicSequenceNumber); - } - @Override public TopicMetadata getTopic(String topic) { return topicMetadataTable.get(topic); @@ -274,4 +278,79 @@ public void destroy() { indexFileSegmentTable.clear(); persist(); } + + static class TieredMetadataSerializeWrapper extends RemotingSerializable { + + private AtomicLong topicSerialNumber = new AtomicLong(0L); + + private ConcurrentMap topicMetadataTable; + private ConcurrentMap> queueMetadataTable; + + // Declare concurrent mapping tables to store file segment metadata + // Key: filePath -> Value: + private ConcurrentMap> commitLogFileSegmentTable; + private ConcurrentMap> consumeQueueFileSegmentTable; + private ConcurrentMap> indexFileSegmentTable; + + public TieredMetadataSerializeWrapper() { + this.topicMetadataTable = new ConcurrentHashMap<>(DEFAULT_CAPACITY); + this.queueMetadataTable = new ConcurrentHashMap<>(DEFAULT_CAPACITY); + this.commitLogFileSegmentTable = new ConcurrentHashMap<>(DEFAULT_CAPACITY); + this.consumeQueueFileSegmentTable = new ConcurrentHashMap<>(DEFAULT_CAPACITY); + this.indexFileSegmentTable = new ConcurrentHashMap<>(DEFAULT_CAPACITY); + } + + public AtomicLong getTopicSerialNumber() { + return topicSerialNumber; + } + + public void setTopicSerialNumber(AtomicLong topicSerialNumber) { + this.topicSerialNumber = topicSerialNumber; + } + + public ConcurrentMap getTopicMetadataTable() { + return topicMetadataTable; + } + + public void setTopicMetadataTable( + ConcurrentMap topicMetadataTable) { + this.topicMetadataTable = topicMetadataTable; + } + + public ConcurrentMap> getQueueMetadataTable() { + return queueMetadataTable; + } + + public void setQueueMetadataTable( + ConcurrentMap> queueMetadataTable) { + this.queueMetadataTable = queueMetadataTable; + } + + public ConcurrentMap> getCommitLogFileSegmentTable() { + return commitLogFileSegmentTable; + } + + public void setCommitLogFileSegmentTable( + ConcurrentMap> commitLogFileSegmentTable) { + this.commitLogFileSegmentTable = commitLogFileSegmentTable; + } + + public ConcurrentMap> getConsumeQueueFileSegmentTable() { + return consumeQueueFileSegmentTable; + } + + public void setConsumeQueueFileSegmentTable( + ConcurrentMap> consumeQueueFileSegmentTable) { + this.consumeQueueFileSegmentTable = consumeQueueFileSegmentTable; + } + + public ConcurrentMap> 
getIndexFileSegmentTable() { + return indexFileSegmentTable; + } + + public void setIndexFileSegmentTable( + ConcurrentMap> indexFileSegmentTable) { + this.indexFileSegmentTable = indexFileSegmentTable; + } + } } diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/TieredMetadataStore.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/MetadataStore.java similarity index 60% rename from tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/TieredMetadataStore.java rename to tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/MetadataStore.java index 9d89e7582e2..0b053127d21 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/TieredMetadataStore.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/MetadataStore.java @@ -19,18 +19,14 @@ import java.util.function.Consumer; import org.apache.rocketmq.common.message.MessageQueue; import org.apache.rocketmq.tieredstore.common.FileSegmentType; +import org.apache.rocketmq.tieredstore.metadata.entity.FileSegmentMetadata; +import org.apache.rocketmq.tieredstore.metadata.entity.QueueMetadata; +import org.apache.rocketmq.tieredstore.metadata.entity.TopicMetadata; /** * Provides tiered metadata storage service to store metadata information of Topic, Queue, FileSegment, etc. */ -public interface TieredMetadataStore { - - /** - * Set the sequence number of Topic, the start index from 0. - * - * @param topicSequenceNumber The sequence number of Topic. - */ - void setTopicSequenceNumber(long topicSequenceNumber); +public interface MetadataStore { /** * Get the metadata information of specified Topic. @@ -55,11 +51,6 @@ public interface TieredMetadataStore { void deleteTopic(String topic); - /** - * Queue metadata operation - * - * @see QueueMetadata - */ QueueMetadata getQueue(MessageQueue mq); QueueMetadata addQueue(MessageQueue mq, long baseOffset); @@ -70,58 +61,17 @@ public interface TieredMetadataStore { void deleteQueue(MessageQueue mq); - /** - * Get the metadata information of specified file segment. - * - * @param basePath The file path. - * @param fileType The file type. - * @param baseOffset The start offset of file segment. - * @return The metadata information of specified file segment, or null if it does not exist. - */ FileSegmentMetadata getFileSegment(String basePath, FileSegmentType fileType, long baseOffset); - /** - * Update the metadata information of a file segment. - * - * @param fileSegmentMetadata The metadata information of the file segment. - */ void updateFileSegment(FileSegmentMetadata fileSegmentMetadata); - /** - * Traverse all metadata information of file segment - * and execute the callback function for each metadata information. - * - * @param callback The traversal callback function. - */ void iterateFileSegment(Consumer callback); - /** - * Traverse all the metadata information of the file segments in the specified file path - * and execute the callback function for each metadata information. - * - * @param basePath The file path. - * @param callback The traversal callback function. - */ void iterateFileSegment(String basePath, FileSegmentType fileType, Consumer callback); - /** - * Delete all the metadata information of the file segments. - * - * @param basePath The file path. - */ void deleteFileSegment(String basePath, FileSegmentType fileType); - /** - * Delete the metadata information of a specified file segment. - * - * @param basePath The file path. - * @param fileType The file type. 
- * @param baseOffset The start offset of the file segment. - */ void deleteFileSegment(String basePath, FileSegmentType fileType, long baseOffset); - /** - * Clean all metadata in disk - */ void destroy(); } diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/TieredMetadataSerializeWrapper.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/TieredMetadataSerializeWrapper.java deleted file mode 100644 index fa01606dbdb..00000000000 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/TieredMetadataSerializeWrapper.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.rocketmq.tieredstore.metadata; - -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.atomic.AtomicLong; -import org.apache.rocketmq.remoting.protocol.RemotingSerializable; - -public class TieredMetadataSerializeWrapper extends RemotingSerializable { - - private AtomicLong topicSerialNumber = new AtomicLong(0L); - - private ConcurrentMap topicMetadataTable; - private ConcurrentMap> queueMetadataTable; - - // Declare concurrent mapping tables to store file segment metadata for different types of files - // Key: filePath -> Value: - private ConcurrentMap> commitLogFileSegmentTable; - private ConcurrentMap> consumeQueueFileSegmentTable; - private ConcurrentMap> indexFileSegmentTable; - - public TieredMetadataSerializeWrapper() { - this.topicMetadataTable = new ConcurrentHashMap<>(1024); - this.queueMetadataTable = new ConcurrentHashMap<>(1024); - this.commitLogFileSegmentTable = new ConcurrentHashMap<>(1024); - this.consumeQueueFileSegmentTable = new ConcurrentHashMap<>(1024); - this.indexFileSegmentTable = new ConcurrentHashMap<>(1024); - } - - public AtomicLong getTopicSerialNumber() { - return topicSerialNumber; - } - - public void setTopicSerialNumber(AtomicLong topicSerialNumber) { - this.topicSerialNumber = topicSerialNumber; - } - - public ConcurrentMap getTopicMetadataTable() { - return topicMetadataTable; - } - - public void setTopicMetadataTable( - ConcurrentMap topicMetadataTable) { - this.topicMetadataTable = topicMetadataTable; - } - - public ConcurrentMap> getQueueMetadataTable() { - return queueMetadataTable; - } - - public void setQueueMetadataTable( - ConcurrentMap> queueMetadataTable) { - this.queueMetadataTable = queueMetadataTable; - } - - public ConcurrentMap> getCommitLogFileSegmentTable() { - return commitLogFileSegmentTable; - } - - public void setCommitLogFileSegmentTable( - ConcurrentMap> commitLogFileSegmentTable) { - this.commitLogFileSegmentTable = commitLogFileSegmentTable; - } - - public ConcurrentMap> getConsumeQueueFileSegmentTable() { - return 
consumeQueueFileSegmentTable; - } - - public void setConsumeQueueFileSegmentTable( - ConcurrentMap> consumeQueueFileSegmentTable) { - this.consumeQueueFileSegmentTable = consumeQueueFileSegmentTable; - } - - public ConcurrentMap> getIndexFileSegmentTable() { - return indexFileSegmentTable; - } - - public void setIndexFileSegmentTable( - ConcurrentMap> indexFileSegmentTable) { - this.indexFileSegmentTable = indexFileSegmentTable; - } -} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/FileSegmentMetadata.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/entity/FileSegmentMetadata.java similarity index 90% rename from tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/FileSegmentMetadata.java rename to tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/entity/FileSegmentMetadata.java index 2f0fd71debb..4f988ca2411 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/FileSegmentMetadata.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/entity/FileSegmentMetadata.java @@ -14,8 +14,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.rocketmq.tieredstore.metadata; +package org.apache.rocketmq.tieredstore.metadata.entity; +import com.alibaba.fastjson.annotation.JSONField; import java.util.Objects; public class FileSegmentMetadata { @@ -24,20 +25,36 @@ public class FileSegmentMetadata { public static final int STATUS_SEALED = 1; public static final int STATUS_DELETED = 2; - private int type; + @JSONField(ordinal = 1) private String path; + + @JSONField(ordinal = 2) + private int type; + + @JSONField(ordinal = 3) private long baseOffset; + + @JSONField(ordinal = 4) private int status; + + @JSONField(ordinal = 5) private long size; + @JSONField(ordinal = 6) private long createTimestamp; + + @JSONField(ordinal = 7) private long beginTimestamp; + + @JSONField(ordinal = 8) private long endTimestamp; + + @JSONField(ordinal = 9) private long sealTimestamp; // default constructor is used by fastjson + @SuppressWarnings("unused") public FileSegmentMetadata() { - } public FileSegmentMetadata(String path, long baseOffset, int type) { diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/QueueMetadata.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/entity/QueueMetadata.java similarity index 88% rename from tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/QueueMetadata.java rename to tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/entity/QueueMetadata.java index d479330d78f..6720f1d08ac 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/QueueMetadata.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/entity/QueueMetadata.java @@ -14,20 +14,28 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.rocketmq.tieredstore.metadata; +package org.apache.rocketmq.tieredstore.metadata.entity; +import com.alibaba.fastjson.annotation.JSONField; import org.apache.rocketmq.common.message.MessageQueue; public class QueueMetadata { + @JSONField(ordinal = 1) private MessageQueue queue; + + @JSONField(ordinal = 2) private long minOffset; + + @JSONField(ordinal = 3) private long maxOffset; + + @JSONField(ordinal = 4) private long updateTimestamp; // default constructor is used by fastjson + @SuppressWarnings("unused") public QueueMetadata() { - } public QueueMetadata(MessageQueue queue, long minOffset, long maxOffset) { diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/TopicMetadata.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/entity/TopicMetadata.java similarity index 88% rename from tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/TopicMetadata.java rename to tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/entity/TopicMetadata.java index 4847dafd064..80e5230e7a3 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/TopicMetadata.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metadata/entity/TopicMetadata.java @@ -14,26 +14,30 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.rocketmq.tieredstore.metadata; +package org.apache.rocketmq.tieredstore.metadata.entity; -import com.google.common.annotations.VisibleForTesting; +import com.alibaba.fastjson.annotation.JSONField; public class TopicMetadata { + @JSONField(ordinal = 1) private long topicId; + + @JSONField(ordinal = 2) private String topic; + + @JSONField(ordinal = 3) private int status; + + @JSONField(ordinal = 4) private long reserveTime; + + @JSONField(ordinal = 5) private long updateTimestamp; // default constructor is used by fastjson + @SuppressWarnings("unused") public TopicMetadata() { - - } - - @VisibleForTesting - public TopicMetadata(String topic) { - this.topic = topic; } public TopicMetadata(long topicId, String topic, long reserveTime) { diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metrics/TieredStoreMetricsManager.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metrics/TieredStoreMetricsManager.java index 2b9fc59d821..e76c86d79bf 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metrics/TieredStoreMetricsManager.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/metrics/TieredStoreMetricsManager.java @@ -16,7 +16,6 @@ */ package org.apache.rocketmq.tieredstore.metrics; -import com.github.benmanes.caffeine.cache.Policy; import io.opentelemetry.api.common.Attributes; import io.opentelemetry.api.common.AttributesBuilder; import io.opentelemetry.api.metrics.LongCounter; @@ -33,25 +32,23 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Optional; import java.util.function.Supplier; import org.apache.rocketmq.common.Pair; import org.apache.rocketmq.common.message.MessageQueue; import org.apache.rocketmq.common.metrics.NopLongCounter; import org.apache.rocketmq.common.metrics.NopLongHistogram; import org.apache.rocketmq.common.metrics.NopObservableLongGauge; -import org.apache.rocketmq.logging.org.slf4j.Logger; -import org.apache.rocketmq.logging.org.slf4j.LoggerFactory; import org.apache.rocketmq.store.MessageStore; -import 
org.apache.rocketmq.tieredstore.TieredMessageFetcher; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; import org.apache.rocketmq.tieredstore.common.FileSegmentType; -import org.apache.rocketmq.tieredstore.common.MessageCacheKey; -import org.apache.rocketmq.tieredstore.common.SelectBufferResultWrapper; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.file.CompositeQueueFlatFile; -import org.apache.rocketmq.tieredstore.file.TieredFlatFileManager; -import org.apache.rocketmq.tieredstore.metadata.TieredMetadataStore; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; +import org.apache.rocketmq.tieredstore.core.MessageStoreFetcher; +import org.apache.rocketmq.tieredstore.core.MessageStoreFetcherImpl; +import org.apache.rocketmq.tieredstore.file.FlatFileStore; +import org.apache.rocketmq.tieredstore.file.FlatMessageFile; +import org.apache.rocketmq.tieredstore.metadata.MetadataStore; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.apache.rocketmq.store.metrics.DefaultStoreMetricsConstant.GAUGE_STORAGE_SIZE; import static org.apache.rocketmq.store.metrics.DefaultStoreMetricsConstant.LABEL_STORAGE_MEDIUM; @@ -77,7 +74,7 @@ public class TieredStoreMetricsManager { - private static final Logger logger = LoggerFactory.getLogger(TieredStoreUtil.TIERED_STORE_LOGGER_NAME); + private static final Logger log = LoggerFactory.getLogger(MessageStoreUtil.TIERED_STORE_LOGGER_NAME); public static Supplier attributesBuilderSupplier; private static String storageMedium = STORAGE_MEDIUM_BLOB; @@ -130,7 +127,7 @@ public static List> getMetricsView() { .build(); ViewBuilder bufferSizeViewBuilder = View.builder() - .setAggregation(Aggregation.explicitBucketHistogram(Arrays.asList(1d * TieredStoreUtil.KB, 10d * TieredStoreUtil.KB, 100d * TieredStoreUtil.KB, 1d * TieredStoreUtil.MB, 10d * TieredStoreUtil.MB, 32d * TieredStoreUtil.MB, 50d * TieredStoreUtil.MB, 100d * TieredStoreUtil.MB))) + .setAggregation(Aggregation.explicitBucketHistogram(Arrays.asList(1d * MessageStoreUtil.KB, 10d * MessageStoreUtil.KB, 100d * MessageStoreUtil.KB, 1d * MessageStoreUtil.MB, 10d * MessageStoreUtil.MB, 32d * MessageStoreUtil.MB, 50d * MessageStoreUtil.MB, 100d * MessageStoreUtil.MB))) .setDescription("tiered_store_buffer_size_view"); res.add(new Pair<>(rpcLatencySelector, rpcLatencyViewBuilder)); @@ -145,7 +142,9 @@ public static void setStorageMedium(String storageMedium) { } public static void init(Meter meter, Supplier attributesBuilderSupplier, - TieredMessageStoreConfig storeConfig, TieredMessageFetcher fetcher, MessageStore next) { + MessageStoreConfig storeConfig, MessageStoreFetcher fetcher, + FlatFileStore flatFileStore, MessageStore next) { + TieredStoreMetricsManager.attributesBuilderSupplier = attributesBuilderSupplier; apiLatency = meter.histogramBuilder(HISTOGRAM_API_LATENCY) @@ -176,8 +175,7 @@ public static void init(Meter meter, Supplier attributesBuild .setDescription("Tiered store dispatch behind message count") .ofLongs() .buildWithCallback(measurement -> { - for (CompositeQueueFlatFile flatFile : - TieredFlatFileManager.getInstance(storeConfig).deepCopyFlatFileToList()) { + for (FlatMessageFile flatFile : flatFileStore.deepCopyFlatFileToList()) { MessageQueue mq = flatFile.getMessageQueue(); long maxOffset = next.getMaxOffsetInQueue(mq.getTopic(), mq.getQueueId()); @@ -191,7 +189,7 @@ public static void init(Meter meter, Supplier 
attributesBuild .put(LABEL_QUEUE_ID, mq.getQueueId()) .put(LABEL_FILE_TYPE, FileSegmentType.COMMIT_LOG.name().toLowerCase()) .build(); - measurement.record(Math.max(maxOffset - flatFile.getDispatchOffset(), 0), commitLogAttributes); + Attributes consumeQueueAttributes = newAttributesBuilder() .put(LABEL_TOPIC, mq.getTopic()) .put(LABEL_QUEUE_ID, mq.getQueueId()) @@ -206,8 +204,7 @@ public static void init(Meter meter, Supplier attributesBuild .setUnit("seconds") .ofLongs() .buildWithCallback(measurement -> { - for (CompositeQueueFlatFile flatFile : - TieredFlatFileManager.getInstance(storeConfig).deepCopyFlatFileToList()) { + for (FlatMessageFile flatFile : flatFileStore.deepCopyFlatFileToList()) { MessageQueue mq = flatFile.getMessageQueue(); long maxOffset = next.getMaxOffsetInQueue(mq.getTopic(), mq.getQueueId()); @@ -221,12 +218,6 @@ public static void init(Meter meter, Supplier attributesBuild .put(LABEL_QUEUE_ID, mq.getQueueId()) .put(LABEL_FILE_TYPE, FileSegmentType.COMMIT_LOG.name().toLowerCase()) .build(); - long commitLogDispatchLatency = next.getMessageStoreTimeStamp(mq.getTopic(), mq.getQueueId(), flatFile.getDispatchOffset()); - if (maxOffset <= flatFile.getDispatchOffset() || commitLogDispatchLatency < 0) { - measurement.record(0, commitLogAttributes); - } else { - measurement.record(System.currentTimeMillis() - commitLogDispatchLatency, commitLogAttributes); - } Attributes consumeQueueAttributes = newAttributesBuilder() .put(LABEL_TOPIC, mq.getTopic()) @@ -258,15 +249,22 @@ public static void init(Meter meter, Supplier attributesBuild cacheCount = meter.gaugeBuilder(GAUGE_CACHE_COUNT) .setDescription("Tiered store cache message count") .ofLongs() - .buildWithCallback(measurement -> measurement.record(fetcher.getMessageCache().estimatedSize(), newAttributesBuilder().build())); + .buildWithCallback(measurement -> { + if (fetcher instanceof MessageStoreFetcherImpl) { + long count = ((MessageStoreFetcherImpl) fetcher).getFetcherCache().stats().loadCount(); + measurement.record(count, newAttributesBuilder().build()); + } + }); cacheBytes = meter.gaugeBuilder(GAUGE_CACHE_BYTES) .setDescription("Tiered store cache message bytes") .setUnit("bytes") .ofLongs() .buildWithCallback(measurement -> { - Optional> eviction = fetcher.getMessageCache().policy().eviction(); - eviction.ifPresent(resultEviction -> measurement.record(resultEviction.weightedSize().orElse(0), newAttributesBuilder().build())); + if (fetcher instanceof MessageStoreFetcherImpl) { + long count = ((MessageStoreFetcherImpl) fetcher).getFetcherCache().estimatedSize(); + measurement.record(count, newAttributesBuilder().build()); + } }); cacheAccess = meter.counterBuilder(COUNTER_CACHE_ACCESS) @@ -284,7 +282,7 @@ public static void init(Meter meter, Supplier attributesBuild .buildWithCallback(measurement -> { Map> topicFileSizeMap = new HashMap<>(); try { - TieredMetadataStore metadataStore = TieredStoreUtil.getMetadataStore(storeConfig); + MetadataStore metadataStore = flatFileStore.getMetadataStore(); metadataStore.iterateFileSegment(fileSegment -> { Map subMap = topicFileSizeMap.computeIfAbsent(fileSegment.getPath(), k -> new HashMap<>()); @@ -294,7 +292,7 @@ public static void init(Meter meter, Supplier attributesBuild subMap.put(fileSegmentType, size + fileSegment.getSize()); }); } catch (Exception e) { - logger.error("Failed to get storage size", e); + log.error("Failed to get storage size", e); } topicFileSizeMap.forEach((topic, subMap) -> { subMap.forEach((fileSegmentType, size) -> { @@ -312,8 +310,8 @@ public 
static void init(Meter meter, Supplier attributesBuild .setUnit("milliseconds") .ofLongs() .buildWithCallback(measurement -> { - for (CompositeQueueFlatFile flatFile : TieredFlatFileManager.getInstance(storeConfig).deepCopyFlatFileToList()) { - long timestamp = flatFile.getCommitLogBeginTimestamp(); + for (FlatMessageFile flatFile : flatFileStore.deepCopyFlatFileToList()) { + long timestamp = flatFile.getMinStoreTimestamp(); if (timestamp > 0) { MessageQueue mq = flatFile.getMessageQueue(); Attributes attributes = newAttributesBuilder() diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/FileSegment.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/FileSegment.java new file mode 100644 index 00000000000..f60fc95d23e --- /dev/null +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/FileSegment.java @@ -0,0 +1,346 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.rocketmq.tieredstore.provider; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Semaphore; +import java.util.concurrent.locks.ReentrantLock; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; +import org.apache.rocketmq.tieredstore.common.AppendResult; +import org.apache.rocketmq.tieredstore.common.FileSegmentType; +import org.apache.rocketmq.tieredstore.exception.TieredStoreErrorCode; +import org.apache.rocketmq.tieredstore.exception.TieredStoreException; +import org.apache.rocketmq.tieredstore.stream.FileSegmentInputStream; +import org.apache.rocketmq.tieredstore.stream.FileSegmentInputStreamFactory; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public abstract class FileSegment implements Comparable, FileSegmentProvider { + + private static final Logger log = LoggerFactory.getLogger(MessageStoreUtil.TIERED_STORE_LOGGER_NAME); + + protected static final Long GET_FILE_SIZE_ERROR = -1L; + + protected final long baseOffset; + protected final String filePath; + protected final FileSegmentType fileType; + protected final MessageStoreConfig storeConfig; + + protected final long maxSize; + protected final ReentrantLock fileLock = new ReentrantLock(); + protected final Semaphore commitLock = new Semaphore(1); + + protected volatile boolean closed = false; + protected volatile long minTimestamp = Long.MAX_VALUE; + protected volatile long maxTimestamp = Long.MAX_VALUE; + protected volatile long commitPosition = 0L; + protected volatile long appendPosition = 0L; + + protected volatile List bufferList = new ArrayList<>(); + protected volatile FileSegmentInputStream fileSegmentInputStream; + protected 
volatile CompletableFuture flightCommitRequest; + + public FileSegment(MessageStoreConfig storeConfig, + FileSegmentType fileType, String filePath, long baseOffset) { + + this.storeConfig = storeConfig; + this.fileType = fileType; + this.filePath = filePath; + this.baseOffset = baseOffset; + this.maxSize = this.getMaxSizeByFileType(); + } + + @Override + public int compareTo(FileSegment o) { + return Long.compare(this.baseOffset, o.baseOffset); + } + + public long getBaseOffset() { + return baseOffset; + } + + public void initPosition(long pos) { + fileLock.lock(); + try { + this.commitPosition = pos; + this.appendPosition = pos; + } finally { + fileLock.unlock(); + } + } + + public long getCommitPosition() { + return commitPosition; + } + + public long getAppendPosition() { + return appendPosition; + } + + public long getCommitOffset() { + return baseOffset + commitPosition; + } + + public long getAppendOffset() { + return baseOffset + appendPosition; + } + + public FileSegmentType getFileType() { + return fileType; + } + + public long getMaxSizeByFileType() { + switch (fileType) { + case COMMIT_LOG: + return storeConfig.getTieredStoreCommitLogMaxSize(); + case CONSUME_QUEUE: + return storeConfig.getTieredStoreConsumeQueueMaxSize(); + case INDEX: + default: + return Long.MAX_VALUE; + } + } + + public long getMaxSize() { + return maxSize; + } + + public long getMinTimestamp() { + return minTimestamp; + } + + public void setMinTimestamp(long minTimestamp) { + this.minTimestamp = minTimestamp; + } + + public long getMaxTimestamp() { + return maxTimestamp; + } + + public void setMaxTimestamp(long maxTimestamp) { + this.maxTimestamp = maxTimestamp; + } + + public boolean isClosed() { + return closed; + } + + public void close() { + fileLock.lock(); + try { + this.closed = true; + } finally { + fileLock.unlock(); + } + } + + protected List borrowBuffer() { + List temp; + fileLock.lock(); + try { + temp = bufferList; + bufferList = new ArrayList<>(); + } finally { + fileLock.unlock(); + } + return temp; + } + + @SuppressWarnings("NonAtomicOperationOnVolatileField") + protected void updateTimestamp(long timestamp) { + fileLock.lock(); + try { + if (maxTimestamp == Long.MAX_VALUE && minTimestamp == Long.MAX_VALUE) { + maxTimestamp = timestamp; + minTimestamp = timestamp; + return; + } + maxTimestamp = Math.max(maxTimestamp, timestamp); + minTimestamp = Math.min(minTimestamp, timestamp); + } finally { + fileLock.unlock(); + } + } + + @SuppressWarnings("NonAtomicOperationOnVolatileField") + public AppendResult append(ByteBuffer buffer, long timestamp) { + fileLock.lock(); + try { + if (closed) { + return AppendResult.FILE_CLOSED; + } + if (appendPosition + buffer.remaining() > maxSize) { + return AppendResult.FILE_FULL; + } + if (bufferList.size() >= storeConfig.getTieredStoreMaxGroupCommitCount()) { + return AppendResult.BUFFER_FULL; + } + this.appendPosition += buffer.remaining(); + this.bufferList.add(buffer); + this.updateTimestamp(timestamp); + } finally { + fileLock.unlock(); + } + return AppendResult.SUCCESS; + } + + public boolean needCommit() { + return appendPosition > commitPosition; + } + + @SuppressWarnings("NonAtomicOperationOnVolatileField") + public CompletableFuture commitAsync() { + if (closed) { + return CompletableFuture.completedFuture(false); + } + + if (!needCommit()) { + return CompletableFuture.completedFuture(true); + } + + // acquire lock + if (commitLock.drainPermits() <= 0) { + return CompletableFuture.completedFuture(false); + } + + // handle last commit error + if 
(fileSegmentInputStream != null) {
+            long fileSize = this.getSize();
+            if (fileSize == GET_FILE_SIZE_ERROR) {
+                log.error("FileSegment correct position error, fileName={}, commit={}, append={}, buffer={}",
+                    this.getPath(), commitPosition, appendPosition, fileSegmentInputStream.getContentLength());
+                releaseCommitLock();
+                return CompletableFuture.completedFuture(false);
+            }
+            if (correctPosition(fileSize)) {
+                fileSegmentInputStream = null;
+            }
+        }
+
+        int bufferSize;
+        if (fileSegmentInputStream != null) {
+            fileSegmentInputStream.rewind();
+            bufferSize = fileSegmentInputStream.available();
+        } else {
+            List<ByteBuffer> bufferList = this.borrowBuffer();
+            bufferSize = bufferList.stream().mapToInt(ByteBuffer::remaining).sum();
+            if (bufferSize == 0) {
+                releaseCommitLock();
+                return CompletableFuture.completedFuture(true);
+            }
+            fileSegmentInputStream = FileSegmentInputStreamFactory.build(
+                fileType, this.getCommitOffset(), bufferList, null, bufferSize);
+        }
+
+        boolean append = fileType != FileSegmentType.INDEX;
+        return flightCommitRequest =
+            this.commit0(fileSegmentInputStream, commitPosition, bufferSize, append)
+                .thenApply(result -> {
+                    if (result) {
+                        commitPosition += bufferSize;
+                        fileSegmentInputStream = null;
+                        return true;
+                    } else {
+                        fileSegmentInputStream.rewind();
+                        return false;
+                    }
+                })
+                .exceptionally(this::handleCommitException)
+                .whenComplete((result, e) -> releaseCommitLock());
+    }
+
+    private boolean handleCommitException(Throwable e) {
+
+        log.warn("FileSegment commit exception, filePath={}", this.filePath, e);
+
+        // Get root cause here
+        Throwable rootCause = e.getCause() != null ? e.getCause() : e;
+
+        long fileSize = rootCause instanceof TieredStoreException ?
+            ((TieredStoreException) rootCause).getPosition() : this.getSize();
+
+        long expectPosition = commitPosition + fileSegmentInputStream.getContentLength();
+        if (fileSize == GET_FILE_SIZE_ERROR) {
+            log.error("Get file size error after commit, FileName: {}, Commit: {}, Content: {}, Expect: {}, Append: {}",
+                this.getPath(), commitPosition, fileSegmentInputStream.getContentLength(), expectPosition, appendPosition);
+            return false;
+        }
+
+        if (correctPosition(fileSize)) {
+            fileSegmentInputStream = null;
+            return true;
+        } else {
+            fileSegmentInputStream.rewind();
+            return false;
+        }
+    }
+
+    private void releaseCommitLock() {
+        if (commitLock.availablePermits() == 0) {
+            commitLock.release();
+        }
+    }
+
+    /**
+     * Returns true when the pending buffer can be cleared.
+     */
+    private boolean correctPosition(long fileSize) {
+
+        // Currently we have three offsets here: commit offset, expect offset, file size.
+        // We guarantee that the commit offset is less than or equal to the expect offset.
+        // The max offset keeps increasing because new buffers can be appended continuously.
+
+        // We assume the file size reported by the storage system is correct, so the
+        // commit offset can be reset to that file size.
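+        // Returning true tells the caller that the upload landed exactly where expected,
+        // so the pending FileSegmentInputStream can be dropped; returning false keeps it
+        // so the next commit attempt can rewind and retry from the corrected position.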
+
+        long expectPosition = commitPosition + fileSegmentInputStream.getContentLength();
+        commitPosition = fileSize;
+        return expectPosition == fileSize;
+    }
+
+    public ByteBuffer read(long position, int length) {
+        return readAsync(position, length).join();
+    }
+
+    public CompletableFuture<ByteBuffer> readAsync(long position, int length) {
+        CompletableFuture<ByteBuffer> future = new CompletableFuture<>();
+        if (position < 0 || position >= commitPosition) {
+            future.completeExceptionally(new TieredStoreException(
+                TieredStoreErrorCode.ILLEGAL_PARAM, "FileSegment read position is illegal"));
+            return future;
+        }
+
+        if (length <= 0) {
+            future.completeExceptionally(new TieredStoreException(
+                TieredStoreErrorCode.ILLEGAL_PARAM, "FileSegment read length is illegal"));
+            return future;
+        }
+
+        int readableBytes = (int) (commitPosition - position);
+        if (readableBytes < length) {
+            log.debug("FileSegment#readAsync, requested length exceeds readable bytes, " +
+                "file: {}, request position: {}, commit position: {}, change length from {} to {}",
+                getPath(), position, commitPosition, length, readableBytes);
+            length = readableBytes;
+        }
+        return this.read0(position, length);
+    }
+}
diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/FileSegmentAllocator.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/FileSegmentAllocator.java
deleted file mode 100644
index c4b1e67afe2..00000000000
--- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/FileSegmentAllocator.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -package org.apache.rocketmq.tieredstore.provider; - -import java.lang.reflect.Constructor; -import org.apache.rocketmq.logging.org.slf4j.Logger; -import org.apache.rocketmq.logging.org.slf4j.LoggerFactory; -import org.apache.rocketmq.tieredstore.common.FileSegmentType; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.metadata.TieredMetadataStore; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; - -public class FileSegmentAllocator { - - private static final Logger log = LoggerFactory.getLogger(TieredStoreUtil.TIERED_STORE_LOGGER_NAME); - - private final TieredMessageStoreConfig storeConfig; - - private final Constructor fileSegmentConstructor; - - public FileSegmentAllocator( - TieredMessageStoreConfig storeConfig) throws ClassNotFoundException, NoSuchMethodException { - this.storeConfig = storeConfig; - Class clazz = - Class.forName(storeConfig.getTieredBackendServiceProvider()).asSubclass(TieredFileSegment.class); - fileSegmentConstructor = clazz.getConstructor( - TieredMessageStoreConfig.class, FileSegmentType.class, String.class, Long.TYPE); - } - - public TieredMessageStoreConfig getStoreConfig() { - return storeConfig; - } - - public TieredMetadataStore getMetadataStore() { - return TieredStoreUtil.getMetadataStore(storeConfig); - } - - public TieredFileSegment createSegment( - FileSegmentType fileType, String filePath, long baseOffset) { - - switch (fileType) { - case COMMIT_LOG: - return this.createCommitLogFileSegment(filePath, baseOffset); - case CONSUME_QUEUE: - return this.createConsumeQueueFileSegment(filePath, baseOffset); - case INDEX: - return this.createIndexFileSegment(filePath, baseOffset); - } - return null; - } - - public TieredFileSegment createCommitLogFileSegment(String filePath, long baseOffset) { - TieredFileSegment segment = null; - try { - segment = fileSegmentConstructor.newInstance( - this.storeConfig, FileSegmentType.COMMIT_LOG, filePath, baseOffset); - } catch (Exception e) { - log.error("create file segment of commitlog failed, filePath: {}, baseOffset: {}", - filePath, baseOffset, e); - } - return segment; - } - - public TieredFileSegment createConsumeQueueFileSegment(String filePath, long baseOffset) { - TieredFileSegment segment = null; - try { - segment = fileSegmentConstructor.newInstance( - this.storeConfig, FileSegmentType.CONSUME_QUEUE, filePath, baseOffset); - } catch (Exception e) { - log.error("create file segment of commitlog failed, filePath: {}, baseOffset: {}", - filePath, baseOffset, e); - } - return segment; - } - - public TieredFileSegment createIndexFileSegment(String filePath, long baseOffset) { - TieredFileSegment segment = null; - try { - segment = fileSegmentConstructor.newInstance( - this.storeConfig, FileSegmentType.INDEX, filePath, baseOffset); - } catch (Exception e) { - log.error("create file segment of commitlog failed, filePath: {}, baseOffset: {}", - filePath, baseOffset, e); - } - return segment; - } -} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/FileSegmentFactory.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/FileSegmentFactory.java new file mode 100644 index 00000000000..5146d46dbc1 --- /dev/null +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/FileSegmentFactory.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.rocketmq.tieredstore.provider; + +import java.lang.reflect.Constructor; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; +import org.apache.rocketmq.tieredstore.common.FileSegmentType; +import org.apache.rocketmq.tieredstore.metadata.MetadataStore; + +public class FileSegmentFactory { + + private final MetadataStore metadataStore; + private final MessageStoreConfig storeConfig; + private final Constructor fileSegmentConstructor; + + public FileSegmentFactory(MetadataStore metadataStore, MessageStoreConfig storeConfig) { + try { + this.storeConfig = storeConfig; + this.metadataStore = metadataStore; + Class clazz = + Class.forName(storeConfig.getTieredBackendServiceProvider()).asSubclass(FileSegment.class); + fileSegmentConstructor = clazz.getConstructor( + MessageStoreConfig.class, FileSegmentType.class, String.class, Long.TYPE); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public MetadataStore getMetadataStore() { + return metadataStore; + } + + public MessageStoreConfig getStoreConfig() { + return storeConfig; + } + + public FileSegment createSegment(FileSegmentType fileType, String filePath, long baseOffset) { + try { + return fileSegmentConstructor.newInstance(this.storeConfig, fileType, filePath, baseOffset); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public FileSegment createCommitLogFileSegment(String filePath, long baseOffset) { + return this.createSegment(FileSegmentType.COMMIT_LOG, filePath, baseOffset); + } + + public FileSegment createConsumeQueueFileSegment(String filePath, long baseOffset) { + return this.createSegment(FileSegmentType.CONSUME_QUEUE, filePath, baseOffset); + } + + public FileSegment createIndexServiceFileSegment(String filePath, long baseOffset) { + return this.createSegment(FileSegmentType.INDEX, filePath, baseOffset); + } +} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/TieredStoreProvider.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/FileSegmentProvider.java similarity index 95% rename from tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/TieredStoreProvider.java rename to tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/FileSegmentProvider.java index b9938b7a8a0..1ce643e0e8c 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/TieredStoreProvider.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/FileSegmentProvider.java @@ -18,9 +18,9 @@ import java.nio.ByteBuffer; import java.util.concurrent.CompletableFuture; -import org.apache.rocketmq.tieredstore.provider.stream.FileSegmentInputStream; +import org.apache.rocketmq.tieredstore.stream.FileSegmentInputStream; -public interface TieredStoreProvider { +public interface 
FileSegmentProvider { /** * Get file path in backend file system diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/memory/MemoryFileSegment.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/MemoryFileSegment.java similarity index 61% rename from tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/memory/MemoryFileSegment.java rename to tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/MemoryFileSegment.java index 80ad41f6859..b3f10113939 100644 --- a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/memory/MemoryFileSegment.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/MemoryFileSegment.java @@ -14,62 +14,51 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.rocketmq.tieredstore.provider.memory; +package org.apache.rocketmq.tieredstore.provider; import java.nio.ByteBuffer; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import org.apache.rocketmq.common.message.MessageQueue; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; import org.apache.rocketmq.tieredstore.common.FileSegmentType; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.provider.TieredFileSegment; -import org.apache.rocketmq.tieredstore.provider.stream.FileSegmentInputStream; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; -import org.junit.Assert; +import org.apache.rocketmq.tieredstore.stream.FileSegmentInputStream; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -public class MemoryFileSegment extends TieredFileSegment { +public class MemoryFileSegment extends FileSegment { - protected final ByteBuffer memStore; - - public CompletableFuture blocker; + private static final Logger log = LoggerFactory.getLogger(MessageStoreUtil.TIERED_STORE_LOGGER_NAME); + protected final ByteBuffer memStore; + protected CompletableFuture blocker; protected int size = 0; - protected boolean checkSize = true; - public MemoryFileSegment(FileSegmentType fileType, MessageQueue messageQueue, long baseOffset, - TieredMessageStoreConfig storeConfig) { - this(storeConfig, fileType, TieredStoreUtil.toPath(messageQueue), baseOffset); - } - - public MemoryFileSegment(TieredMessageStoreConfig storeConfig, + public MemoryFileSegment(MessageStoreConfig storeConfig, FileSegmentType fileType, String filePath, long baseOffset) { + super(storeConfig, fileType, filePath, baseOffset); - switch (fileType) { - case COMMIT_LOG: - case INDEX: - case CONSUME_QUEUE: - memStore = ByteBuffer.allocate(10000); - break; - default: - memStore = null; - break; - } + memStore = ByteBuffer.allocate(10000); memStore.position((int) getSize()); } - public boolean isCheckSize() { - return checkSize; + @Override + public boolean exists() { + return false; } - public void setCheckSize(boolean checkSize) { - this.checkSize = checkSize; + @Override + public void createFile() { } public ByteBuffer getMemStore() { return memStore; } + public void setCheckSize(boolean checkSize) { + this.checkSize = checkSize; + } + @Override public String getPath() { return filePath; @@ -87,11 +76,6 @@ public void setSize(int size) { this.size = size; } - @Override - public void createFile() { - - } - @Override public CompletableFuture read0(long position, int length) { ByteBuffer buffer 
= memStore.duplicate(); @@ -107,36 +91,22 @@ public CompletableFuture commit0( try { if (blocker != null && !blocker.get()) { - throw new IllegalStateException("Commit Exception for Memory Test"); + log.info("Commit Blocker Exception for Memory Test"); + return CompletableFuture.completedFuture(false); } - } catch (InterruptedException | ExecutionException e) { - Assert.fail(e.getMessage()); - } - Assert.assertTrue(!checkSize || position >= getSize()); - - byte[] buffer = new byte[1024]; - int startPos = memStore.position(); - try { int len; + byte[] buffer = new byte[1024]; while ((len = inputStream.read(buffer)) > 0) { memStore.put(buffer, 0, len); } - Assert.assertEquals(length, memStore.position() - startPos); } catch (Exception e) { - Assert.fail(e.getMessage()); return CompletableFuture.completedFuture(false); } return CompletableFuture.completedFuture(true); } - @Override - public boolean exists() { - return false; - } - @Override public void destroyFile() { - } } diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/posix/PosixFileSegment.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/PosixFileSegment.java similarity index 53% rename from tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/posix/PosixFileSegment.java rename to tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/PosixFileSegment.java index ee56b1e68bd..fb150c928cf 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/posix/PosixFileSegment.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/PosixFileSegment.java @@ -14,7 +14,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.rocketmq.tieredstore.provider.posix; +package org.apache.rocketmq.tieredstore.provider; import com.google.common.base.Stopwatch; import com.google.common.io.ByteStreams; @@ -29,15 +29,14 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import org.apache.commons.lang3.StringUtils; -import org.apache.rocketmq.logging.org.slf4j.Logger; -import org.apache.rocketmq.logging.org.slf4j.LoggerFactory; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; +import org.apache.rocketmq.tieredstore.MessageStoreExecutor; import org.apache.rocketmq.tieredstore.common.FileSegmentType; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.common.TieredStoreExecutor; import org.apache.rocketmq.tieredstore.metrics.TieredStoreMetricsManager; -import org.apache.rocketmq.tieredstore.provider.TieredFileSegment; -import org.apache.rocketmq.tieredstore.provider.stream.FileSegmentInputStream; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; +import org.apache.rocketmq.tieredstore.stream.FileSegmentInputStream; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.apache.rocketmq.tieredstore.metrics.TieredStoreMetricsConstant.LABEL_FILE_TYPE; import static org.apache.rocketmq.tieredstore.metrics.TieredStoreMetricsConstant.LABEL_OPERATION; @@ -47,11 +46,10 @@ /** * this class is experimental and may change without notice. 
*/ -public class PosixFileSegment extends TieredFileSegment { +public class PosixFileSegment extends FileSegment { - private static final Logger logger = LoggerFactory.getLogger(TieredStoreUtil.TIERED_STORE_LOGGER_NAME); + private static final Logger log = LoggerFactory.getLogger(MessageStoreUtil.TIERED_STORE_LOGGER_NAME); - private static final String UNDERLINE = "_"; private static final String OPERATION_POSIX_READ = "read"; private static final String OPERATION_POSIX_WRITE = "write"; @@ -60,7 +58,7 @@ public class PosixFileSegment extends TieredFileSegment { private volatile FileChannel readFileChannel; private volatile FileChannel writeFileChannel; - public PosixFileSegment(TieredMessageStoreConfig storeConfig, + public PosixFileSegment(MessageStoreConfig storeConfig, FileSegmentType fileType, String filePath, long baseOffset) { super(storeConfig, fileType, filePath, baseOffset); @@ -70,13 +68,13 @@ public PosixFileSegment(TieredMessageStoreConfig storeConfig, StringUtils.appendIfMissing(storeConfig.getTieredStoreFilePath(), File.separator)); // fullPath: basePath/hash_cluster/broker/topic/queueId/fileType/baseOffset - String brokerClusterName = storeConfig.getBrokerClusterName(); - String clusterBasePath = TieredStoreUtil.getHash(brokerClusterName) + UNDERLINE + brokerClusterName; - this.fullPath = Paths.get(basePath, clusterBasePath, filePath, - fileType.toString(), TieredStoreUtil.offset2FileName(baseOffset)).toString(); - logger.info("Constructing Posix FileSegment, filePath: {}", fullPath); + String clusterName = storeConfig.getBrokerClusterName(); + String clusterBasePath = String.format("%s_%s", MessageStoreUtil.getHash(clusterName), clusterName); + fullPath = Paths.get(basePath, clusterBasePath, filePath, + fileType.toString(), MessageStoreUtil.offset2FileName(baseOffset)).toString(); + log.info("Constructing Posix FileSegment, filePath: {}", fullPath); - createFile(); + this.createFile(); } protected AttributesBuilder newAttributesBuilder() { @@ -87,7 +85,7 @@ protected AttributesBuilder newAttributesBuilder() { @Override public String getPath() { - return fullPath; + return filePath; } @Override @@ -95,7 +93,7 @@ public long getSize() { if (exists()) { return file.length(); } - return -1; + return 0L; } @Override @@ -105,45 +103,63 @@ public boolean exists() { @Override public void createFile() { - if (file == null) { + if (this.file == null) { synchronized (this) { - if (file == null) { - File file = new File(fullPath); - try { - File dir = file.getParentFile(); - if (!dir.exists()) { - dir.mkdirs(); - } - - // TODO use direct IO to avoid polluting the page cache - file.createNewFile(); - this.readFileChannel = new RandomAccessFile(file, "r").getChannel(); - this.writeFileChannel = new RandomAccessFile(file, "rwd").getChannel(); - this.file = file; - } catch (Exception e) { - logger.error("PosixFileSegment#createFile: create file {} failed: ", filePath, e); - } + if (this.file == null) { + this.createFile0(); } } } } + @SuppressWarnings({"resource", "ResultOfMethodCallIgnored"}) + private void createFile0() { + try { + File file = new File(fullPath); + File dir = file.getParentFile(); + if (!dir.exists()) { + dir.mkdirs(); + } + if (!file.exists()) { + if (file.createNewFile()) { + log.debug("Create Posix FileSegment, filePath: {}", fullPath); + } + } + this.readFileChannel = new RandomAccessFile(file, "r").getChannel(); + this.writeFileChannel = new RandomAccessFile(file, "rwd").getChannel(); + this.file = file; + } catch (Exception e) { + 
log.error("PosixFileSegment#createFile: create file {} failed: ", filePath, e); + } + } + @Override + public void destroyFile() { + this.close(); + if (file != null && file.exists()) { + if (file.delete()) { + log.info("Destroy Posix FileSegment, filePath: {}", fullPath); + } else { + log.warn("Destroy Posix FileSegment error, filePath: {}", fullPath); + } + } + } + + @Override + public void close() { + super.close(); try { if (readFileChannel != null && readFileChannel.isOpen()) { readFileChannel.close(); + readFileChannel = null; } if (writeFileChannel != null && writeFileChannel.isOpen()) { writeFileChannel.close(); + writeFileChannel = null; } - logger.info("Destroy Posix FileSegment, filePath: {}", fullPath); } catch (IOException e) { - logger.error("Destroy Posix FileSegment failed, filePath: {}", fullPath, e); - } - - if (file.exists()) { - file.delete(); + log.error("Destroy Posix FileSegment failed, filePath: {}", fullPath, e); } } @@ -176,14 +192,13 @@ public CompletableFuture read0(long position, int length) { long costTime = stopwatch.stop().elapsed(TimeUnit.MILLISECONDS); attributesBuilder.put(LABEL_SUCCESS, false); TieredStoreMetricsManager.providerRpcLatency.record(costTime, attributesBuilder.build()); - logger.error("PosixFileSegment#read0: read file {} failed: position: {}, length: {}", - filePath, position, length, e); future.completeExceptionally(e); } return future; } @Override + @SuppressWarnings("ResultOfMethodCallIgnored") public CompletableFuture commit0( FileSegmentInputStream inputStream, long position, int length, boolean append) { @@ -191,51 +206,30 @@ public CompletableFuture commit0( AttributesBuilder attributesBuilder = newAttributesBuilder() .put(LABEL_OPERATION, OPERATION_POSIX_WRITE); - CompletableFuture future = new CompletableFuture<>(); - try { - TieredStoreExecutor.commitExecutor.execute(() -> { - try { - byte[] byteArray = ByteStreams.toByteArray(inputStream); - if (byteArray.length != length) { - logger.error("PosixFileSegment#commit0: append file {} failed: real data size: {}, is not equal to length: {}", - filePath, byteArray.length, length); - future.complete(false); - return; - } - writeFileChannel.position(position); - ByteBuffer buffer = ByteBuffer.wrap(byteArray); - while (buffer.hasRemaining()) { - writeFileChannel.write(buffer); - } - - attributesBuilder.put(LABEL_SUCCESS, true); - long costTime = stopwatch.stop().elapsed(TimeUnit.MILLISECONDS); - TieredStoreMetricsManager.providerRpcLatency.record(costTime, attributesBuilder.build()); - - Attributes metricsAttributes = newAttributesBuilder() - .put(LABEL_OPERATION, OPERATION_POSIX_WRITE) - .build(); - TieredStoreMetricsManager.uploadBytes.record(length, metricsAttributes); - - future.complete(true); - } catch (Exception e) { - long costTime = stopwatch.stop().elapsed(TimeUnit.MILLISECONDS); - attributesBuilder.put(LABEL_SUCCESS, false); - TieredStoreMetricsManager.providerRpcLatency.record(costTime, attributesBuilder.build()); - - logger.error("PosixFileSegment#commit0: append file {} failed: position: {}, length: {}", - filePath, position, length, e); - future.completeExceptionally(e); + return CompletableFuture.supplyAsync(() -> { + try { + byte[] byteArray = ByteStreams.toByteArray(inputStream); + writeFileChannel.position(position); + ByteBuffer buffer = ByteBuffer.wrap(byteArray); + while (buffer.hasRemaining()) { + writeFileChannel.write(buffer); } - }); - } catch (Exception e) { - // commit task cannot be executed - long costTime = stopwatch.stop().elapsed(TimeUnit.MILLISECONDS); - 
attributesBuilder.put(LABEL_SUCCESS, false); - TieredStoreMetricsManager.providerRpcLatency.record(costTime, attributesBuilder.build()); - - future.completeExceptionally(e); - } - return future; + writeFileChannel.force(true); + attributesBuilder.put(LABEL_SUCCESS, true); + long costTime = stopwatch.stop().elapsed(TimeUnit.MILLISECONDS); + TieredStoreMetricsManager.providerRpcLatency.record(costTime, attributesBuilder.build()); + + Attributes metricsAttributes = newAttributesBuilder() + .put(LABEL_OPERATION, OPERATION_POSIX_WRITE) + .build(); + TieredStoreMetricsManager.uploadBytes.record(length, metricsAttributes); + } catch (Exception e) { + long costTime = stopwatch.stop().elapsed(TimeUnit.MILLISECONDS); + attributesBuilder.put(LABEL_SUCCESS, false); + TieredStoreMetricsManager.providerRpcLatency.record(costTime, attributesBuilder.build()); + return false; + } + return true; + }, MessageStoreExecutor.getInstance().bufferCommitExecutor); } } diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/TieredFileSegment.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/TieredFileSegment.java deleted file mode 100644 index 6703de9403f..00000000000 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/TieredFileSegment.java +++ /dev/null @@ -1,485 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.rocketmq.tieredstore.provider; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.Semaphore; -import java.util.concurrent.locks.ReentrantLock; -import org.apache.rocketmq.logging.org.slf4j.Logger; -import org.apache.rocketmq.logging.org.slf4j.LoggerFactory; -import org.apache.rocketmq.tieredstore.common.AppendResult; -import org.apache.rocketmq.tieredstore.common.FileSegmentType; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.exception.TieredStoreErrorCode; -import org.apache.rocketmq.tieredstore.exception.TieredStoreException; -import org.apache.rocketmq.tieredstore.file.TieredCommitLog; -import org.apache.rocketmq.tieredstore.file.TieredConsumeQueue; -import org.apache.rocketmq.tieredstore.provider.stream.FileSegmentInputStream; -import org.apache.rocketmq.tieredstore.provider.stream.FileSegmentInputStreamFactory; -import org.apache.rocketmq.tieredstore.util.MessageBufferUtil; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; - -import static org.apache.rocketmq.tieredstore.index.IndexStoreFile.INDEX_BEGIN_TIME_STAMP; -import static org.apache.rocketmq.tieredstore.index.IndexStoreFile.INDEX_END_TIME_STAMP; - -public abstract class TieredFileSegment implements Comparable, TieredStoreProvider { - - private static final Logger logger = LoggerFactory.getLogger(TieredStoreUtil.TIERED_STORE_LOGGER_NAME); - - protected final String filePath; - protected final long baseOffset; - protected final FileSegmentType fileType; - protected final TieredMessageStoreConfig storeConfig; - - private final long maxSize; - private final ReentrantLock bufferLock = new ReentrantLock(); - private final Semaphore commitLock = new Semaphore(1); - - private volatile boolean full = false; - private volatile boolean closed = false; - - private volatile long minTimestamp = Long.MAX_VALUE; - private volatile long maxTimestamp = Long.MAX_VALUE; - private volatile long commitPosition = 0L; - private volatile long appendPosition = 0L; - - // only used in commitLog - private volatile long dispatchCommitOffset = 0L; - - private ByteBuffer codaBuffer; - private List bufferList = new ArrayList<>(); - private FileSegmentInputStream fileSegmentInputStream; - private CompletableFuture flightCommitRequest = CompletableFuture.completedFuture(false); - - public TieredFileSegment(TieredMessageStoreConfig storeConfig, - FileSegmentType fileType, String filePath, long baseOffset) { - - this.storeConfig = storeConfig; - this.fileType = fileType; - this.filePath = filePath; - this.baseOffset = baseOffset; - this.maxSize = getMaxSizeByFileType(); - } - - /** - * The max segment size of a file is determined by the file type - */ - protected long getMaxSizeByFileType() { - switch (fileType) { - case COMMIT_LOG: - return storeConfig.getTieredStoreCommitLogMaxSize(); - case CONSUME_QUEUE: - return storeConfig.getTieredStoreConsumeQueueMaxSize(); - case INDEX: - return Long.MAX_VALUE; - default: - throw new IllegalArgumentException("Unsupported file type: " + fileType); - } - } - - @Override - public int compareTo(TieredFileSegment o) { - return Long.compare(this.baseOffset, o.baseOffset); - } - - public long getBaseOffset() { - return baseOffset; - } - - public long getCommitOffset() { - return baseOffset + commitPosition; - } - - public long getCommitPosition() { - return commitPosition; - } - - public long getDispatchCommitOffset() 
{ - return dispatchCommitOffset; - } - - public long getMaxOffset() { - return baseOffset + appendPosition; - } - - public long getMaxSize() { - return maxSize; - } - - public long getMinTimestamp() { - return minTimestamp; - } - - public void setMinTimestamp(long minTimestamp) { - this.minTimestamp = minTimestamp; - } - - public long getMaxTimestamp() { - return maxTimestamp; - } - - public void setMaxTimestamp(long maxTimestamp) { - this.maxTimestamp = maxTimestamp; - } - - public boolean isFull() { - return full; - } - - public void setFull() { - setFull(true); - } - - public void setFull(boolean appendCoda) { - bufferLock.lock(); - try { - full = true; - if (fileType == FileSegmentType.COMMIT_LOG && appendCoda) { - appendCoda(); - } - } finally { - bufferLock.unlock(); - } - } - - public boolean isClosed() { - return closed; - } - - public void close() { - closed = true; - } - - public FileSegmentType getFileType() { - return fileType; - } - - public void initPosition(long pos) { - this.commitPosition = pos; - this.appendPosition = pos; - } - - private List borrowBuffer() { - bufferLock.lock(); - try { - List tmp = bufferList; - bufferList = new ArrayList<>(); - return tmp; - } finally { - bufferLock.unlock(); - } - } - - @SuppressWarnings("NonAtomicOperationOnVolatileField") - public AppendResult append(ByteBuffer byteBuf, long timestamp) { - if (closed) { - return AppendResult.FILE_CLOSED; - } - - bufferLock.lock(); - try { - if (full || codaBuffer != null) { - return AppendResult.FILE_FULL; - } - - if (fileType == FileSegmentType.INDEX) { - minTimestamp = byteBuf.getLong(INDEX_BEGIN_TIME_STAMP); - maxTimestamp = byteBuf.getLong(INDEX_END_TIME_STAMP); - - appendPosition += byteBuf.remaining(); - // IndexFile is large and not change after compaction, no need deep copy - bufferList.add(byteBuf); - setFull(); - return AppendResult.SUCCESS; - } - - if (appendPosition + byteBuf.remaining() > maxSize) { - setFull(); - return AppendResult.FILE_FULL; - } - - if (bufferList.size() > storeConfig.getTieredStoreGroupCommitCount() - || appendPosition - commitPosition > storeConfig.getTieredStoreGroupCommitSize()) { - commitAsync(); - } - - if (bufferList.size() > storeConfig.getTieredStoreMaxGroupCommitCount()) { - logger.debug("File segment append buffer full, file: {}, buffer size: {}, pending bytes: {}", - getPath(), bufferList.size(), appendPosition - commitPosition); - return AppendResult.BUFFER_FULL; - } - - if (timestamp != Long.MAX_VALUE) { - maxTimestamp = timestamp; - if (minTimestamp == Long.MAX_VALUE) { - minTimestamp = timestamp; - } - } - - appendPosition += byteBuf.remaining(); - - // deep copy buffer - ByteBuffer byteBuffer = ByteBuffer.allocateDirect(byteBuf.remaining()); - byteBuffer.put(byteBuf); - byteBuffer.flip(); - byteBuf.rewind(); - - bufferList.add(byteBuffer); - return AppendResult.SUCCESS; - } finally { - bufferLock.unlock(); - } - } - - public void setCommitPosition(long commitPosition) { - this.commitPosition = commitPosition; - } - - public long getAppendPosition() { - return appendPosition; - } - - public void setAppendPosition(long appendPosition) { - this.appendPosition = appendPosition; - } - - @SuppressWarnings("NonAtomicOperationOnVolatileField") - private void appendCoda() { - if (codaBuffer != null) { - return; - } - codaBuffer = ByteBuffer.allocate(TieredCommitLog.CODA_SIZE); - codaBuffer.putInt(TieredCommitLog.CODA_SIZE); - codaBuffer.putInt(TieredCommitLog.BLANK_MAGIC_CODE); - codaBuffer.putLong(maxTimestamp); - codaBuffer.flip(); - appendPosition += 
TieredCommitLog.CODA_SIZE; - } - - public ByteBuffer read(long position, int length) { - return readAsync(position, length).join(); - } - - public CompletableFuture readAsync(long position, int length) { - CompletableFuture future = new CompletableFuture<>(); - if (position < 0 || length < 0) { - future.completeExceptionally( - new TieredStoreException(TieredStoreErrorCode.ILLEGAL_PARAM, "position or length is negative")); - return future; - } - if (length == 0) { - future.completeExceptionally( - new TieredStoreException(TieredStoreErrorCode.ILLEGAL_PARAM, "length is zero")); - return future; - } - if (position >= commitPosition) { - future.completeExceptionally( - new TieredStoreException(TieredStoreErrorCode.ILLEGAL_PARAM, "position is illegal")); - return future; - } - if (position + length > commitPosition) { - logger.debug("TieredFileSegment#readAsync request position + length is greater than commit position," + - " correct length using commit position, file: {}, request position: {}, commit position:{}, change length from {} to {}", - getPath(), position, commitPosition, length, commitPosition - position); - length = (int) (commitPosition - position); - if (length == 0) { - future.completeExceptionally( - new TieredStoreException(TieredStoreErrorCode.NO_NEW_DATA, "request position is equal to commit position")); - return future; - } - if (fileType == FileSegmentType.CONSUME_QUEUE && length % TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE != 0) { - future.completeExceptionally( - new TieredStoreException(TieredStoreErrorCode.ILLEGAL_PARAM, "position and length is illegal")); - return future; - } - } - return read0(position, length); - } - - public boolean needCommit() { - return appendPosition > commitPosition; - } - - public boolean commit() { - if (closed) { - return false; - } - // result is false when we send real commit request - // use join for wait flight request done - Boolean result = commitAsync().join(); - if (!result) { - result = flightCommitRequest.join(); - } - return result; - } - - private void releaseCommitLock() { - if (commitLock.availablePermits() == 0) { - commitLock.release(); - } else { - logger.error("[Bug] FileSegmentCommitAsync, lock is already released: available permits: {}", - commitLock.availablePermits()); - } - } - - private void updateDispatchCommitOffset(List bufferList) { - if (fileType == FileSegmentType.COMMIT_LOG && bufferList.size() > 0) { - dispatchCommitOffset = - MessageBufferUtil.getQueueOffset(bufferList.get(bufferList.size() - 1)); - } - } - - /** - * @return false: commit, true: no commit operation - */ - @SuppressWarnings("NonAtomicOperationOnVolatileField") - public CompletableFuture commitAsync() { - if (closed) { - return CompletableFuture.completedFuture(false); - } - - if (!needCommit()) { - return CompletableFuture.completedFuture(true); - } - - if (commitLock.drainPermits() <= 0) { - return CompletableFuture.completedFuture(false); - } - - try { - if (fileSegmentInputStream != null) { - long fileSize = this.getSize(); - if (fileSize == -1L) { - logger.error("Get commit position error before commit, Commit: {}, Expect: {}, Current Max: {}, FileName: {}", - commitPosition, commitPosition + fileSegmentInputStream.getContentLength(), appendPosition, getPath()); - releaseCommitLock(); - return CompletableFuture.completedFuture(false); - } else { - if (correctPosition(fileSize, null)) { - updateDispatchCommitOffset(fileSegmentInputStream.getBufferList()); - fileSegmentInputStream = null; - } - } - } - - int bufferSize; - if 
(fileSegmentInputStream != null) { - bufferSize = fileSegmentInputStream.available(); - } else { - List bufferList = borrowBuffer(); - bufferSize = bufferList.stream().mapToInt(ByteBuffer::remaining).sum() - + (codaBuffer != null ? codaBuffer.remaining() : 0); - if (bufferSize == 0) { - releaseCommitLock(); - return CompletableFuture.completedFuture(true); - } - fileSegmentInputStream = FileSegmentInputStreamFactory.build( - fileType, baseOffset + commitPosition, bufferList, codaBuffer, bufferSize); - } - - return flightCommitRequest = this - .commit0(fileSegmentInputStream, commitPosition, bufferSize, fileType != FileSegmentType.INDEX) - .thenApply(result -> { - if (result) { - updateDispatchCommitOffset(fileSegmentInputStream.getBufferList()); - commitPosition += bufferSize; - fileSegmentInputStream = null; - return true; - } else { - fileSegmentInputStream.rewind(); - return false; - } - }) - .exceptionally(this::handleCommitException) - .whenComplete((result, e) -> releaseCommitLock()); - - } catch (Exception e) { - handleCommitException(e); - releaseCommitLock(); - } - return CompletableFuture.completedFuture(false); - } - - private long getCorrectFileSize(Throwable throwable) { - if (throwable instanceof TieredStoreException) { - long fileSize = ((TieredStoreException) throwable).getPosition(); - if (fileSize > 0) { - return fileSize; - } - } - return getSize(); - } - - private boolean handleCommitException(Throwable e) { - // Get root cause here - Throwable cause = e.getCause() != null ? e.getCause() : e; - long fileSize = this.getCorrectFileSize(cause); - - if (fileSize == -1L) { - logger.error("Get commit position error, Commit: %d, Expect: %d, Current Max: %d, FileName: %s", - commitPosition, commitPosition + fileSegmentInputStream.getContentLength(), appendPosition, getPath()); - fileSegmentInputStream.rewind(); - return false; - } - - if (correctPosition(fileSize, cause)) { - updateDispatchCommitOffset(fileSegmentInputStream.getBufferList()); - fileSegmentInputStream = null; - return true; - } else { - fileSegmentInputStream.rewind(); - return false; - } - } - - /** - * return true to clear buffer - */ - private boolean correctPosition(long fileSize, Throwable throwable) { - - // Current we have three offsets here: commit offset, expect offset, file size. - // We guarantee that the commit offset is less than or equal to the expect offset. - // Max offset will increase because we can continuously put in new buffers - String handleInfo = throwable == null ? "before commit" : "after commit"; - long expectPosition = commitPosition + fileSegmentInputStream.getContentLength(); - - String offsetInfo = String.format("Correct Commit Position, %s, result=[{}], " + - "Commit: %d, Expect: %d, Current Max: %d, FileSize: %d, FileName: %s", - handleInfo, commitPosition, expectPosition, appendPosition, fileSize, this.getPath()); - - // We are believing that the file size returned by the server is correct, - // can reset the commit offset to the file size reported by the storage system. 
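Editor's note: the correctPosition() logic above reconciles three values (the local commit position, the expected position after the upload, and the file size reported by the storage backend) and always trusts the reported size. A minimal, self-contained sketch of that rule follows; the class, method, and field names are illustrative and not part of the module.

public final class CommitPositionReconcileSketch {

    /** Holds the outcome of one reconcile step. */
    static final class Outcome {
        final long newCommitPosition;
        final boolean clearBufferedStream; // true only when the upload landed in full
        Outcome(long newCommitPosition, boolean clearBufferedStream) {
            this.newCommitPosition = newCommitPosition;
            this.clearBufferedStream = clearBufferedStream;
        }
    }

    static Outcome reconcile(long commitPosition, long contentLength, long reportedFileSize) {
        long expectPosition = commitPosition + contentLength;
        if (reportedFileSize == expectPosition) {
            // full success: advance to the expected position and drop the buffered stream
            return new Outcome(reportedFileSize, true);
        }
        // partial success, plain failure, or a shrunken file: follow the reported size
        // and keep the buffered stream so the commit can be rewound and retried
        return new Outcome(reportedFileSize, false);
    }

    public static void main(String[] args) {
        System.out.println(reconcile(1000, 200, 1200).clearBufferedStream); // true
        System.out.println(reconcile(1000, 200, 1100).clearBufferedStream); // false
    }
}

Only the full-success branch allows the in-flight buffers to be discarded; every other outcome leaves them in place for a retry.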
- if (fileSize == expectPosition) { - logger.info(offsetInfo, "Success", throwable); - commitPosition = fileSize; - return true; - } - - if (fileSize < commitPosition) { - logger.error(offsetInfo, "FileSizeIncorrect", throwable); - } else if (fileSize == commitPosition) { - logger.warn(offsetInfo, "CommitFailed", throwable); - } else if (fileSize > commitPosition) { - logger.warn(offsetInfo, "PartialSuccess", throwable); - } - commitPosition = fileSize; - return false; - } -} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/stream/CommitLogInputStream.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/stream/CommitLogInputStream.java similarity index 91% rename from tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/stream/CommitLogInputStream.java rename to tieredstore/src/main/java/org/apache/rocketmq/tieredstore/stream/CommitLogInputStream.java index 13b6e0ef9c9..e2d7354755d 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/stream/CommitLogInputStream.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/stream/CommitLogInputStream.java @@ -15,13 +15,13 @@ * limitations under the License. */ -package org.apache.rocketmq.tieredstore.provider.stream; +package org.apache.rocketmq.tieredstore.stream; import java.io.IOException; import java.nio.ByteBuffer; import java.util.List; import org.apache.rocketmq.tieredstore.common.FileSegmentType; -import org.apache.rocketmq.tieredstore.util.MessageBufferUtil; +import org.apache.rocketmq.tieredstore.util.MessageFormatUtil; public class CommitLogInputStream extends FileSegmentInputStream { @@ -90,9 +90,9 @@ public int read() { commitLogOffset += readPosInCurBuffer; readPosInCurBuffer = 0; } - if (readPosInCurBuffer >= MessageBufferUtil.PHYSICAL_OFFSET_POSITION - && readPosInCurBuffer < MessageBufferUtil.SYS_FLAG_OFFSET_POSITION) { - res = (int) ((commitLogOffset >> (8 * (MessageBufferUtil.SYS_FLAG_OFFSET_POSITION - readPosInCurBuffer - 1))) & 0xff); + if (readPosInCurBuffer >= MessageFormatUtil.PHYSICAL_OFFSET_POSITION + && readPosInCurBuffer < MessageFormatUtil.SYS_FLAG_OFFSET_POSITION) { + res = (int) ((commitLogOffset >> (8 * (MessageFormatUtil.SYS_FLAG_OFFSET_POSITION - readPosInCurBuffer - 1))) & 0xff); readPosInCurBuffer++; } else { res = curBuffer.get(readPosInCurBuffer++) & 0xff; @@ -150,18 +150,18 @@ public int read(byte[] b, int off, int len) { remaining = curBuf.remaining() - posInCurBuffer; readLen = Math.min(remaining, needRead); curBuf = bufferList.get(bufIndex); - if (posInCurBuffer < MessageBufferUtil.PHYSICAL_OFFSET_POSITION) { - realReadLen = Math.min(MessageBufferUtil.PHYSICAL_OFFSET_POSITION - posInCurBuffer, readLen); + if (posInCurBuffer < MessageFormatUtil.PHYSICAL_OFFSET_POSITION) { + realReadLen = Math.min(MessageFormatUtil.PHYSICAL_OFFSET_POSITION - posInCurBuffer, readLen); // read from commitLog buffer curBuf.position(posInCurBuffer); curBuf.get(b, off, realReadLen); curBuf.position(0); - } else if (posInCurBuffer < MessageBufferUtil.SYS_FLAG_OFFSET_POSITION) { - realReadLen = Math.min(MessageBufferUtil.SYS_FLAG_OFFSET_POSITION - posInCurBuffer, readLen); + } else if (posInCurBuffer < MessageFormatUtil.SYS_FLAG_OFFSET_POSITION) { + realReadLen = Math.min(MessageFormatUtil.SYS_FLAG_OFFSET_POSITION - posInCurBuffer, readLen); // read from converted PHYSICAL_OFFSET_POSITION byte[] physicalOffsetBytes = new byte[realReadLen]; for (int i = 0; i < realReadLen; i++) { - physicalOffsetBytes[i] = (byte) ((curCommitLogOffset >> 
(8 * (MessageBufferUtil.SYS_FLAG_OFFSET_POSITION - posInCurBuffer - i - 1))) & 0xff); + physicalOffsetBytes[i] = (byte) ((curCommitLogOffset >> (8 * (MessageFormatUtil.SYS_FLAG_OFFSET_POSITION - posInCurBuffer - i - 1))) & 0xff); } System.arraycopy(physicalOffsetBytes, 0, b, off, realReadLen); } else { diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/stream/FileSegmentInputStream.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/stream/FileSegmentInputStream.java similarity index 99% rename from tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/stream/FileSegmentInputStream.java rename to tieredstore/src/main/java/org/apache/rocketmq/tieredstore/stream/FileSegmentInputStream.java index 9e9d5135cd7..5020ff199d0 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/stream/FileSegmentInputStream.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/stream/FileSegmentInputStream.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.rocketmq.tieredstore.provider.stream; +package org.apache.rocketmq.tieredstore.stream; import java.io.IOException; import java.io.InputStream; diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/stream/FileSegmentInputStreamFactory.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/stream/FileSegmentInputStreamFactory.java similarity index 87% rename from tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/stream/FileSegmentInputStreamFactory.java rename to tieredstore/src/main/java/org/apache/rocketmq/tieredstore/stream/FileSegmentInputStreamFactory.java index a90baff3ae6..6872296bbc0 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/provider/stream/FileSegmentInputStreamFactory.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/stream/FileSegmentInputStreamFactory.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.rocketmq.tieredstore.provider.stream; +package org.apache.rocketmq.tieredstore.stream; import java.nio.ByteBuffer; import java.util.List; @@ -32,8 +32,7 @@ public static FileSegmentInputStream build( switch (fileType) { case COMMIT_LOG: - return new CommitLogInputStream( - fileType, offset, bufferList, byteBuffer, length); + return new CommitLogInputStream(fileType, offset, bufferList, byteBuffer, length); case CONSUME_QUEUE: return new FileSegmentInputStream(fileType, bufferList, length); case INDEX: @@ -42,7 +41,7 @@ public static FileSegmentInputStream build( } return new FileSegmentInputStream(fileType, bufferList, length); default: - throw new IllegalArgumentException("file type is not supported"); + throw new IllegalArgumentException("file type not supported"); } } } diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/util/MessageBufferUtil.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/util/MessageBufferUtil.java deleted file mode 100644 index 2c4a6e5784b..00000000000 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/util/MessageBufferUtil.java +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.rocketmq.tieredstore.util; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import org.apache.rocketmq.common.UtilAll; -import org.apache.rocketmq.common.message.MessageDecoder; -import org.apache.rocketmq.logging.org.slf4j.Logger; -import org.apache.rocketmq.logging.org.slf4j.LoggerFactory; -import org.apache.rocketmq.tieredstore.common.SelectBufferResult; -import org.apache.rocketmq.tieredstore.file.TieredCommitLog; -import org.apache.rocketmq.tieredstore.file.TieredConsumeQueue; - -public class MessageBufferUtil { - private static final Logger logger = LoggerFactory.getLogger(TieredStoreUtil.TIERED_STORE_LOGGER_NAME); - - public static final int QUEUE_OFFSET_POSITION = 4 /* total size */ - + 4 /* magic code */ - + 4 /* body CRC */ - + 4 /* queue id */ - + 4; /* flag */ - - public static final int PHYSICAL_OFFSET_POSITION = 4 /* total size */ - + 4 /* magic code */ - + 4 /* body CRC */ - + 4 /* queue id */ - + 4 /* flag */ - + 8; /* queue offset */ - - public static final int SYS_FLAG_OFFSET_POSITION = 4 /* total size */ - + 4 /* magic code */ - + 4 /* body CRC */ - + 4 /* queue id */ - + 4 /* flag */ - + 8 /* queue offset */ - + 8; /* physical offset */ - - public static final int STORE_TIMESTAMP_POSITION = 4 /* total size */ - + 4 /* magic code */ - + 4 /* body CRC */ - + 4 /* queue id */ - + 4 /* flag */ - + 8 /* queue offset */ - + 8 /* physical offset */ - + 4 /* sys flag */ - + 8 /* born timestamp */ - + 8; /* born host */ - - public static final int STORE_HOST_POSITION = 4 /* total size */ - + 4 /* magic code */ - + 4 /* body CRC */ - + 4 /* queue id */ - + 4 /* flag */ - + 8 /* queue offset */ - + 8 /* physical offset */ - + 4 /* sys flag */ - + 8 /* born timestamp */ - + 8 /* born host */ - + 8; /* store timestamp */ - - public static int getTotalSize(ByteBuffer message) { - return message.getInt(message.position()); - } - - public static int getMagicCode(ByteBuffer message) { - return message.getInt(message.position() + 4); - } - - public static long getQueueOffset(ByteBuffer message) { - return message.getLong(message.position() + QUEUE_OFFSET_POSITION); - } - - public static long getCommitLogOffset(ByteBuffer message) { - return message.getLong(message.position() + PHYSICAL_OFFSET_POSITION); - } - - public static long getStoreTimeStamp(ByteBuffer message) { - return message.getLong(message.position() + STORE_TIMESTAMP_POSITION); - } - - public static ByteBuffer getOffsetIdBuffer(ByteBuffer message) { - ByteBuffer idBuffer = ByteBuffer.allocate(TieredStoreUtil.MSG_ID_LENGTH); - idBuffer.limit(TieredStoreUtil.MSG_ID_LENGTH); - idBuffer.putLong(message.getLong(message.position() + STORE_HOST_POSITION)); - idBuffer.putLong(getCommitLogOffset(message)); - idBuffer.flip(); - return idBuffer; - } - - public static String getOffsetId(ByteBuffer message) { - return UtilAll.bytes2string(getOffsetIdBuffer(message).array()); - } 
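Editor's note: the summed field-size constants deleted here reappear in the new MessageFormatUtil (added further below) as hard-coded positions 20, 28, 36, 56 and 64. A quick standalone check that the two encodings agree, using the field sizes spelled out in the comments above; the variable names are illustrative only.

public class MessageOffsetLayoutCheck {
    public static void main(String[] args) {
        int totalSize = 4, magicCode = 4, bodyCrc = 4, queueId = 4, flag = 4;
        int queueOffset = 8, physicalOffset = 8, sysFlag = 4;
        int bornTimestamp = 8, bornHost = 8, storeTimestamp = 8;

        int queueOffsetPos = totalSize + magicCode + bodyCrc + queueId + flag;   // 20
        int physicalOffsetPos = queueOffsetPos + queueOffset;                    // 28
        int sysFlagPos = physicalOffsetPos + physicalOffset;                     // 36
        int storeTimestampPos = sysFlagPos + sysFlag + bornTimestamp + bornHost; // 56
        int storeHostPos = storeTimestampPos + storeTimestamp;                   // 64

        // prints: 20 28 36 56 64, matching MessageFormatUtil's literal positions
        System.out.printf("%d %d %d %d %d%n",
            queueOffsetPos, physicalOffsetPos, sysFlagPos, storeTimestampPos, storeHostPos);
    }
}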
- - public static Map getProperties(ByteBuffer message) { - ByteBuffer slice = message.slice(); - return MessageDecoder.decodeProperties(slice); - } - - public static List splitMessageBuffer(ByteBuffer cqBuffer, ByteBuffer msgBuffer) { - - cqBuffer.rewind(); - msgBuffer.rewind(); - - List bufferResultList = new ArrayList<>( - cqBuffer.remaining() / TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE); - - if (msgBuffer.remaining() == 0) { - logger.error("MessageBufferUtil#splitMessage, msg buffer length is zero"); - return bufferResultList; - } - - if (cqBuffer.remaining() % TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE != 0) { - logger.error("MessageBufferUtil#splitMessage, consume queue buffer size incorrect, {}", cqBuffer.remaining()); - return bufferResultList; - } - - try { - long firstCommitLogOffset = CQItemBufferUtil.getCommitLogOffset(cqBuffer); - - for (int position = cqBuffer.position(); position < cqBuffer.limit(); - position += TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE) { - - cqBuffer.position(position); - long logOffset = CQItemBufferUtil.getCommitLogOffset(cqBuffer); - int bufferSize = CQItemBufferUtil.getSize(cqBuffer); - long tagCode = CQItemBufferUtil.getTagCode(cqBuffer); - - int offset = (int) (logOffset - firstCommitLogOffset); - if (offset + bufferSize > msgBuffer.limit()) { - logger.error("MessageBufferUtil#splitMessage, message buffer size incorrect. " + - "Expect length in consume queue: {}, actual length: {}", offset + bufferSize, msgBuffer.limit()); - break; - } - - msgBuffer.position(offset); - int magicCode = getMagicCode(msgBuffer); - if (magicCode == TieredCommitLog.BLANK_MAGIC_CODE) { - offset += TieredCommitLog.CODA_SIZE; - msgBuffer.position(offset); - magicCode = getMagicCode(msgBuffer); - } - if (magicCode != MessageDecoder.MESSAGE_MAGIC_CODE && - magicCode != MessageDecoder.MESSAGE_MAGIC_CODE_V2) { - logger.warn("MessageBufferUtil#splitMessage, found unknown magic code. " + - "Message offset: {}, wrong magic code: {}", offset, magicCode); - continue; - } - - if (bufferSize != getTotalSize(msgBuffer)) { - logger.warn("MessageBufferUtil#splitMessage, message length in commitlog incorrect. " + - "Except length in commitlog: {}, actual: {}", getTotalSize(msgBuffer), bufferSize); - continue; - } - - ByteBuffer sliceBuffer = msgBuffer.slice(); - sliceBuffer.limit(bufferSize); - bufferResultList.add(new SelectBufferResult(sliceBuffer, offset, bufferSize, tagCode)); - } - } catch (Exception e) { - logger.error("MessageBufferUtil#splitMessage, split message buffer error", e); - } finally { - cqBuffer.rewind(); - msgBuffer.rewind(); - } - return bufferResultList; - } -} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/util/MessageFormatUtil.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/util/MessageFormatUtil.java new file mode 100644 index 00000000000..560234d050a --- /dev/null +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/util/MessageFormatUtil.java @@ -0,0 +1,175 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.rocketmq.tieredstore.util; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import org.apache.rocketmq.common.UtilAll; +import org.apache.rocketmq.common.message.MessageDecoder; +import org.apache.rocketmq.tieredstore.common.SelectBufferResult; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class MessageFormatUtil { + + private static final Logger log = LoggerFactory.getLogger(MessageStoreUtil.TIERED_STORE_LOGGER_NAME); + + public static final int MSG_ID_LENGTH = 8 + 8; + public static final int MAGIC_CODE_POSITION = 4; + public static final int QUEUE_OFFSET_POSITION = 20; + public static final int PHYSICAL_OFFSET_POSITION = 28; + public static final int SYS_FLAG_OFFSET_POSITION = 36; + public static final int STORE_TIMESTAMP_POSITION = 56; + public static final int STORE_HOST_POSITION = 64; + + /** + * item size: int, 4 bytes + * magic code: int, 4 bytes + * max store timestamp: long, 8 bytes + */ + public static final int COMMIT_LOG_CODA_SIZE = 4 + 8 + 4; + public static final int BLANK_MAGIC_CODE = 0xBBCCDDEE ^ 1880681586 + 8; + + /** + * commit log offset: long, 8 bytes + * message size: int, 4 bytes + * tag hash code: long, 8 bytes + */ + public static final int CONSUME_QUEUE_UNIT_SIZE = 8 + 4 + 8; + + public static int getTotalSize(ByteBuffer message) { + return message.getInt(message.position()); + } + + public static int getMagicCode(ByteBuffer message) { + return message.getInt(message.position() + MAGIC_CODE_POSITION); + } + + public static long getQueueOffset(ByteBuffer message) { + return message.getLong(message.position() + QUEUE_OFFSET_POSITION); + } + + public static long getCommitLogOffset(ByteBuffer message) { + return message.getLong(message.position() + PHYSICAL_OFFSET_POSITION); + } + + public static long getStoreTimeStamp(ByteBuffer message) { + return message.getLong(message.position() + STORE_TIMESTAMP_POSITION); + } + + public static ByteBuffer getOffsetIdBuffer(ByteBuffer message) { + ByteBuffer buffer = ByteBuffer.allocate(MSG_ID_LENGTH); + buffer.putLong(message.getLong(message.position() + STORE_HOST_POSITION)); + buffer.putLong(getCommitLogOffset(message)); + buffer.flip(); + return buffer; + } + + public static String getOffsetId(ByteBuffer message) { + return UtilAll.bytes2string(getOffsetIdBuffer(message).array()); + } + + public static Map getProperties(ByteBuffer message) { + return MessageDecoder.decodeProperties(message.slice()); + } + + public static long getCommitLogOffsetFromItem(ByteBuffer cqItem) { + return cqItem.getLong(cqItem.position()); + } + + public static int getSizeFromItem(ByteBuffer cqItem) { + return cqItem.getInt(cqItem.position() + 8); + } + + public static long getTagCodeFromItem(ByteBuffer cqItem) { + return cqItem.getLong(cqItem.position() + 12); + } + + public static List splitMessageBuffer(ByteBuffer cqBuffer, ByteBuffer msgBuffer) { + + if (cqBuffer == null || msgBuffer == null) { + log.error("MessageFormatUtil split buffer error, cq buffer or msg buffer is null"); + return new ArrayList<>(); + } + + 
cqBuffer.rewind(); + msgBuffer.rewind(); + + List bufferResultList = new ArrayList<>( + cqBuffer.remaining() / CONSUME_QUEUE_UNIT_SIZE); + + if (msgBuffer.remaining() == 0) { + log.error("MessageFormatUtil split buffer error, msg buffer length is 0"); + return bufferResultList; + } + + if (cqBuffer.remaining() == 0 || cqBuffer.remaining() % CONSUME_QUEUE_UNIT_SIZE != 0) { + log.error("MessageFormatUtil split buffer error, cq buffer size is {}", cqBuffer.remaining()); + return bufferResultList; + } + + try { + long firstCommitLogOffset = MessageFormatUtil.getCommitLogOffsetFromItem(cqBuffer); + + for (int position = cqBuffer.position(); position < cqBuffer.limit(); + position += CONSUME_QUEUE_UNIT_SIZE) { + + cqBuffer.position(position); + long logOffset = MessageFormatUtil.getCommitLogOffsetFromItem(cqBuffer); + int bufferSize = MessageFormatUtil.getSizeFromItem(cqBuffer); + long tagCode = MessageFormatUtil.getTagCodeFromItem(cqBuffer); + + int offset = (int) (logOffset - firstCommitLogOffset); + if (offset + bufferSize > msgBuffer.limit()) { + log.error("MessageFormatUtil split buffer error, message buffer offset exceeded limit. " + + "Expect length: {}, Actual length: {}", offset + bufferSize, msgBuffer.limit()); + break; + } + + msgBuffer.position(offset); + int magicCode = getMagicCode(msgBuffer); + if (magicCode == BLANK_MAGIC_CODE) { + offset += COMMIT_LOG_CODA_SIZE; + msgBuffer.position(offset); + magicCode = getMagicCode(msgBuffer); + } + if (magicCode != MessageDecoder.MESSAGE_MAGIC_CODE && + magicCode != MessageDecoder.MESSAGE_MAGIC_CODE_V2) { + log.error("MessageFormatUtil split buffer error, found unknown magic code. " + + "Message offset: {}, wrong magic code: {}", offset, magicCode); + continue; + } + + if (bufferSize != getTotalSize(msgBuffer)) { + log.error("MessageFormatUtil split buffer error, message length not match. 
" + + "CommitLog length: {}, buffer length: {}", getTotalSize(msgBuffer), bufferSize); + continue; + } + + ByteBuffer sliceBuffer = msgBuffer.slice(); + sliceBuffer.limit(bufferSize); + bufferResultList.add(new SelectBufferResult(sliceBuffer, offset, bufferSize, tagCode)); + } + } finally { + cqBuffer.rewind(); + msgBuffer.rewind(); + } + return bufferResultList; + } +} diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/util/TieredStoreUtil.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/util/MessageStoreUtil.java similarity index 54% rename from tieredstore/src/main/java/org/apache/rocketmq/tieredstore/util/TieredStoreUtil.java rename to tieredstore/src/main/java/org/apache/rocketmq/tieredstore/util/MessageStoreUtil.java index d15765fcd03..eccde8cad76 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/util/TieredStoreUtil.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/util/MessageStoreUtil.java @@ -16,27 +16,18 @@ */ package org.apache.rocketmq.tieredstore.util; -import com.google.common.annotations.VisibleForTesting; -import java.io.File; -import java.lang.reflect.Constructor; import java.math.BigInteger; import java.nio.charset.StandardCharsets; import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.text.DecimalFormat; import java.text.NumberFormat; -import java.util.LinkedList; -import java.util.List; -import org.apache.commons.lang3.StringUtils; import org.apache.rocketmq.common.message.MessageQueue; -import org.apache.rocketmq.common.topic.TopicValidator; -import org.apache.rocketmq.logging.org.slf4j.Logger; -import org.apache.rocketmq.logging.org.slf4j.LoggerFactory; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.metadata.TieredMetadataStore; -public class TieredStoreUtil { +public class MessageStoreUtil { - private static final Logger logger = LoggerFactory.getLogger(TieredStoreUtil.TIERED_STORE_LOGGER_NAME); + public static final String TIERED_STORE_LOGGER_NAME = "RocketmqTieredStore"; + public static final String RMQ_SYS_TIERED_STORE_INDEX_TOPIC = "rmq_sys_INDEX"; public static final long BYTE = 1L; public static final long KB = BYTE << 10; @@ -46,23 +37,8 @@ public class TieredStoreUtil { public static final long PB = TB << 10; public static final long EB = PB << 10; - public static final String TIERED_STORE_LOGGER_NAME = "RocketmqTieredStore"; - public static final String RMQ_SYS_TIERED_STORE_INDEX_TOPIC = "rmq_sys_INDEX"; - public final static int MSG_ID_LENGTH = 8 + 8; - private static final DecimalFormat DEC_FORMAT = new DecimalFormat("#.##"); - private final static List SYSTEM_TOPIC_LIST = new LinkedList() { - { - add(RMQ_SYS_TIERED_STORE_INDEX_TOPIC); - } - }; - - private final static List SYSTEM_TOPIC_WHITE_LIST = new LinkedList<>(); - - @VisibleForTesting - public volatile static TieredMetadataStore metadataStoreInstance; - private static String formatSize(long size, long divider, String unitName) { return DEC_FORMAT.format((double) size / divider) + unitName; } @@ -82,7 +58,7 @@ public static String toHumanReadable(long size) { return formatSize(size, MB, "MB"); if (size >= KB) return formatSize(size, KB, "KB"); - return formatSize(size, BYTE, "Bytes"); + return formatSize(size, BYTE, "B"); } public static String getHash(String str) { @@ -91,23 +67,27 @@ public static String getHash(String str) { md.update(str.getBytes(StandardCharsets.UTF_8)); byte[] digest = md.digest(); return 
String.format("%032x", new BigInteger(1, digest)).substring(0, 8); - } catch (Exception ignore) { - return ""; + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); } } + public static String toFilePath(MessageQueue mq) { + return String.format("%s/%s/%s", mq.getBrokerName(), mq.getTopic(), mq.getQueueId()); + } + + public static String getIndexFilePath(String brokerName) { + return toFilePath(new MessageQueue(RMQ_SYS_TIERED_STORE_INDEX_TOPIC, brokerName, 0)); + } + public static String offset2FileName(final long offset) { final NumberFormat numberFormat = NumberFormat.getInstance(); - numberFormat.setMinimumIntegerDigits(20); numberFormat.setMaximumFractionDigits(0); numberFormat.setGroupingUsed(false); - try { MessageDigest md = MessageDigest.getInstance("MD5"); - md.update(Long.toString(offset).getBytes(StandardCharsets.UTF_8)); - byte[] digest = md.digest(); String hash = String.format("%032x", new BigInteger(1, digest)).substring(0, 8); return hash + numberFormat.format(offset); @@ -119,52 +99,4 @@ public static String offset2FileName(final long offset) { public static long fileName2Offset(final String fileName) { return Long.parseLong(fileName.substring(fileName.length() - 20)); } - - public static void addSystemTopic(final String topic) { - SYSTEM_TOPIC_LIST.add(topic); - } - - public static boolean isSystemTopic(final String topic) { - if (StringUtils.isBlank(topic)) { - return false; - } - - if (SYSTEM_TOPIC_WHITE_LIST.contains(topic)) { - return false; - } - - if (SYSTEM_TOPIC_LIST.contains(topic)) { - return true; - } - return TopicValidator.isSystemTopic(topic); - } - - public static TieredMetadataStore getMetadataStore(TieredMessageStoreConfig storeConfig) { - if (storeConfig == null) { - return metadataStoreInstance; - } - - if (metadataStoreInstance == null) { - synchronized (TieredMetadataStore.class) { - if (metadataStoreInstance == null) { - try { - Class clazz = Class.forName( - storeConfig.getTieredMetadataServiceProvider()).asSubclass(TieredMetadataStore.class); - Constructor constructor = - clazz.getConstructor(TieredMessageStoreConfig.class); - metadataStoreInstance = constructor.newInstance(storeConfig); - } catch (Exception e) { - logger.error("TieredMetadataStore#getInstance: " + - "build metadata store failed, provider class: {}", - storeConfig.getTieredMetadataServiceProvider(), e); - } - } - } - } - return metadataStoreInstance; - } - - public static String toPath(MessageQueue mq) { - return mq.getBrokerName() + File.separator + mq.getTopic() + File.separator + mq.getQueueId(); - } } diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/TieredDispatcherTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/TieredDispatcherTest.java deleted file mode 100644 index 5791dc9a4e2..00000000000 --- a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/TieredDispatcherTest.java +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.rocketmq.tieredstore; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Objects; -import org.apache.rocketmq.common.message.MessageQueue; -import org.apache.rocketmq.store.ConsumeQueue; -import org.apache.rocketmq.store.DefaultMessageStore; -import org.apache.rocketmq.store.DispatchRequest; -import org.apache.rocketmq.store.SelectMappedBufferResult; -import org.apache.rocketmq.tieredstore.common.AppendResult; -import org.apache.rocketmq.tieredstore.common.FileSegmentType; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.common.TieredStoreExecutor; -import org.apache.rocketmq.tieredstore.file.CompositeQueueFlatFile; -import org.apache.rocketmq.tieredstore.file.TieredConsumeQueue; -import org.apache.rocketmq.tieredstore.file.TieredFlatFileManager; -import org.apache.rocketmq.tieredstore.metadata.FileSegmentMetadata; -import org.apache.rocketmq.tieredstore.metadata.TieredMetadataStore; -import org.apache.rocketmq.tieredstore.provider.memory.MemoryFileSegment; -import org.apache.rocketmq.tieredstore.util.MessageBufferUtil; -import org.apache.rocketmq.tieredstore.util.MessageBufferUtilTest; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -public class TieredDispatcherTest { - - private final String storePath = TieredStoreTestUtil.getRandomStorePath(); - private TieredMessageStoreConfig storeConfig; - private MessageQueue mq; - private TieredMetadataStore metadataStore; - - @Before - public void setUp() { - storeConfig = new TieredMessageStoreConfig(); - storeConfig.setStorePathRootDir(storePath); - storeConfig.setTieredBackendServiceProvider("org.apache.rocketmq.tieredstore.provider.memory.MemoryFileSegmentWithoutCheck"); - storeConfig.setBrokerName(storeConfig.getBrokerName()); - mq = new MessageQueue("CompositeQueueFlatFileTest", storeConfig.getBrokerName(), 0); - metadataStore = TieredStoreUtil.getMetadataStore(storeConfig); - TieredStoreExecutor.init(); - } - - @After - public void tearDown() throws IOException { - TieredStoreTestUtil.destroyCompositeFlatFileManager(); - TieredStoreTestUtil.destroyMetadataStore(); - TieredStoreTestUtil.destroyTempDir(storePath); - TieredStoreExecutor.shutdown(); - } - - @Test - public void testDispatch() { - metadataStore.addQueue(mq, 6); - MemoryFileSegment segment = new MemoryFileSegment(FileSegmentType.COMMIT_LOG, mq, 1000, storeConfig); - segment.initPosition(segment.getSize()); - - String filePath1 = TieredStoreUtil.toPath(mq); - FileSegmentMetadata segmentMetadata = new FileSegmentMetadata( - filePath1, 1000, FileSegmentType.COMMIT_LOG.getType()); - metadataStore.updateFileSegment(segmentMetadata); - metadataStore.updateFileSegment(segmentMetadata); - - segment = new MemoryFileSegment(FileSegmentType.CONSUME_QUEUE, mq, - 6 * TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE, storeConfig); - FileSegmentMetadata segmentMetadata2 = new FileSegmentMetadata( - filePath1, 
segment.getBaseOffset(), FileSegmentType.CONSUME_QUEUE.getType()); - metadataStore.updateFileSegment(segmentMetadata2); - - TieredFlatFileManager flatFileManager = TieredFlatFileManager.getInstance(storeConfig); - DefaultMessageStore defaultMessageStore = Mockito.mock(DefaultMessageStore.class); - TieredDispatcher dispatcher = new TieredDispatcher(defaultMessageStore, storeConfig); - - SelectMappedBufferResult mockResult = new SelectMappedBufferResult(0, MessageBufferUtilTest.buildMockedMessageBuffer(), MessageBufferUtilTest.MSG_LEN, null); - Mockito.when(defaultMessageStore.selectOneMessageByOffset(7, MessageBufferUtilTest.MSG_LEN)).thenReturn(mockResult); - DispatchRequest request = new DispatchRequest(mq.getTopic(), mq.getQueueId(), 6, 7, MessageBufferUtilTest.MSG_LEN, 1); - dispatcher.dispatch(request); - Assert.assertNotNull(flatFileManager.getFlatFile(mq)); - Assert.assertEquals(7, Objects.requireNonNull(flatFileManager.getFlatFile(mq)).getDispatchOffset()); - - CompositeQueueFlatFile flatFile = flatFileManager.getOrCreateFlatFileIfAbsent(mq); - Assert.assertNotNull(flatFile); - flatFile.commit(true); - Assert.assertEquals(6, flatFile.getConsumeQueueMaxOffset()); - - dispatcher.buildConsumeQueueAndIndexFile(); - Assert.assertEquals(7, flatFile.getConsumeQueueMaxOffset()); - - ByteBuffer buffer1 = MessageBufferUtilTest.buildMockedMessageBuffer(); - buffer1.putLong(MessageBufferUtil.QUEUE_OFFSET_POSITION, 7); - flatFile.appendCommitLog(buffer1); - ByteBuffer buffer2 = MessageBufferUtilTest.buildMockedMessageBuffer(); - buffer2.putLong(MessageBufferUtil.QUEUE_OFFSET_POSITION, 8); - flatFile.appendCommitLog(buffer2); - ByteBuffer buffer3 = MessageBufferUtilTest.buildMockedMessageBuffer(); - buffer3.putLong(MessageBufferUtil.QUEUE_OFFSET_POSITION, 9); - flatFile.appendCommitLog(buffer3); - flatFile.commitCommitLog(); - Assert.assertEquals(9 + 1, flatFile.getDispatchOffset()); - Assert.assertEquals(9, flatFile.getCommitLogDispatchCommitOffset()); - - dispatcher.doRedispatchRequestToWriteMap(AppendResult.SUCCESS, flatFile, 8, 8, 0, 0, buffer1); - dispatcher.doRedispatchRequestToWriteMap(AppendResult.SUCCESS, flatFile, 9, 9, 0, 0, buffer2); - dispatcher.buildConsumeQueueAndIndexFile(); - Assert.assertEquals(7, flatFile.getConsumeQueueMaxOffset()); - - dispatcher.doRedispatchRequestToWriteMap(AppendResult.SUCCESS, flatFile, 7, 7, 0, 0, buffer1); - dispatcher.doRedispatchRequestToWriteMap(AppendResult.SUCCESS, flatFile, 8, 8, 0, 0, buffer2); - dispatcher.doRedispatchRequestToWriteMap(AppendResult.SUCCESS, flatFile, 9, 9, 0, 0, buffer3); - dispatcher.buildConsumeQueueAndIndexFile(); - Assert.assertEquals(6, flatFile.getConsumeQueueMinOffset()); - Assert.assertEquals(9 + 1, flatFile.getConsumeQueueMaxOffset()); - } - - @Test - public void testDispatchByFlatFile() { - metadataStore.addQueue(mq, 6); - TieredFlatFileManager flatFileManager = TieredFlatFileManager.getInstance(storeConfig); - DefaultMessageStore defaultStore = Mockito.mock(DefaultMessageStore.class); - Mockito.when(defaultStore.getConsumeQueue(mq.getTopic(), mq.getQueueId())).thenReturn(Mockito.mock(ConsumeQueue.class)); - TieredDispatcher dispatcher = new TieredDispatcher(defaultStore, storeConfig); - - Mockito.when(defaultStore.getMinOffsetInQueue(mq.getTopic(), mq.getQueueId())).thenReturn(0L); - Mockito.when(defaultStore.getMaxOffsetInQueue(mq.getTopic(), mq.getQueueId())).thenReturn(9L); - - // mock cq item, position = 7 - ByteBuffer cqItem = ByteBuffer.allocate(ConsumeQueue.CQ_STORE_UNIT_SIZE); - cqItem.putLong(7); - 
cqItem.putInt(MessageBufferUtilTest.MSG_LEN); - cqItem.putLong(1); - cqItem.flip(); - SelectMappedBufferResult mockResult = new SelectMappedBufferResult(0, cqItem, ConsumeQueue.CQ_STORE_UNIT_SIZE, null); - Mockito.when(((ConsumeQueue) defaultStore.getConsumeQueue(mq.getTopic(), mq.getQueueId())).getIndexBuffer(6)).thenReturn(mockResult); - - // mock cq item, position = 8 - cqItem = ByteBuffer.allocate(ConsumeQueue.CQ_STORE_UNIT_SIZE); - cqItem.putLong(8); - cqItem.putInt(MessageBufferUtilTest.MSG_LEN); - cqItem.putLong(1); - cqItem.flip(); - mockResult = new SelectMappedBufferResult(0, cqItem, ConsumeQueue.CQ_STORE_UNIT_SIZE, null); - Mockito.when(((ConsumeQueue) defaultStore.getConsumeQueue(mq.getTopic(), mq.getQueueId())).getIndexBuffer(7)).thenReturn(mockResult); - - mockResult = new SelectMappedBufferResult(0, MessageBufferUtilTest.buildMockedMessageBuffer(), MessageBufferUtilTest.MSG_LEN, null); - Mockito.when(defaultStore.selectOneMessageByOffset(7, MessageBufferUtilTest.MSG_LEN)).thenReturn(mockResult); - - ByteBuffer msg = MessageBufferUtilTest.buildMockedMessageBuffer(); - msg.putLong(MessageBufferUtil.QUEUE_OFFSET_POSITION, 7); - mockResult = new SelectMappedBufferResult(0, msg, MessageBufferUtilTest.MSG_LEN, null); - Mockito.when(defaultStore.selectOneMessageByOffset(8, MessageBufferUtilTest.MSG_LEN)).thenReturn(mockResult); - - CompositeQueueFlatFile flatFile = flatFileManager.getOrCreateFlatFileIfAbsent(mq); - Assert.assertNotNull(flatFile); - flatFile.initOffset(7); - dispatcher.dispatchFlatFile(flatFile); - Assert.assertEquals(8, flatFileManager.getFlatFile(mq).getDispatchOffset()); - } -} diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/TieredMessageFetcherTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/TieredMessageFetcherTest.java deleted file mode 100644 index 4e8287533f5..00000000000 --- a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/TieredMessageFetcherTest.java +++ /dev/null @@ -1,302 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.rocketmq.tieredstore; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Objects; -import java.util.concurrent.TimeUnit; -import org.apache.commons.lang3.SystemUtils; -import org.apache.commons.lang3.tuple.Triple; -import org.apache.rocketmq.common.BoundaryType; -import org.apache.rocketmq.common.message.MessageQueue; -import org.apache.rocketmq.store.DispatchRequest; -import org.apache.rocketmq.store.GetMessageResult; -import org.apache.rocketmq.store.GetMessageStatus; -import org.apache.rocketmq.store.QueryMessageResult; -import org.apache.rocketmq.store.SelectMappedBufferResult; -import org.apache.rocketmq.tieredstore.common.AppendResult; -import org.apache.rocketmq.tieredstore.common.SelectBufferResultWrapper; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.common.TieredStoreExecutor; -import org.apache.rocketmq.tieredstore.file.CompositeFlatFile; -import org.apache.rocketmq.tieredstore.file.CompositeQueueFlatFile; -import org.apache.rocketmq.tieredstore.file.TieredFlatFileManager; -import org.apache.rocketmq.tieredstore.util.MessageBufferUtil; -import org.apache.rocketmq.tieredstore.util.MessageBufferUtilTest; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; -import org.awaitility.Awaitility; -import org.junit.After; -import org.junit.Assert; -import org.junit.Assume; -import org.junit.Before; -import org.junit.Test; - -public class TieredMessageFetcherTest { - - private final String storePath = TieredStoreTestUtil.getRandomStorePath(); - private TieredMessageStoreConfig storeConfig; - private MessageQueue mq; - - @Before - public void setUp() { - storeConfig = new TieredMessageStoreConfig(); - storeConfig.setStorePathRootDir(storePath); - storeConfig.setBrokerName(storeConfig.getBrokerName()); - storeConfig.setReadAheadCacheExpireDuration(Long.MAX_VALUE); - storeConfig.setTieredBackendServiceProvider("org.apache.rocketmq.tieredstore.provider.memory.MemoryFileSegmentWithoutCheck"); - storeConfig.setTieredStoreIndexFileMaxHashSlotNum(2); - storeConfig.setTieredStoreIndexFileMaxIndexNum(3); - mq = new MessageQueue("TieredMessageFetcherTest", storeConfig.getBrokerName(), 0); - TieredStoreUtil.getMetadataStore(storeConfig); - TieredStoreExecutor.init(); - } - - @After - public void tearDown() throws IOException { - TieredStoreTestUtil.destroyCompositeFlatFileManager(); - TieredStoreTestUtil.destroyMetadataStore(); - TieredStoreTestUtil.destroyTempDir(storePath); - TieredStoreExecutor.shutdown(); - } - - public Triple buildFetcher() { - TieredFlatFileManager flatFileManager = TieredFlatFileManager.getInstance(storeConfig); - TieredMessageFetcher fetcher = new TieredMessageFetcher(storeConfig); - GetMessageResult getMessageResult = fetcher.getMessageAsync("group", mq.getTopic(), mq.getQueueId(), 0, 32, null).join(); - Assert.assertEquals(GetMessageStatus.NO_MATCHED_LOGIC_QUEUE, getMessageResult.getStatus()); - - CompositeFlatFile flatFile = flatFileManager.getOrCreateFlatFileIfAbsent(mq); - Assert.assertNotNull(flatFile); - flatFile.initOffset(0); - - getMessageResult = fetcher.getMessageAsync("group", mq.getTopic(), mq.getQueueId(), 0, 32, null).join(); - Assert.assertEquals(GetMessageStatus.NO_MESSAGE_IN_QUEUE, getMessageResult.getStatus()); - - ByteBuffer msg1 = MessageBufferUtilTest.buildMockedMessageBuffer(); - msg1.putLong(MessageBufferUtil.QUEUE_OFFSET_POSITION, 0); - msg1.putLong(MessageBufferUtil.PHYSICAL_OFFSET_POSITION, 0); 
- AppendResult result = flatFile.appendCommitLog(msg1); - Assert.assertEquals(AppendResult.SUCCESS, result); - - ByteBuffer msg2 = MessageBufferUtilTest.buildMockedMessageBuffer(); - msg2.putLong(MessageBufferUtil.QUEUE_OFFSET_POSITION, 1); - msg2.putLong(MessageBufferUtil.PHYSICAL_OFFSET_POSITION, MessageBufferUtilTest.MSG_LEN); - flatFile.appendCommitLog(msg2); - Assert.assertEquals(AppendResult.SUCCESS, result); - - result = flatFile.appendConsumeQueue(new DispatchRequest(mq.getTopic(), mq.getQueueId(), 0, 0, MessageBufferUtilTest.MSG_LEN, 0)); - Assert.assertEquals(AppendResult.SUCCESS, result); - result = flatFile.appendConsumeQueue(new DispatchRequest(mq.getTopic(), mq.getQueueId(), 1, MessageBufferUtilTest.MSG_LEN, MessageBufferUtilTest.MSG_LEN, 0)); - Assert.assertEquals(AppendResult.SUCCESS, result); - - flatFile.commit(true); - return Triple.of(fetcher, msg1, msg2); - } - - @Test - public void testGetMessageFromTieredStoreAsync() { - Triple triple = buildFetcher(); - TieredMessageFetcher fetcher = triple.getLeft(); - ByteBuffer msg1 = triple.getMiddle(); - ByteBuffer msg2 = triple.getRight(); - CompositeQueueFlatFile flatFile = TieredFlatFileManager.getInstance(storeConfig).getFlatFile(mq); - Assert.assertNotNull(flatFile); - - GetMessageResult getMessageResult = fetcher.getMessageFromTieredStoreAsync(flatFile, 0, 32).join(); - Assert.assertEquals(GetMessageStatus.FOUND, getMessageResult.getStatus()); - Assert.assertEquals(2, getMessageResult.getMessageBufferList().size()); - Assert.assertEquals(msg1, getMessageResult.getMessageBufferList().get(0)); - Assert.assertEquals(msg2, getMessageResult.getMessageBufferList().get(1)); - - AppendResult result = flatFile.appendConsumeQueue(new DispatchRequest(mq.getTopic(), mq.getQueueId(), 2, storeConfig.getReadAheadMessageSizeThreshold(), MessageBufferUtilTest.MSG_LEN, 0)); - Assert.assertEquals(AppendResult.SUCCESS, result); - flatFile.commit(true); - getMessageResult = fetcher.getMessageFromTieredStoreAsync(flatFile, 0, 32).join(); - Assert.assertEquals(GetMessageStatus.FOUND, getMessageResult.getStatus()); - Assert.assertEquals(2, getMessageResult.getMessageBufferList().size()); - } - - @Test - public void testGetMessageFromCacheAsync() { - Triple triple = buildFetcher(); - TieredMessageFetcher fetcher = triple.getLeft(); - ByteBuffer msg1 = triple.getMiddle(); - ByteBuffer msg2 = triple.getRight(); - CompositeQueueFlatFile flatFile = TieredFlatFileManager.getInstance(storeConfig).getFlatFile(mq); - Assert.assertNotNull(flatFile); - - fetcher.recordCacheAccess(flatFile, "prevent-invalid-cache", 0, new ArrayList<>()); - Assert.assertEquals(0, fetcher.getMessageCache().estimatedSize()); - SelectMappedBufferResult bufferResult = new SelectMappedBufferResult(0, msg1, msg1.remaining(), null); - fetcher.putMessageToCache(flatFile, new SelectBufferResultWrapper(bufferResult, 0, 0, false)); - Assert.assertEquals(1, fetcher.getMessageCache().estimatedSize()); - - GetMessageResult getMessageResult = fetcher.getMessageFromCacheAsync(flatFile, "group", 0, 32, true).join(); - Assert.assertEquals(GetMessageStatus.FOUND, getMessageResult.getStatus()); - Assert.assertEquals(1, getMessageResult.getMessageBufferList().size()); - Assert.assertEquals(msg1, getMessageResult.getMessageBufferList().get(0)); - - Awaitility.waitAtMost(3, TimeUnit.SECONDS) - .until(() -> fetcher.getMessageCache().estimatedSize() == 2); - ArrayList wrapperList = new ArrayList<>(); - wrapperList.add(fetcher.getMessageFromCache(flatFile, 0)); - fetcher.recordCacheAccess(flatFile, 
"prevent-invalid-cache", 0, wrapperList); - Assert.assertEquals(1, fetcher.getMessageCache().estimatedSize()); - wrapperList.clear(); - wrapperList.add(fetcher.getMessageFromCache(flatFile, 1)); - fetcher.recordCacheAccess(flatFile, "prevent-invalid-cache", 0, wrapperList); - Assert.assertEquals(1, fetcher.getMessageCache().estimatedSize()); - - SelectMappedBufferResult messageFromCache = - Objects.requireNonNull(fetcher.getMessageFromCache(flatFile, 1)).getDuplicateResult(); - fetcher.recordCacheAccess(flatFile, "group", 0, wrapperList); - Assert.assertNotNull(messageFromCache); - Assert.assertEquals(msg2, messageFromCache.getByteBuffer()); - Assert.assertEquals(0, fetcher.getMessageCache().estimatedSize()); - } - - @Test - public void testGetMessageAsync() { - Triple triple = buildFetcher(); - TieredMessageFetcher fetcher = triple.getLeft(); - ByteBuffer msg1 = triple.getMiddle(); - ByteBuffer msg2 = triple.getRight(); - - GetMessageResult getMessageResult = fetcher.getMessageAsync("group", mq.getTopic(), mq.getQueueId(), -1, 32, null).join(); - Assert.assertEquals(GetMessageStatus.OFFSET_TOO_SMALL, getMessageResult.getStatus()); - - getMessageResult = fetcher.getMessageAsync("group", mq.getTopic(), mq.getQueueId(), 2, 32, null).join(); - Assert.assertEquals(GetMessageStatus.OFFSET_OVERFLOW_ONE, getMessageResult.getStatus()); - - getMessageResult = fetcher.getMessageAsync("group", mq.getTopic(), mq.getQueueId(), 3, 32, null).join(); - Assert.assertEquals(GetMessageStatus.OFFSET_OVERFLOW_BADLY, getMessageResult.getStatus()); - - getMessageResult = fetcher.getMessageAsync("group", mq.getTopic(), mq.getQueueId(), 0, 32, null).join(); - Assert.assertEquals(GetMessageStatus.FOUND, getMessageResult.getStatus()); - Assert.assertEquals(2, getMessageResult.getMessageBufferList().size()); - Assert.assertEquals(msg1, getMessageResult.getMessageBufferList().get(0)); - Assert.assertEquals(msg2, getMessageResult.getMessageBufferList().get(1)); - } - - @Test - public void testGetMessageStoreTimeStampAsync() { - TieredMessageFetcher fetcher = new TieredMessageFetcher(storeConfig); - CompositeFlatFile flatFile = TieredFlatFileManager.getInstance(storeConfig).getOrCreateFlatFileIfAbsent(mq); - Assert.assertNotNull(flatFile); - flatFile.initOffset(0); - - ByteBuffer msg1 = MessageBufferUtilTest.buildMockedMessageBuffer(); - msg1.putLong(MessageBufferUtil.QUEUE_OFFSET_POSITION, 0); - msg1.putLong(MessageBufferUtil.PHYSICAL_OFFSET_POSITION, 0); - long currentTimeMillis1 = System.currentTimeMillis(); - msg1.putLong(MessageBufferUtil.STORE_TIMESTAMP_POSITION, currentTimeMillis1); - AppendResult result = flatFile.appendCommitLog(msg1); - Assert.assertEquals(AppendResult.SUCCESS, result); - - ByteBuffer msg2 = MessageBufferUtilTest.buildMockedMessageBuffer(); - msg2.putLong(MessageBufferUtil.QUEUE_OFFSET_POSITION, 1); - msg2.putLong(MessageBufferUtil.PHYSICAL_OFFSET_POSITION, MessageBufferUtilTest.MSG_LEN); - long currentTimeMillis2 = System.currentTimeMillis(); - msg2.putLong(MessageBufferUtil.STORE_TIMESTAMP_POSITION, currentTimeMillis2); - flatFile.appendCommitLog(msg2); - Assert.assertEquals(AppendResult.SUCCESS, result); - - result = flatFile.appendConsumeQueue(new DispatchRequest(mq.getTopic(), mq.getQueueId(), 0, 0, MessageBufferUtilTest.MSG_LEN, 0)); - Assert.assertEquals(AppendResult.SUCCESS, result); - result = flatFile.appendConsumeQueue(new DispatchRequest(mq.getTopic(), mq.getQueueId(), 1, MessageBufferUtilTest.MSG_LEN, MessageBufferUtilTest.MSG_LEN, 0)); - Assert.assertEquals(AppendResult.SUCCESS, 
result); - - flatFile.commit(true); - - long result1 = fetcher.getEarliestMessageTimeAsync(mq.getTopic(), mq.getQueueId()).join(); - long result2 = fetcher.getMessageStoreTimeStampAsync(mq.getTopic(), mq.getQueueId(), 0).join(); - Assert.assertEquals(result1, result2); - Assert.assertEquals(currentTimeMillis1, result1); - - long result3 = fetcher.getMessageStoreTimeStampAsync(mq.getTopic(), mq.getQueueId(), 1).join(); - Assert.assertEquals(currentTimeMillis2, result3); - } - - @Test - public void testGetOffsetInQueueByTime() { - TieredMessageFetcher fetcher = new TieredMessageFetcher(storeConfig); - Assert.assertEquals(-1, fetcher.getOffsetInQueueByTime(mq.getTopic(), mq.getQueueId(), 0, BoundaryType.LOWER)); - - CompositeQueueFlatFile flatFile = TieredFlatFileManager.getInstance(storeConfig).getOrCreateFlatFileIfAbsent(mq); - Assert.assertEquals(-1, fetcher.getOffsetInQueueByTime(mq.getTopic(), mq.getQueueId(), 0, BoundaryType.LOWER)); - Assert.assertNotNull(flatFile); - - // offset has not been initialized, so put message would be failed - AppendResult appendResult = flatFile.appendConsumeQueue(new DispatchRequest(mq.getTopic(), mq.getQueueId(), 50, 0, MessageBufferUtilTest.MSG_LEN, 0), true); - Assert.assertEquals(AppendResult.OFFSET_INCORRECT, appendResult); - flatFile.commit(true); - Assert.assertEquals(-1, fetcher.getOffsetInQueueByTime(mq.getTopic(), mq.getQueueId(), 0, BoundaryType.LOWER)); - - long timestamp = System.currentTimeMillis(); - ByteBuffer buffer = MessageBufferUtilTest.buildMockedMessageBuffer(); - buffer.putLong(MessageBufferUtil.QUEUE_OFFSET_POSITION, 50); - buffer.putLong(MessageBufferUtil.STORE_TIMESTAMP_POSITION, timestamp); - flatFile.initOffset(50); - flatFile.appendCommitLog(buffer, true); - appendResult = flatFile.appendConsumeQueue(new DispatchRequest(mq.getTopic(), mq.getQueueId(), 0, MessageBufferUtilTest.MSG_LEN, 0, timestamp, 50, "", "", 0, 0, null), true); - Assert.assertEquals(AppendResult.SUCCESS, appendResult); - flatFile.commit(true); - Assert.assertEquals(50, fetcher.getOffsetInQueueByTime(mq.getTopic(), mq.getQueueId(), 0, BoundaryType.LOWER)); - } - - @Test - public void testQueryMessageAsync() { - // skip this test on windows - Assume.assumeFalse(SystemUtils.IS_OS_WINDOWS); - Assume.assumeFalse(SystemUtils.IS_OS_LINUX); - - TieredMessageFetcher fetcher = new TieredMessageFetcher(storeConfig); - Assert.assertEquals(0, fetcher.queryMessageAsync(mq.getTopic(), "key", 32, 0, Long.MAX_VALUE).join().getMessageMapedList().size()); - - CompositeQueueFlatFile flatFile = TieredFlatFileManager.getInstance(storeConfig).getOrCreateFlatFileIfAbsent(mq); - Assert.assertEquals(0, fetcher.queryMessageAsync(mq.getTopic(), "key", 32, 0, Long.MAX_VALUE).join().getMessageMapedList().size()); - - Assert.assertNotNull(flatFile); - flatFile.initOffset(0); - ByteBuffer buffer = MessageBufferUtilTest.buildMockedMessageBuffer(); - buffer.putLong(MessageBufferUtil.QUEUE_OFFSET_POSITION, 0); - flatFile.appendCommitLog(buffer); - buffer = MessageBufferUtilTest.buildMockedMessageBuffer(); - buffer.putLong(MessageBufferUtil.QUEUE_OFFSET_POSITION, 1); - flatFile.appendCommitLog(buffer); - buffer = MessageBufferUtilTest.buildMockedMessageBuffer(); - buffer.putLong(MessageBufferUtil.QUEUE_OFFSET_POSITION, 2); - flatFile.appendCommitLog(buffer); - - long timestamp = System.currentTimeMillis(); - DispatchRequest request = new DispatchRequest(mq.getTopic(), mq.getQueueId(), 0, MessageBufferUtilTest.MSG_LEN, 0, timestamp, 0, "", "key", 0, 0, null); - flatFile.appendIndexFile(request); 
- request = new DispatchRequest(mq.getTopic(), mq.getQueueId(), MessageBufferUtilTest.MSG_LEN, MessageBufferUtilTest.MSG_LEN, 0, timestamp + 1, 0, "", "key", 0, 0, null); - flatFile.appendIndexFile(request); - request = new DispatchRequest(mq.getTopic(), mq.getQueueId(), MessageBufferUtilTest.MSG_LEN * 2, MessageBufferUtilTest.MSG_LEN, 0, timestamp + 2, 0, "", "another-key", 0, 0, null); - flatFile.appendIndexFile(request); - flatFile.commit(true); - Assert.assertEquals(1, fetcher.queryMessageAsync(mq.getTopic(), "key", 1, 0, Long.MAX_VALUE).join().getMessageMapedList().size()); - - QueryMessageResult result = fetcher.queryMessageAsync(mq.getTopic(), "key", 32, 0, Long.MAX_VALUE).join(); - Assert.assertEquals(2, result.getMessageMapedList().size()); - Assert.assertEquals(0, result.getMessageMapedList().get(0).getByteBuffer().getLong(MessageBufferUtil.QUEUE_OFFSET_POSITION)); - Assert.assertEquals(1, result.getMessageMapedList().get(1).getByteBuffer().getLong(MessageBufferUtil.QUEUE_OFFSET_POSITION)); - } -} diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/TieredMessageStoreTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/TieredMessageStoreTest.java index 07af1fc8b1f..2f395584829 100644 --- a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/TieredMessageStoreTest.java +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/TieredMessageStoreTest.java @@ -18,30 +18,40 @@ import io.opentelemetry.api.common.Attributes; import io.opentelemetry.sdk.OpenTelemetrySdk; +import java.io.File; import java.io.IOException; import java.lang.reflect.Field; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.HashSet; +import java.util.Locale; import java.util.Properties; import java.util.Set; import java.util.concurrent.CompletableFuture; +import org.apache.rocketmq.common.BoundaryType; import org.apache.rocketmq.common.BrokerConfig; import org.apache.rocketmq.common.message.MessageQueue; +import org.apache.rocketmq.common.topic.TopicValidator; import org.apache.rocketmq.logging.org.slf4j.LoggerFactory; import org.apache.rocketmq.remoting.Configuration; -import org.apache.rocketmq.store.CommitLog; import org.apache.rocketmq.store.DefaultMessageStore; +import org.apache.rocketmq.store.DispatchRequest; import org.apache.rocketmq.store.GetMessageResult; import org.apache.rocketmq.store.GetMessageStatus; -import org.apache.rocketmq.store.MessageStore; import org.apache.rocketmq.store.QueryMessageResult; import org.apache.rocketmq.store.SelectMappedBufferResult; import org.apache.rocketmq.store.config.MessageStoreConfig; import org.apache.rocketmq.store.plugin.MessageStorePluginContext; -import org.apache.rocketmq.tieredstore.common.TieredStoreExecutor; -import org.apache.rocketmq.tieredstore.file.CompositeQueueFlatFile; -import org.apache.rocketmq.tieredstore.file.TieredFlatFileManager; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; -import org.apache.rocketmq.common.BoundaryType; +import org.apache.rocketmq.store.queue.ConsumeQueueInterface; +import org.apache.rocketmq.store.queue.CqUnit; +import org.apache.rocketmq.tieredstore.core.MessageStoreFetcher; +import org.apache.rocketmq.tieredstore.file.FlatFileStore; +import org.apache.rocketmq.tieredstore.file.FlatMessageFile; +import org.apache.rocketmq.tieredstore.provider.PosixFileSegment; +import org.apache.rocketmq.tieredstore.util.MessageFormatUtil; +import org.apache.rocketmq.tieredstore.util.MessageFormatUtilTest; +import 
org.apache.rocketmq.tieredstore.util.MessageStoreUtil; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtilTest; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -53,186 +63,231 @@ import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class TieredMessageStoreTest { - private final String storePath = TieredStoreTestUtil.getRandomStorePath(); + private final String brokerName = "brokerName"; + private final String storePath = MessageStoreUtilTest.getRandomStorePath(); + private final MessageQueue mq = new MessageQueue("MessageStoreTest", brokerName, 0); - private MessageStoreConfig storeConfig; - private MessageQueue mq; - private MessageStore nextStore; - private TieredMessageStore store; - private TieredMessageFetcher fetcher; private Configuration configuration; - private TieredFlatFileManager flatFileManager; + private DefaultMessageStore defaultStore; + private TieredMessageStore currentStore; + private FlatFileStore flatFileStore; + private MessageStoreFetcher fetcher; @Before - public void setUp() { - storeConfig = new MessageStoreConfig(); - storeConfig.setStorePathRootDir(storePath); - mq = new MessageQueue("TieredMessageStoreTest", "broker", 0); - - nextStore = Mockito.mock(DefaultMessageStore.class); - CommitLog commitLog = mock(CommitLog.class); - when(commitLog.getMinOffset()).thenReturn(100L); - when(nextStore.getCommitLog()).thenReturn(commitLog); - + public void init() throws Exception { BrokerConfig brokerConfig = new BrokerConfig(); - brokerConfig.setBrokerName("broker"); - configuration = new Configuration(LoggerFactory.getLogger(TieredStoreUtil.TIERED_STORE_LOGGER_NAME), "/tmp/rmqut/config", storeConfig, brokerConfig); + brokerConfig.setBrokerName(brokerName); + Properties properties = new Properties(); - properties.setProperty("tieredBackendServiceProvider", "org.apache.rocketmq.tieredstore.provider.memory.MemoryFileSegment"); + properties.setProperty("recordGetMessageResult", Boolean.TRUE.toString().toLowerCase(Locale.ROOT)); + properties.setProperty("tieredBackendServiceProvider", PosixFileSegment.class.getName()); + + configuration = new Configuration(LoggerFactory.getLogger( + MessageStoreUtil.TIERED_STORE_LOGGER_NAME), storePath + File.separator + "conf", + new org.apache.rocketmq.tieredstore.MessageStoreConfig(), brokerConfig); configuration.registerConfig(properties); - MessageStorePluginContext context = new MessageStorePluginContext(new MessageStoreConfig(), null, null, brokerConfig, configuration); - store = new TieredMessageStore(context, nextStore); + MessageStorePluginContext context = new MessageStorePluginContext( + new MessageStoreConfig(), null, null, brokerConfig, configuration); + + defaultStore = Mockito.mock(DefaultMessageStore.class); + Mockito.when(defaultStore.load()).thenReturn(true); + + currentStore = new TieredMessageStore(context, defaultStore); + Assert.assertNotNull(currentStore.getStoreConfig()); + Assert.assertNotNull(currentStore.getBrokerName()); + Assert.assertEquals(defaultStore, currentStore.getDefaultStore()); + Assert.assertNotNull(currentStore.getMetadataStore()); + Assert.assertNotNull(currentStore.getTopicFilter()); + Assert.assertNotNull(currentStore.getStoreExecutor()); + Assert.assertNotNull(currentStore.getFlatFileStore()); + Assert.assertNotNull(currentStore.getIndexService()); - fetcher = 
Mockito.mock(TieredMessageFetcher.class); + fetcher = Mockito.spy(currentStore.fetcher); try { - Field field = store.getClass().getDeclaredField("fetcher"); + Field field = currentStore.getClass().getDeclaredField("fetcher"); field.setAccessible(true); - field.set(store, fetcher); + field.set(currentStore, fetcher); } catch (NoSuchFieldException | IllegalAccessException e) { Assert.fail(e.getClass().getCanonicalName() + ": " + e.getMessage()); } - TieredFlatFileManager.getInstance(store.getStoreConfig()).getOrCreateFlatFileIfAbsent(mq); + flatFileStore = currentStore.getFlatFileStore(); + + Mockito.when(defaultStore.getMinOffsetInQueue(anyString(), anyInt())).thenReturn(100L); + Mockito.when(defaultStore.getMaxOffsetInQueue(anyString(), anyInt())).thenReturn(200L); + ConsumeQueueInterface cq = Mockito.mock(ConsumeQueueInterface.class); + Mockito.when(defaultStore.getConsumeQueue(anyString(), anyInt())).thenReturn(cq); + + ByteBuffer buffer = MessageFormatUtilTest.buildMockedMessageBuffer(); + Mockito.when(cq.get(anyLong())).thenReturn( + new CqUnit(100, 1000, buffer.remaining(), 0L)); + Mockito.when(defaultStore.selectOneMessageByOffset(anyLong(), anyInt())).thenReturn( + new SelectMappedBufferResult(0L, buffer.asReadOnlyBuffer(), buffer.remaining(), null)); + currentStore.load(); + + FlatMessageFile flatFile = currentStore.getFlatFileStore().computeIfAbsent(mq); + Assert.assertNotNull(flatFile); + currentStore.dispatcher.doScheduleDispatch(flatFile, true).join(); + + for (int i = 100; i < 200; i++) { + SelectMappedBufferResult bufferResult = new SelectMappedBufferResult( + 0L, buffer, buffer.remaining(), null); + DispatchRequest request = new DispatchRequest(mq.getTopic(), mq.getQueueId(), + MessageFormatUtil.getCommitLogOffset(buffer), buffer.remaining(), 0L, + MessageFormatUtil.getStoreTimeStamp(buffer), 0L, + "", "", 0, 0L, new HashMap<>()); + flatFile.appendCommitLog(bufferResult); + flatFile.appendConsumeQueue(request); + } + currentStore.dispatcher.doScheduleDispatch(flatFile, true).join(); } @After - public void tearDown() throws IOException { - TieredStoreExecutor.shutdown(); - TieredStoreTestUtil.destroyCompositeFlatFileManager(); - TieredStoreTestUtil.destroyMetadataStore(); - TieredStoreTestUtil.destroyTempDir(storePath); - } - - private void mockCompositeFlatFile() { - flatFileManager = Mockito.mock(TieredFlatFileManager.class); - CompositeQueueFlatFile flatFile = Mockito.mock(CompositeQueueFlatFile.class); - when(flatFile.getConsumeQueueCommitOffset()).thenReturn(Long.MAX_VALUE); - when(flatFileManager.getFlatFile(mq)).thenReturn(flatFile); - try { - Field field = store.getClass().getDeclaredField("flatFileManager"); - field.setAccessible(true); - field.set(store, flatFileManager); - } catch (NoSuchFieldException | IllegalAccessException e) { - Assert.fail(e.getClass().getCanonicalName() + ": " + e.getMessage()); - } + public void shutdown() throws IOException { + currentStore.shutdown(); + currentStore.destroy(); + MessageStoreUtilTest.deleteStoreDirectory(storePath); } @Test public void testViaTieredStorage() { - mockCompositeFlatFile(); Properties properties = new Properties(); + // TieredStorageLevel.DISABLE properties.setProperty("tieredStorageLevel", "0"); configuration.update(properties); - Assert.assertFalse(store.fetchFromCurrentStore(mq.getTopic(), mq.getQueueId(), 0)); + Assert.assertFalse(currentStore.fetchFromCurrentStore(mq.getTopic(), mq.getQueueId(), 0)); // TieredStorageLevel.NOT_IN_DISK properties.setProperty("tieredStorageLevel", "1"); 
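// A rough reading of the NOT_IN_DISK level, inferred from the two assertions that
// follow: the tiered store should serve the read only when the default store no
// longer keeps the requested consume offset on disk, roughly
//   fetchFromCurrentStore(topic, queueId, offset)
//       == !defaultStore.checkInStoreByConsumeOffset(topic, queueId, offset)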
configuration.update(properties); - when(nextStore.checkInStoreByConsumeOffset(anyString(), anyInt(), anyLong())).thenReturn(false); - Assert.assertTrue(store.fetchFromCurrentStore(mq.getTopic(), mq.getQueueId(), 0)); + when(defaultStore.checkInStoreByConsumeOffset(anyString(), anyInt(), anyLong())).thenReturn(false); + Assert.assertTrue(currentStore.fetchFromCurrentStore(mq.getTopic(), mq.getQueueId(), 0)); - when(nextStore.checkInStoreByConsumeOffset(anyString(), anyInt(), anyLong())).thenReturn(true); - Assert.assertFalse(store.fetchFromCurrentStore(mq.getTopic(), mq.getQueueId(), 0)); + when(defaultStore.checkInStoreByConsumeOffset(anyString(), anyInt(), anyLong())).thenReturn(true); + Assert.assertFalse(currentStore.fetchFromCurrentStore(mq.getTopic(), mq.getQueueId(), 0)); // TieredStorageLevel.NOT_IN_MEM properties.setProperty("tieredStorageLevel", "2"); configuration.update(properties); - Mockito.when(nextStore.checkInStoreByConsumeOffset(anyString(), anyInt(), anyLong())).thenReturn(false); - Mockito.when(nextStore.checkInMemByConsumeOffset(anyString(), anyInt(), anyLong(), anyInt())).thenReturn(true); - Assert.assertTrue(store.fetchFromCurrentStore(mq.getTopic(), mq.getQueueId(), 0)); + Mockito.when(defaultStore.checkInStoreByConsumeOffset(anyString(), anyInt(), anyLong())).thenReturn(false); + Mockito.when(defaultStore.checkInMemByConsumeOffset(anyString(), anyInt(), anyLong(), anyInt())).thenReturn(true); + Assert.assertTrue(currentStore.fetchFromCurrentStore(mq.getTopic(), mq.getQueueId(), 0)); - Mockito.when(nextStore.checkInStoreByConsumeOffset(anyString(), anyInt(), anyLong())).thenReturn(true); - Mockito.when(nextStore.checkInMemByConsumeOffset(anyString(), anyInt(), anyLong(), anyInt())).thenReturn(false); - Assert.assertTrue(store.fetchFromCurrentStore(mq.getTopic(), mq.getQueueId(), 0)); + Mockito.when(defaultStore.checkInStoreByConsumeOffset(anyString(), anyInt(), anyLong())).thenReturn(true); + Mockito.when(defaultStore.checkInMemByConsumeOffset(anyString(), anyInt(), anyLong(), anyInt())).thenReturn(false); + Assert.assertTrue(currentStore.fetchFromCurrentStore(mq.getTopic(), mq.getQueueId(), 0)); - Mockito.when(nextStore.checkInStoreByConsumeOffset(anyString(), anyInt(), anyLong())).thenReturn(true); - Mockito.when(nextStore.checkInMemByConsumeOffset(anyString(), anyInt(), anyLong(), anyInt())).thenReturn(true); - Assert.assertFalse(store.fetchFromCurrentStore(mq.getTopic(), mq.getQueueId(), 0)); + Mockito.when(defaultStore.checkInStoreByConsumeOffset(anyString(), anyInt(), anyLong())).thenReturn(true); + Mockito.when(defaultStore.checkInMemByConsumeOffset(anyString(), anyInt(), anyLong(), anyInt())).thenReturn(true); + Assert.assertFalse(currentStore.fetchFromCurrentStore(mq.getTopic(), mq.getQueueId(), 0)); // TieredStorageLevel.FORCE properties.setProperty("tieredStorageLevel", "3"); configuration.update(properties); - Assert.assertTrue(store.fetchFromCurrentStore(mq.getTopic(), mq.getQueueId(), 0)); + Assert.assertTrue(currentStore.fetchFromCurrentStore(mq.getTopic(), mq.getQueueId(), 0)); } @Test public void testGetMessageAsync() { - mockCompositeFlatFile(); - GetMessageResult result1 = new GetMessageResult(); - result1.setStatus(GetMessageStatus.FOUND); - GetMessageResult result2 = new GetMessageResult(); - result2.setStatus(GetMessageStatus.OFFSET_OVERFLOW_BADLY); - - when(fetcher.getMessageAsync(anyString(), anyString(), anyInt(), anyLong(), anyInt(), any())).thenReturn(CompletableFuture.completedFuture(result1)); - when(nextStore.getMessage(anyString(), 
anyString(), anyInt(), anyLong(), anyInt(), any())).thenReturn(result2); - Assert.assertSame(result1, store.getMessage("group", mq.getTopic(), mq.getQueueId(), 0, 0, null)); - - result1.setStatus(GetMessageStatus.NO_MATCHED_LOGIC_QUEUE); - Assert.assertSame(result1, store.getMessage("group", mq.getTopic(), mq.getQueueId(), 0, 0, null)); - - result1.setStatus(GetMessageStatus.OFFSET_OVERFLOW_ONE); - Assert.assertSame(result1, store.getMessage("group", mq.getTopic(), mq.getQueueId(), 0, 0, null)); + GetMessageResult expect = new GetMessageResult(); + expect.setStatus(GetMessageStatus.FOUND); + expect.setMinOffset(100L); + expect.setMaxOffset(200L); + + // topic filter + Mockito.when(defaultStore.getMessageAsync(anyString(), anyString(), anyInt(), anyLong(), anyInt(), any())) + .thenReturn(CompletableFuture.completedFuture(expect)); + String groupName = "groupName"; + GetMessageResult result = currentStore.getMessage( + groupName, TopicValidator.SYSTEM_TOPIC_PREFIX, mq.getQueueId(), 100, 0, null); + Assert.assertSame(expect, result); + + // fetch from default + Mockito.when(fetcher.getMessageAsync(anyString(), anyString(), anyInt(), anyLong(), anyInt(), any())) + .thenReturn(CompletableFuture.completedFuture(expect)); + + result = currentStore.getMessage( + groupName, mq.getTopic(), mq.getQueueId(), 100, 0, null); + Assert.assertSame(expect, result); + + expect.setStatus(GetMessageStatus.NO_MATCHED_LOGIC_QUEUE); + Assert.assertSame(expect, currentStore.getMessage( + groupName, mq.getTopic(), mq.getQueueId(), 0, 0, null)); + + expect.setStatus(GetMessageStatus.OFFSET_OVERFLOW_ONE); + Assert.assertSame(expect, currentStore.getMessage( + groupName, mq.getTopic(), mq.getQueueId(), 0, 0, null)); + + expect.setStatus(GetMessageStatus.OFFSET_OVERFLOW_BADLY); + Assert.assertSame(expect, currentStore.getMessage( + groupName, mq.getTopic(), mq.getQueueId(), 0, 0, null)); + + expect.setStatus(GetMessageStatus.OFFSET_RESET); + Assert.assertSame(expect, currentStore.getMessage( + groupName, mq.getTopic(), mq.getQueueId(), 0, 0, null)); + } - result1.setStatus(GetMessageStatus.OFFSET_OVERFLOW_BADLY); - Assert.assertSame(result1, store.getMessage("group", mq.getTopic(), mq.getQueueId(), 0, 0, null)); + @Test + public void testGetMinOffsetInQueue() { + FlatMessageFile flatFile = flatFileStore.getFlatFile(mq); + Mockito.when(defaultStore.getMinOffsetInQueue(anyString(), anyInt())).thenReturn(100L); + Assert.assertEquals(100L, currentStore.getMinOffsetInQueue(mq.getTopic(), mq.getQueueId())); - // TieredStorageLevel.FORCE - Properties properties = new Properties(); - properties.setProperty("tieredStorageLevel", "3"); - configuration.update(properties); - when(nextStore.checkInStoreByConsumeOffset(anyString(), anyInt(), anyLong())).thenReturn(true); - Assert.assertEquals(result2.getStatus(), - store.getMessage("group", mq.getTopic(), mq.getQueueId(), 0, 0, null).getStatus()); + Mockito.when(flatFile.getConsumeQueueMinOffset()).thenReturn(10L); + Assert.assertEquals(10L, currentStore.getMinOffsetInQueue(mq.getTopic(), mq.getQueueId())); } @Test public void testGetEarliestMessageTimeAsync() { when(fetcher.getEarliestMessageTimeAsync(anyString(), anyInt())).thenReturn(CompletableFuture.completedFuture(1L)); - Assert.assertEquals(1, (long) store.getEarliestMessageTimeAsync(mq.getTopic(), mq.getQueueId()).join()); + Assert.assertEquals(1, (long) currentStore.getEarliestMessageTimeAsync(mq.getTopic(), mq.getQueueId()).join()); when(fetcher.getEarliestMessageTimeAsync(anyString(), 
anyInt())).thenReturn(CompletableFuture.completedFuture(-1L)); - when(nextStore.getEarliestMessageTime(anyString(), anyInt())).thenReturn(2L); - Assert.assertEquals(2, (long) store.getEarliestMessageTimeAsync(mq.getTopic(), mq.getQueueId()).join()); + when(defaultStore.getEarliestMessageTime(anyString(), anyInt())).thenReturn(2L); + Assert.assertEquals(2, (long) currentStore.getEarliestMessageTimeAsync(mq.getTopic(), mq.getQueueId()).join()); } @Test public void testGetMessageStoreTimeStampAsync() { - mockCompositeFlatFile(); // TieredStorageLevel.DISABLE Properties properties = new Properties(); properties.setProperty("tieredStorageLevel", "DISABLE"); configuration.update(properties); when(fetcher.getMessageStoreTimeStampAsync(anyString(), anyInt(), anyLong())).thenReturn(CompletableFuture.completedFuture(1L)); - when(nextStore.getMessageStoreTimeStampAsync(anyString(), anyInt(), anyLong())).thenReturn(CompletableFuture.completedFuture(2L)); - when(nextStore.getMessageStoreTimeStamp(anyString(), anyInt(), anyLong())).thenReturn(3L); - Assert.assertEquals(2, (long) store.getMessageStoreTimeStampAsync(mq.getTopic(), mq.getQueueId(), 0).join()); + when(defaultStore.getMessageStoreTimeStampAsync(anyString(), anyInt(), anyLong())).thenReturn(CompletableFuture.completedFuture(2L)); + when(defaultStore.getMessageStoreTimeStamp(anyString(), anyInt(), anyLong())).thenReturn(3L); + Assert.assertEquals(2, (long) currentStore.getMessageStoreTimeStampAsync(mq.getTopic(), mq.getQueueId(), 0).join()); // TieredStorageLevel.FORCE properties.setProperty("tieredStorageLevel", "FORCE"); configuration.update(properties); - Assert.assertEquals(1, (long) store.getMessageStoreTimeStampAsync(mq.getTopic(), mq.getQueueId(), 0).join()); + Assert.assertEquals(1, (long) currentStore.getMessageStoreTimeStampAsync(mq.getTopic(), mq.getQueueId(), 0).join()); Mockito.when(fetcher.getMessageStoreTimeStampAsync(anyString(), anyInt(), anyLong())).thenReturn(CompletableFuture.completedFuture(-1L)); - Assert.assertEquals(3, (long) store.getMessageStoreTimeStampAsync(mq.getTopic(), mq.getQueueId(), 0).join()); + Assert.assertEquals(3, (long) currentStore.getMessageStoreTimeStampAsync(mq.getTopic(), mq.getQueueId(), 0).join()); } @Test public void testGetOffsetInQueueByTime() { + Properties properties = new Properties(); + properties.setProperty("tieredStorageLevel", "FORCE"); + configuration.update(properties); + Mockito.when(fetcher.getOffsetInQueueByTime(anyString(), anyInt(), anyLong(), eq(BoundaryType.LOWER))).thenReturn(1L); - Mockito.when(nextStore.getOffsetInQueueByTime(anyString(), anyInt(), anyLong())).thenReturn(2L); - Mockito.when(nextStore.getEarliestMessageTime()).thenReturn(100L); - Assert.assertEquals(1, store.getOffsetInQueueByTime(mq.getTopic(), mq.getQueueId(), 0, BoundaryType.LOWER)); - Assert.assertEquals(2, store.getOffsetInQueueByTime(mq.getTopic(), mq.getQueueId(), 1000, BoundaryType.LOWER)); + Mockito.when(defaultStore.getOffsetInQueueByTime(anyString(), anyInt(), anyLong())).thenReturn(2L); + Mockito.when(defaultStore.getEarliestMessageTime()).thenReturn(100L); + Assert.assertEquals(1L, currentStore.getOffsetInQueueByTime(mq.getTopic(), mq.getQueueId(), 1000, BoundaryType.LOWER)); + Assert.assertEquals(1L, currentStore.getOffsetInQueueByTime(mq.getTopic(), mq.getQueueId(), 0, BoundaryType.LOWER)); Mockito.when(fetcher.getOffsetInQueueByTime(anyString(), anyInt(), anyLong(), eq(BoundaryType.LOWER))).thenReturn(-1L); - Assert.assertEquals(2, store.getOffsetInQueueByTime(mq.getTopic(), mq.getQueueId(), 
0, BoundaryType.LOWER)); + Assert.assertEquals(-1L, currentStore.getOffsetInQueueByTime(mq.getTopic(), mq.getQueueId(), 0)); + Assert.assertEquals(-1L, currentStore.getOffsetInQueueByTime(mq.getTopic(), mq.getQueueId(), 0, BoundaryType.LOWER)); } @Test @@ -243,55 +298,36 @@ public void testQueryMessage() { when(fetcher.queryMessageAsync(anyString(), anyString(), anyInt(), anyLong(), anyLong())).thenReturn(CompletableFuture.completedFuture(result1)); QueryMessageResult result2 = new QueryMessageResult(); result2.addMessage(new SelectMappedBufferResult(0, null, 0, null)); - when(nextStore.queryMessage(anyString(), anyString(), anyInt(), anyLong(), anyLong())).thenReturn(result2); - when(nextStore.getEarliestMessageTime()).thenReturn(100L); - Assert.assertEquals(2, store.queryMessage(mq.getTopic(), "key", 32, 0, 99).getMessageMapedList().size()); - Assert.assertEquals(1, store.queryMessage(mq.getTopic(), "key", 32, 100, 200).getMessageMapedList().size()); - Assert.assertEquals(3, store.queryMessage(mq.getTopic(), "key", 32, 0, 200).getMessageMapedList().size()); - } - - @Test - public void testGetMinOffsetInQueue() { - mockCompositeFlatFile(); - CompositeQueueFlatFile flatFile = flatFileManager.getFlatFile(mq); - when(nextStore.getMinOffsetInQueue(anyString(), anyInt())).thenReturn(100L); - when(flatFileManager.getFlatFile(mq)).thenReturn(null); - Assert.assertEquals(100L, store.getMinOffsetInQueue(mq.getTopic(), mq.getQueueId())); - - when(flatFileManager.getFlatFile(mq)).thenReturn(flatFile); - when(flatFile.getConsumeQueueMinOffset()).thenReturn(10L); - Assert.assertEquals(10L, store.getMinOffsetInQueue(mq.getTopic(), mq.getQueueId())); + when(defaultStore.queryMessage(anyString(), anyString(), anyInt(), anyLong(), anyLong())).thenReturn(result2); + when(defaultStore.getEarliestMessageTime()).thenReturn(100L); + Assert.assertEquals(2, currentStore.queryMessage(mq.getTopic(), "key", 32, 0, 99).getMessageMapedList().size()); + Assert.assertEquals(1, currentStore.queryMessage(mq.getTopic(), "key", 32, 100, 200).getMessageMapedList().size()); + Assert.assertEquals(3, currentStore.queryMessage(mq.getTopic(), "key", 32, 0, 200).getMessageMapedList().size()); } @Test public void testCleanUnusedTopics() { Set topicSet = new HashSet<>(); - store.cleanUnusedTopic(topicSet); - Assert.assertNull(TieredFlatFileManager.getInstance(store.getStoreConfig()).getFlatFile(mq)); - Assert.assertNull(TieredStoreUtil.getMetadataStore(store.getStoreConfig()).getTopic(mq.getTopic())); - Assert.assertNull(TieredStoreUtil.getMetadataStore(store.getStoreConfig()).getQueue(mq)); + currentStore.cleanUnusedTopic(topicSet); + Assert.assertNull(flatFileStore.getFlatFile(mq)); + Assert.assertNull(flatFileStore.getMetadataStore().getTopic(mq.getTopic())); + Assert.assertNull(flatFileStore.getMetadataStore().getQueue(mq)); } @Test public void testDeleteTopics() { Set topicSet = new HashSet<>(); topicSet.add(mq.getTopic()); - store.deleteTopics(topicSet); - Assert.assertNull(TieredFlatFileManager.getInstance(store.getStoreConfig()).getFlatFile(mq)); - Assert.assertNull(TieredStoreUtil.getMetadataStore(store.getStoreConfig()).getTopic(mq.getTopic())); - Assert.assertNull(TieredStoreUtil.getMetadataStore(store.getStoreConfig()).getQueue(mq)); + currentStore.deleteTopics(topicSet); + Assert.assertNull(flatFileStore.getFlatFile(mq)); + Assert.assertNull(flatFileStore.getMetadataStore().getTopic(mq.getTopic())); + Assert.assertNull(flatFileStore.getMetadataStore().getQueue(mq)); } @Test public void testMetrics() { - 
store.getMetricsView(); - store.initMetrics(OpenTelemetrySdk.builder().build().getMeter(""), - Attributes::builder); - } - - @Test - public void testShutdownAndDestroy() { - store.shutdown(); - store.destroy(); + currentStore.getMetricsView(); + currentStore.initMetrics( + OpenTelemetrySdk.builder().build().getMeter(""), Attributes::builder); } } diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/TieredStoreTestUtil.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/TieredStoreTestUtil.java deleted file mode 100644 index fb11b60f05a..00000000000 --- a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/TieredStoreTestUtil.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.rocketmq.tieredstore; - -import java.io.File; -import java.lang.reflect.Field; -import java.util.UUID; -import org.apache.commons.io.FileUtils; -import org.apache.rocketmq.tieredstore.file.TieredFlatFileManager; -import org.apache.rocketmq.tieredstore.metadata.TieredMetadataStore; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; -import org.junit.Assert; - -public class TieredStoreTestUtil { - - public static String getRandomStorePath() { - return FileUtils.getTempDirectory() + File.separator + "unit_test_tiered_store" + UUID.randomUUID(); - } - - public static void destroyMetadataStore() { - TieredMetadataStore metadataStore = TieredStoreUtil.getMetadataStore(null); - if (metadataStore != null) { - metadataStore.destroy(); - } - try { - Field field = TieredStoreUtil.class.getDeclaredField("metadataStoreInstance"); - field.setAccessible(true); - field.set(null, null); - } catch (NoSuchFieldException | IllegalAccessException e) { - Assert.fail(e.getClass().getCanonicalName() + ": " + e.getMessage()); - } - } - - public static void destroyCompositeFlatFileManager() { - TieredFlatFileManager flatFileManagerManager = TieredFlatFileManager.getInstance(null); - if (flatFileManagerManager != null) { - flatFileManagerManager.destroy(); - } - try { - Field field = TieredFlatFileManager.class.getDeclaredField("instance"); - field.setAccessible(true); - field.set(null, null); - } catch (NoSuchFieldException | IllegalAccessException e) { - Assert.fail(e.getClass().getCanonicalName() + ": " + e.getMessage()); - } - } - - public static void destroyTempDir(String storePath) { - try { - FileUtils.deleteDirectory(new File(storePath)); - } catch (Exception ignore) { - } - } -} diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/util/CQItemBufferUtilTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/common/FileSegmentTypeTest.java similarity index 51% rename from tieredstore/src/test/java/org/apache/rocketmq/tieredstore/util/CQItemBufferUtilTest.java 
rename to tieredstore/src/test/java/org/apache/rocketmq/tieredstore/common/FileSegmentTypeTest.java index 7f8caea2053..28439e06e6f 100644 --- a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/util/CQItemBufferUtilTest.java +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/common/FileSegmentTypeTest.java @@ -14,38 +14,25 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.rocketmq.tieredstore.util; +package org.apache.rocketmq.tieredstore.common; -import java.nio.ByteBuffer; -import org.apache.rocketmq.store.ConsumeQueue; -import org.junit.Assert; -import org.junit.BeforeClass; import org.junit.Test; -public class CQItemBufferUtilTest { - private static ByteBuffer cqItem; +import static org.junit.Assert.assertEquals; - @BeforeClass - public static void setUp() { - cqItem = ByteBuffer.allocate(ConsumeQueue.CQ_STORE_UNIT_SIZE); - cqItem.putLong(1); - cqItem.putInt(2); - cqItem.putLong(3); - cqItem.flip(); - } - - @Test - public void testGetCommitLogOffset() { - Assert.assertEquals(1, CQItemBufferUtil.getCommitLogOffset(cqItem)); - } +public class FileSegmentTypeTest { @Test - public void testGetSize() { - Assert.assertEquals(2, CQItemBufferUtil.getSize(cqItem)); + public void getTypeCodeTest() { + assertEquals(0, FileSegmentType.COMMIT_LOG.getCode()); + assertEquals(1, FileSegmentType.CONSUME_QUEUE.getCode()); + assertEquals(2, FileSegmentType.INDEX.getCode()); } @Test - public void testGetTagCode() { - Assert.assertEquals(3, CQItemBufferUtil.getTagCode(cqItem)); + public void getTypeFromValueTest() { + assertEquals(FileSegmentType.COMMIT_LOG, FileSegmentType.valueOf(0)); + assertEquals(FileSegmentType.CONSUME_QUEUE, FileSegmentType.valueOf(1)); + assertEquals(FileSegmentType.INDEX, FileSegmentType.valueOf(2)); } -} +} \ No newline at end of file diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/common/GetMessageResultExtTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/common/GetMessageResultExtTest.java index deb8770d281..69240a420a7 100644 --- a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/common/GetMessageResultExtTest.java +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/common/GetMessageResultExtTest.java @@ -23,30 +23,32 @@ import org.apache.rocketmq.store.GetMessageStatus; import org.apache.rocketmq.store.MessageFilter; import org.apache.rocketmq.store.SelectMappedBufferResult; -import org.apache.rocketmq.tieredstore.util.MessageBufferUtilTest; +import org.apache.rocketmq.tieredstore.util.MessageFormatUtilTest; import org.junit.Assert; import org.junit.Test; -import static org.junit.Assert.assertEquals; - public class GetMessageResultExtTest { @Test public void doFilterTest() { GetMessageResultExt resultExt = new GetMessageResultExt(); + Assert.assertNull(resultExt.getStatus()); Assert.assertEquals(0, resultExt.doFilterMessage(null).getMessageCount()); + resultExt.setStatus(GetMessageStatus.OFFSET_OVERFLOW_ONE); Assert.assertEquals(0, resultExt.doFilterMessage(null).getMessageCount()); + resultExt.setStatus(GetMessageStatus.OFFSET_OVERFLOW_BADLY); Assert.assertEquals(0, resultExt.doFilterMessage(null).getMessageCount()); - resultExt.addMessageExt(new SelectMappedBufferResult( - 1000L, MessageBufferUtilTest.buildMockedMessageBuffer(), 100, null), - 0, "TagA".hashCode()); - resultExt.addMessageExt(new SelectMappedBufferResult( - 2000L, MessageBufferUtilTest.buildMockedMessageBuffer(), 100, null), - 0, 
"TagB".hashCode()); - assertEquals(2, resultExt.getMessageCount()); + int total = 3; + for (int i = 0; i < total; i++) { + resultExt.addMessageExt(new SelectMappedBufferResult(i * 1000L, + MessageFormatUtilTest.buildMockedMessageBuffer(), 1000, null), + 0, ("Tag" + i).hashCode()); + } + Assert.assertEquals(total, resultExt.getMessageCount()); + Assert.assertEquals(total, resultExt.getTagCodeList().size()); resultExt.setStatus(GetMessageStatus.FOUND); GetMessageResult getMessageResult = resultExt.doFilterMessage(new MessageFilter() { @@ -61,5 +63,31 @@ public boolean isMatchedByCommitLog(ByteBuffer msgBuffer, Map pr } }); Assert.assertEquals(0, getMessageResult.getMessageCount()); + + getMessageResult = resultExt.doFilterMessage(new MessageFilter() { + @Override + public boolean isMatchedByConsumeQueue(Long tagsCode, ConsumeQueueExt.CqExtUnit cqExtUnit) { + return "Tag1".hashCode() == tagsCode; + } + + @Override + public boolean isMatchedByCommitLog(ByteBuffer msgBuffer, Map properties) { + return false; + } + }); + Assert.assertEquals(0, getMessageResult.getMessageCount()); + + getMessageResult = resultExt.doFilterMessage(new MessageFilter() { + @Override + public boolean isMatchedByConsumeQueue(Long tagsCode, ConsumeQueueExt.CqExtUnit cqExtUnit) { + return "Tag1".hashCode() == tagsCode; + } + + @Override + public boolean isMatchedByCommitLog(ByteBuffer msgBuffer, Map properties) { + return true; + } + }); + Assert.assertEquals(1, getMessageResult.getMessageCount()); } } \ No newline at end of file diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/common/InFlightRequestFutureTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/common/InFlightRequestFutureTest.java deleted file mode 100644 index 54b88f38d49..00000000000 --- a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/common/InFlightRequestFutureTest.java +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.rocketmq.tieredstore.common; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import org.apache.commons.lang3.tuple.Pair; -import org.junit.Assert; -import org.junit.Test; - -public class InFlightRequestFutureTest { - - @Test - public void testInFlightRequestFuture() { - List>> futureList = new ArrayList<>(); - futureList.add(Pair.of(32, CompletableFuture.completedFuture(1031L))); - futureList.add(Pair.of(256, CompletableFuture.completedFuture(1287L))); - InFlightRequestFuture future = new InFlightRequestFuture(1000, futureList); - - Assert.assertEquals(1000, future.getStartOffset()); - Assert.assertTrue(future.isFirstDone()); - Assert.assertTrue(future.isAllDone()); - Assert.assertEquals(1031, future.getFirstFuture().join().longValue()); - Assert.assertEquals(-1L, future.getFuture(0).join().longValue()); - Assert.assertEquals(1031L, future.getFuture(1024).join().longValue()); - Assert.assertEquals(1287L, future.getFuture(1200).join().longValue()); - Assert.assertEquals(-1L, future.getFuture(2000).join().longValue()); - Assert.assertEquals(1287L, future.getLastFuture().join().longValue()); - Assert.assertArrayEquals(futureList.stream().map(Pair::getRight).toArray(), future.getAllFuture().toArray()); - } - - @Test - public void testInFlightRequestKey() { - InFlightRequestKey requestKey1 = new InFlightRequestKey("group", 0, 0); - InFlightRequestKey requestKey2 = new InFlightRequestKey("group", 1, 1); - Assert.assertEquals(requestKey1, requestKey2); - Assert.assertEquals(requestKey1.hashCode(), requestKey2.hashCode()); - Assert.assertEquals(requestKey1.getGroup(), requestKey2.getGroup()); - Assert.assertNotEquals(requestKey1.getOffset(), requestKey2.getOffset()); - Assert.assertNotEquals(requestKey1.getBatchSize(), requestKey2.getBatchSize()); - } - - @Test - public void testGetStartOffset() { - List>> futureList = new ArrayList<>(); - CompletableFuture completableFuture = new CompletableFuture<>(); - futureList.add(Pair.of(1, completableFuture)); - InFlightRequestFuture inFlightRequestFuture = new InFlightRequestFuture(10, futureList); - long startOffset = inFlightRequestFuture.getStartOffset(); - Assert.assertEquals(10, startOffset); - } - - @Test - public void testGetFirstFuture() throws ExecutionException, InterruptedException { - List>> futureList = new ArrayList<>(); - CompletableFuture completableFuture = new CompletableFuture<>(); - completableFuture.complete(20L); - futureList.add(Pair.of(1, completableFuture)); - InFlightRequestFuture inFlightRequestFuture = new InFlightRequestFuture(10, futureList); - CompletableFuture firstFuture = inFlightRequestFuture.getFirstFuture(); - Assert.assertEquals(new Long(20), firstFuture.get()); - } - - @Test - public void testGetFuture() { - List>> futureList = new ArrayList<>(); - CompletableFuture completableFuture = new CompletableFuture<>(); - completableFuture.complete(20L); - futureList.add(Pair.of(1, completableFuture)); - InFlightRequestFuture inFlightRequestFuture = new InFlightRequestFuture(10, futureList); - CompletableFuture future = inFlightRequestFuture.getFuture(11); - Assert.assertEquals(new Long(-1L), future.join()); - } - - @Test - public void testGetLastFuture() throws ExecutionException, InterruptedException { - List>> futureList = new ArrayList<>(); - CompletableFuture completableFuture = new CompletableFuture<>(); - completableFuture.complete(20L); - futureList.add(Pair.of(1, completableFuture)); - 
InFlightRequestFuture inFlightRequestFuture = new InFlightRequestFuture(10, futureList); - CompletableFuture lastFuture = inFlightRequestFuture.getLastFuture(); - Assert.assertEquals(new Long(20), lastFuture.get()); - } - - @Test - public void testIsFirstDone() { - List>> futureList = new ArrayList<>(); - CompletableFuture completableFuture = new CompletableFuture<>(); - completableFuture.complete(20L); - futureList.add(Pair.of(1, completableFuture)); - InFlightRequestFuture inFlightRequestFuture = new InFlightRequestFuture(10, futureList); - Assert.assertTrue(inFlightRequestFuture.isFirstDone()); - } - - @Test - public void testIsAllDone() { - List>> futureList = new ArrayList<>(); - CompletableFuture completableFuture1 = new CompletableFuture<>(); - CompletableFuture completableFuture2 = new CompletableFuture<>(); - CompletableFuture completableFuture3 = new CompletableFuture<>(); - CompletableFuture completableFuture4 = new CompletableFuture<>(); - completableFuture1.complete(20L); - completableFuture2.complete(30L); - completableFuture3.complete(40L); - futureList.add(Pair.of(1, completableFuture1)); - futureList.add(Pair.of(2, completableFuture2)); - futureList.add(Pair.of(3, completableFuture3)); - futureList.add(Pair.of(4, completableFuture4)); - InFlightRequestFuture inFlightRequestFuture = new InFlightRequestFuture(10, futureList); - Assert.assertFalse(inFlightRequestFuture.isAllDone()); - } - - @Test - public void testGetAllFuture() { - List>> futureList = new ArrayList<>(); - CompletableFuture completableFuture1 = new CompletableFuture<>(); - CompletableFuture completableFuture2 = new CompletableFuture<>(); - CompletableFuture completableFuture3 = new CompletableFuture<>(); - CompletableFuture completableFuture4 = new CompletableFuture<>(); - futureList.add(Pair.of(1, completableFuture1)); - futureList.add(Pair.of(2, completableFuture2)); - futureList.add(Pair.of(3, completableFuture3)); - futureList.add(Pair.of(4, completableFuture4)); - InFlightRequestFuture inFlightRequestFuture = new InFlightRequestFuture(10, futureList); - List> allFuture = inFlightRequestFuture.getAllFuture(); - Assert.assertEquals(4, allFuture.size()); - } -} diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/common/SelectBufferResultTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/common/SelectBufferResultTest.java index b7e6e639f0c..f9dfce9447c 100644 --- a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/common/SelectBufferResultTest.java +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/common/SelectBufferResultTest.java @@ -21,8 +21,9 @@ import org.junit.Test; public class SelectBufferResultTest { + @Test - public void testSelectBufferResult() { + public void selectBufferResultTest() { ByteBuffer buffer = ByteBuffer.allocate(10); long startOffset = 5L; int size = 10; diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/core/MessageStoreDispatcherImplTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/core/MessageStoreDispatcherImplTest.java new file mode 100644 index 00000000000..8ac7e068a76 --- /dev/null +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/core/MessageStoreDispatcherImplTest.java @@ -0,0 +1,192 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.rocketmq.tieredstore.core; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.HashMap; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import org.apache.rocketmq.common.message.MessageAccessor; +import org.apache.rocketmq.common.message.MessageConst; +import org.apache.rocketmq.common.message.MessageDecoder; +import org.apache.rocketmq.common.message.MessageExt; +import org.apache.rocketmq.common.message.MessageQueue; +import org.apache.rocketmq.store.DispatchRequest; +import org.apache.rocketmq.store.MessageStore; +import org.apache.rocketmq.store.SelectMappedBufferResult; +import org.apache.rocketmq.store.queue.ConsumeQueueInterface; +import org.apache.rocketmq.store.queue.CqUnit; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; +import org.apache.rocketmq.tieredstore.MessageStoreExecutor; +import org.apache.rocketmq.tieredstore.TieredMessageStore; +import org.apache.rocketmq.tieredstore.file.FlatFileFactory; +import org.apache.rocketmq.tieredstore.file.FlatFileStore; +import org.apache.rocketmq.tieredstore.file.FlatMessageFile; +import org.apache.rocketmq.tieredstore.index.IndexItem; +import org.apache.rocketmq.tieredstore.index.IndexService; +import org.apache.rocketmq.tieredstore.index.IndexStoreService; +import org.apache.rocketmq.tieredstore.metadata.DefaultMetadataStore; +import org.apache.rocketmq.tieredstore.metadata.MetadataStore; +import org.apache.rocketmq.tieredstore.provider.PosixFileSegment; +import org.apache.rocketmq.tieredstore.util.MessageFormatUtil; +import org.apache.rocketmq.tieredstore.util.MessageFormatUtilTest; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtilTest; +import org.awaitility.Awaitility; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; + +public class MessageStoreDispatcherImplTest { + + protected final String storePath = MessageStoreUtilTest.getRandomStorePath(); + protected MessageQueue mq; + protected MetadataStore metadataStore; + protected MessageStoreConfig storeConfig; + protected MessageStoreExecutor executor; + protected FlatFileStore fileStore; + protected TieredMessageStore messageStore; + + @Before + public void init() { + storeConfig = new MessageStoreConfig(); + storeConfig.setBrokerName("brokerName"); + storeConfig.setStorePathRootDir(storePath); + storeConfig.setTieredStoreFilePath(storePath); + storeConfig.setTieredBackendServiceProvider(PosixFileSegment.class.getName()); + mq = new MessageQueue("StoreTest", storeConfig.getBrokerName(), 1); + metadataStore = new DefaultMetadataStore(storeConfig); + executor = new MessageStoreExecutor(); + 
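// The FlatFileStore built on the next line only needs the store config, the metadata
// store and the shared executor, which lets these dispatcher tests exercise tiered
// dispatch directly against a mocked default MessageStore instead of a running broker.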
fileStore = new FlatFileStore(storeConfig, metadataStore, executor); + } + + @After + public void shutdown() throws IOException { + if (messageStore != null) { + messageStore.destroy(); + } + MessageStoreUtilTest.deleteStoreDirectory(storePath); + } + + @Test + public void dispatchFromCommitLogTest() throws Exception { + MessageStore defaultStore = Mockito.mock(MessageStore.class); + Mockito.when(defaultStore.getMinOffsetInQueue(anyString(), anyInt())).thenReturn(100L); + Mockito.when(defaultStore.getMaxOffsetInQueue(anyString(), anyInt())).thenReturn(200L); + + messageStore = Mockito.mock(TieredMessageStore.class); + IndexService indexService = + new IndexStoreService(new FlatFileFactory(metadataStore, storeConfig), storePath); + Mockito.when(messageStore.getDefaultStore()).thenReturn(defaultStore); + Mockito.when(messageStore.getStoreConfig()).thenReturn(storeConfig); + Mockito.when(messageStore.getStoreExecutor()).thenReturn(executor); + Mockito.when(messageStore.getFlatFileStore()).thenReturn(fileStore); + Mockito.when(messageStore.getIndexService()).thenReturn(indexService); + + // mock message + ByteBuffer buffer = MessageFormatUtilTest.buildMockedMessageBuffer(); + MessageExt messageExt = MessageDecoder.decode(buffer); + messageExt.setKeys("Key"); + MessageAccessor.putProperty( + messageExt, MessageConst.PROPERTY_UNIQ_CLIENT_MESSAGE_ID_KEYIDX, "uk"); + messageExt.setBody(new byte[10]); + messageExt.setStoreSize(0); + buffer = ByteBuffer.wrap(MessageDecoder.encode(messageExt, false)); + buffer.putInt(0, buffer.remaining()); + + DispatchRequest request = new DispatchRequest(mq.getTopic(), mq.getQueueId(), + MessageFormatUtil.getCommitLogOffset(buffer), buffer.remaining(), 0L, + MessageFormatUtil.getStoreTimeStamp(buffer), 0L, + "", "", 0, 0L, new HashMap<>()); + + // construct flat file + MessageStoreDispatcher dispatcher = new MessageStoreDispatcherImpl(messageStore); + dispatcher.dispatch(request); + FlatMessageFile flatFile = fileStore.getFlatFile(mq); + Assert.assertNotNull(flatFile); + + // init offset + dispatcher.doScheduleDispatch(flatFile, true).join(); + Assert.assertEquals(100L, flatFile.getConsumeQueueMinOffset()); + Assert.assertEquals(100L, flatFile.getConsumeQueueMaxOffset()); + Assert.assertEquals(100L, flatFile.getConsumeQueueCommitOffset()); + + ConsumeQueueInterface cq = Mockito.mock(ConsumeQueueInterface.class); + Mockito.when(defaultStore.getConsumeQueue(anyString(), anyInt())).thenReturn(cq); + Mockito.when(cq.get(anyLong())).thenReturn( + new CqUnit(100, 1000, buffer.remaining(), 0L)); + Mockito.when(defaultStore.selectOneMessageByOffset(anyLong(), anyInt())).thenReturn( + new SelectMappedBufferResult(0L, buffer.asReadOnlyBuffer(), buffer.remaining(), null)); + dispatcher.doScheduleDispatch(flatFile, true).join(); + + Awaitility.await().pollInterval(Duration.ofSeconds(1)).atMost(Duration.ofSeconds(30)).until(() -> { + List resultList1 = indexService.queryAsync( + mq.getTopic(), "uk", 32, 0L, System.currentTimeMillis()).join(); + List resultList2 = indexService.queryAsync( + mq.getTopic(), "uk", 120, 0L, System.currentTimeMillis()).join(); + Assert.assertEquals(32, resultList1.size()); + Assert.assertEquals(100, resultList2.size()); + return true; + }); + + Assert.assertEquals(100L, flatFile.getConsumeQueueMinOffset()); + Assert.assertEquals(200L, flatFile.getConsumeQueueMaxOffset()); + Assert.assertEquals(200L, flatFile.getConsumeQueueCommitOffset()); + } + + @Test + public void dispatchServiceTest() { + MessageStore defaultStore = 
Mockito.mock(MessageStore.class); + messageStore = Mockito.mock(TieredMessageStore.class); + IndexService indexService = + new IndexStoreService(new FlatFileFactory(metadataStore, storeConfig), storePath); + Mockito.when(messageStore.getDefaultStore()).thenReturn(defaultStore); + Mockito.when(messageStore.getStoreConfig()).thenReturn(storeConfig); + Mockito.when(messageStore.getStoreExecutor()).thenReturn(executor); + Mockito.when(messageStore.getFlatFileStore()).thenReturn(fileStore); + Mockito.when(messageStore.getIndexService()).thenReturn(indexService); + + // construct flat file + ByteBuffer buffer = MessageFormatUtilTest.buildMockedMessageBuffer(); + DispatchRequest request = new DispatchRequest(mq.getTopic(), mq.getQueueId(), + MessageFormatUtil.getCommitLogOffset(buffer), buffer.remaining(), 0L, + MessageFormatUtil.getStoreTimeStamp(buffer), 0L, + "", "", 0, 0L, new HashMap<>()); + MessageStoreDispatcherImpl dispatcher = new MessageStoreDispatcherImpl(messageStore); + dispatcher.dispatch(request); + FlatMessageFile flatFile = fileStore.getFlatFile(mq); + Assert.assertNotNull(flatFile); + + AtomicBoolean result = new AtomicBoolean(false); + MessageStoreDispatcherImpl dispatcherSpy = Mockito.spy(dispatcher); + Mockito.doAnswer(mock -> { + result.set(true); + return true; + }).when(dispatcherSpy).dispatchWithSemaphore(any()); + dispatcherSpy.start(); + Awaitility.await().atMost(Duration.ofSeconds(10)).until(result::get); + dispatcherSpy.shutdown(); + } +} diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/core/MessageStoreFetcherImplTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/core/MessageStoreFetcherImplTest.java new file mode 100644 index 00000000000..ce380776ae5 --- /dev/null +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/core/MessageStoreFetcherImplTest.java @@ -0,0 +1,233 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.rocketmq.tieredstore.core; + +import java.io.IOException; +import java.time.Duration; +import java.util.concurrent.atomic.AtomicLong; +import org.apache.rocketmq.common.BoundaryType; +import org.apache.rocketmq.common.message.MessageQueue; +import org.apache.rocketmq.store.GetMessageResult; +import org.apache.rocketmq.store.GetMessageStatus; +import org.apache.rocketmq.store.QueryMessageResult; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; +import org.apache.rocketmq.tieredstore.TieredMessageStore; +import org.apache.rocketmq.tieredstore.common.GetMessageResultExt; +import org.apache.rocketmq.tieredstore.file.FlatMessageFile; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtilTest; +import org.awaitility.Awaitility; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +public class MessageStoreFetcherImplTest { + + private String groupName; + private MessageQueue mq; + private MessageStoreConfig storeConfig; + private TieredMessageStore messageStore; + private MessageStoreDispatcherImplTest dispatcherTest; + private MessageStoreFetcherImpl fetcher; + + @Before + public void init() throws Exception { + groupName = "GID-fetcherTest"; + dispatcherTest = new MessageStoreDispatcherImplTest(); + dispatcherTest.init(); + } + + @After + public void shutdown() throws IOException { + if (messageStore != null) { + messageStore.destroy(); + } + MessageStoreUtilTest.deleteStoreDirectory(dispatcherTest.storePath); + } + + @Test + public void getMessageFromTieredStoreTest() throws Exception { + dispatcherTest.dispatchFromCommitLogTest(); + mq = dispatcherTest.mq; + messageStore = dispatcherTest.messageStore; + storeConfig = dispatcherTest.storeConfig; + + storeConfig.setReadAheadCacheEnable(true); + fetcher = new MessageStoreFetcherImpl(messageStore); + GetMessageResult getMessageResult = fetcher.getMessageAsync( + groupName, mq.getTopic(), 0, 0, 32, null).join(); + Assert.assertEquals(GetMessageStatus.NO_MATCHED_LOGIC_QUEUE, getMessageResult.getStatus()); + + getMessageResult = fetcher.getMessageAsync( + groupName, mq.getTopic(), mq.getQueueId(), 0, 32, null).join(); + Assert.assertEquals(GetMessageStatus.OFFSET_TOO_SMALL, getMessageResult.getStatus()); + Assert.assertEquals(100L, getMessageResult.getMinOffset()); + Assert.assertEquals(200L, getMessageResult.getMaxOffset()); + Assert.assertEquals(100L, getMessageResult.getNextBeginOffset()); + + getMessageResult = fetcher.getMessageAsync( + groupName, mq.getTopic(), mq.getQueueId(), 200, 32, null).join(); + Assert.assertEquals(GetMessageStatus.OFFSET_OVERFLOW_ONE, getMessageResult.getStatus()); + Assert.assertEquals(100L, getMessageResult.getMinOffset()); + Assert.assertEquals(200L, getMessageResult.getMaxOffset()); + Assert.assertEquals(200L, getMessageResult.getNextBeginOffset()); + + getMessageResult = fetcher.getMessageAsync( + groupName, mq.getTopic(), mq.getQueueId(), 300, 32, null).join(); + Assert.assertEquals(GetMessageStatus.OFFSET_OVERFLOW_BADLY, getMessageResult.getStatus()); + Assert.assertEquals(100L, getMessageResult.getMinOffset()); + Assert.assertEquals(200L, getMessageResult.getMaxOffset()); + Assert.assertEquals(200L, getMessageResult.getNextBeginOffset()); + + FlatMessageFile flatFile = dispatcherTest.fileStore.getFlatFile(mq); + + // direct + getMessageResult = fetcher.getMessageFromTieredStoreAsync(flatFile, 0, 32).join(); + Assert.assertEquals(GetMessageStatus.OFFSET_TOO_SMALL, getMessageResult.getStatus()); + 
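// Even for an out-of-range request the result is still expected to carry the flat
// file's current offset range [100, 200) and, for OFFSET_TOO_SMALL, to move
// nextBeginOffset up to the minimum offset, which the following assertions verify.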
Assert.assertEquals(100L, getMessageResult.getMinOffset()); + Assert.assertEquals(200L, getMessageResult.getMaxOffset()); + Assert.assertEquals(100L, getMessageResult.getNextBeginOffset()); + + getMessageResult = fetcher.getMessageFromTieredStoreAsync(flatFile, 200, 32).join(); + Assert.assertEquals(GetMessageStatus.OFFSET_OVERFLOW_ONE, getMessageResult.getStatus()); + Assert.assertEquals(100L, getMessageResult.getMinOffset()); + Assert.assertEquals(200L, getMessageResult.getMaxOffset()); + Assert.assertEquals(200L, getMessageResult.getNextBeginOffset()); + + getMessageResult = fetcher.getMessageFromTieredStoreAsync(flatFile, 300, 32).join(); + Assert.assertEquals(GetMessageStatus.OFFSET_OVERFLOW_BADLY, getMessageResult.getStatus()); + Assert.assertEquals(100L, getMessageResult.getMinOffset()); + Assert.assertEquals(200L, getMessageResult.getMaxOffset()); + Assert.assertEquals(200L, getMessageResult.getNextBeginOffset()); + + getMessageResult = fetcher.getMessageFromTieredStoreAsync(flatFile, 100, 32).join(); + Assert.assertEquals(GetMessageStatus.FOUND, getMessageResult.getStatus()); + Assert.assertEquals(100L, getMessageResult.getMinOffset()); + Assert.assertEquals(200L, getMessageResult.getMaxOffset()); + Assert.assertEquals(100L + 32L, getMessageResult.getNextBeginOffset()); + + getMessageResult = fetcher.getMessageFromTieredStoreAsync(flatFile, 180, 32).join(); + Assert.assertEquals(GetMessageStatus.FOUND, getMessageResult.getStatus()); + Assert.assertEquals(20, getMessageResult.getMessageCount()); + Assert.assertEquals(100L, getMessageResult.getMinOffset()); + Assert.assertEquals(200L, getMessageResult.getMaxOffset()); + Assert.assertEquals(200L, getMessageResult.getNextBeginOffset()); + + // limit count or size + int expect = 8; + int size = getMessageResult.getMessageBufferList().get(0).remaining(); + storeConfig.setReadAheadMessageSizeThreshold(expect * size + 10); + getMessageResult = fetcher.getMessageFromTieredStoreAsync(flatFile, 180, 32).join(); + Assert.assertEquals(GetMessageStatus.FOUND, getMessageResult.getStatus()); + Assert.assertEquals(expect, getMessageResult.getMessageCount()); + Assert.assertEquals(100L, getMessageResult.getMinOffset()); + Assert.assertEquals(200L, getMessageResult.getMaxOffset()); + Assert.assertEquals(180L + expect, getMessageResult.getNextBeginOffset()); + + storeConfig.setReadAheadMessageCountThreshold(expect); + storeConfig.setReadAheadMessageSizeThreshold(expect * size + expect * 2); + getMessageResult = fetcher.getMessageFromTieredStoreAsync(flatFile, 180, 32).join(); + Assert.assertEquals(GetMessageStatus.FOUND, getMessageResult.getStatus()); + Assert.assertEquals(expect, getMessageResult.getMessageCount()); + Assert.assertEquals(100L, getMessageResult.getMinOffset()); + Assert.assertEquals(200L, getMessageResult.getMaxOffset()); + Assert.assertEquals(180L + expect, getMessageResult.getNextBeginOffset()); + } + + @Test + public void getMessageFromCacheTest() throws Exception { + this.getMessageFromTieredStoreTest(); + mq = dispatcherTest.mq; + messageStore = dispatcherTest.messageStore; + storeConfig = dispatcherTest.storeConfig; + + storeConfig.setReadAheadCacheEnable(true); + storeConfig.setReadAheadMessageCountThreshold(32); + storeConfig.setReadAheadMessageSizeThreshold(Integer.MAX_VALUE); + + int batchSize = 4; + AtomicLong times = new AtomicLong(0L); + AtomicLong offset = new AtomicLong(100L); + FlatMessageFile flatFile = dispatcherTest.fileStore.getFlatFile(mq); + Awaitility.await().atMost(Duration.ofSeconds(10)).until(() -> { + 
GetMessageResultExt getMessageResult = + fetcher.getMessageFromCacheAsync(flatFile, groupName, offset.get(), batchSize).join(); + offset.set(getMessageResult.getNextBeginOffset()); + times.incrementAndGet(); + return offset.get() == 200L; + }); + Assert.assertEquals(100 / times.get(), batchSize); + } + + @Test + public void testGetMessageStoreTimeStampAsync() throws Exception { + this.getMessageFromTieredStoreTest(); + mq = dispatcherTest.mq; + messageStore = dispatcherTest.messageStore; + storeConfig = dispatcherTest.storeConfig; + + long result1 = fetcher.getEarliestMessageTimeAsync(mq.getTopic(), 0).join(); + Assert.assertEquals(-1L, result1); + + long result2 = fetcher.getEarliestMessageTimeAsync(mq.getTopic(), mq.getQueueId()).join(); + Assert.assertEquals(11L, result2); + + long result3 = fetcher.getMessageStoreTimeStampAsync(mq.getTopic(), 0, 100).join(); + Assert.assertEquals(-1L, result3); + + long result4 = fetcher.getMessageStoreTimeStampAsync(mq.getTopic(), mq.getQueueId(), 100).join(); + Assert.assertEquals(11L, result4); + + long result5 = fetcher.getMessageStoreTimeStampAsync(mq.getTopic(), mq.getQueueId(), 120).join(); + Assert.assertEquals(11L, result5); + } + + @Test + public void testGetOffsetInQueueByTime() throws Exception { + this.getMessageFromTieredStoreTest(); + mq = dispatcherTest.mq; + messageStore = dispatcherTest.messageStore; + storeConfig = dispatcherTest.storeConfig; + + // message time is all 11 + Assert.assertEquals(-1L, fetcher.getOffsetInQueueByTime(mq.getTopic(), 0, 10, BoundaryType.LOWER)); + + Assert.assertEquals(100L, fetcher.getOffsetInQueueByTime(mq.getTopic(), 1, 10, BoundaryType.LOWER)); + Assert.assertEquals(100L, fetcher.getOffsetInQueueByTime(mq.getTopic(), 1, 11, BoundaryType.LOWER)); + Assert.assertEquals(199L, fetcher.getOffsetInQueueByTime(mq.getTopic(), 1, 12, BoundaryType.LOWER)); + + Assert.assertEquals(100L, fetcher.getOffsetInQueueByTime(mq.getTopic(), 1, 10, BoundaryType.UPPER)); + Assert.assertEquals(199L, fetcher.getOffsetInQueueByTime(mq.getTopic(), 1, 11, BoundaryType.UPPER)); + Assert.assertEquals(199L, fetcher.getOffsetInQueueByTime(mq.getTopic(), 1, 12, BoundaryType.UPPER)); + } + + @Test + public void testQueryMessageAsync() throws Exception { + this.getMessageFromTieredStoreTest(); + mq = dispatcherTest.mq; + messageStore = dispatcherTest.messageStore; + storeConfig = dispatcherTest.storeConfig; + + QueryMessageResult queryMessageResult = fetcher.queryMessageAsync( + mq.getTopic(), "uk", 32, 0L, System.currentTimeMillis()).join(); + Assert.assertEquals(32, queryMessageResult.getMessageBufferList().size()); + + queryMessageResult = fetcher.queryMessageAsync( + mq.getTopic(), "uk", 120, 0L, System.currentTimeMillis()).join(); + Assert.assertEquals(100, queryMessageResult.getMessageBufferList().size()); + } +} diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/TieredStoreTopicBlackListFilterTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/core/MessageStoreTopicFilterTest.java similarity index 84% rename from tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/TieredStoreTopicBlackListFilterTest.java rename to tieredstore/src/test/java/org/apache/rocketmq/tieredstore/core/MessageStoreTopicFilterTest.java index fbaafa1b4cf..d5c3703152c 100644 --- a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/TieredStoreTopicBlackListFilterTest.java +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/core/MessageStoreTopicFilterTest.java @@ -14,17 
+14,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.rocketmq.tieredstore.provider; +package org.apache.rocketmq.tieredstore.core; import org.apache.rocketmq.common.topic.TopicValidator; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; import org.junit.Assert; import org.junit.Test; -public class TieredStoreTopicBlackListFilterTest { +public class MessageStoreTopicFilterTest { @Test public void filterTopicTest() { - TieredStoreTopicFilter topicFilter = new TieredStoreTopicBlackListFilter(); + MessageStoreFilter topicFilter = new MessageStoreTopicFilter(new MessageStoreConfig()); Assert.assertTrue(topicFilter.filterTopic("")); Assert.assertTrue(topicFilter.filterTopic(TopicValidator.SYSTEM_TOPIC_PREFIX + "_Topic")); diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/exception/TieredStoreExceptionTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/exception/TieredStoreExceptionTest.java new file mode 100644 index 00000000000..1de891a8acc --- /dev/null +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/exception/TieredStoreExceptionTest.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.rocketmq.tieredstore.exception; + +import org.junit.Assert; +import org.junit.Test; + +public class TieredStoreExceptionTest { + + @Test + public void testMessageStoreException() { + long position = 100L; + String requestId = "requestId"; + String error = "ErrorMessage"; + + TieredStoreException tieredStoreException = new TieredStoreException(TieredStoreErrorCode.IO_ERROR, error); + Assert.assertEquals(TieredStoreErrorCode.IO_ERROR, tieredStoreException.getErrorCode()); + Assert.assertEquals(error, tieredStoreException.getMessage()); + + tieredStoreException.setRequestId(requestId); + Assert.assertEquals(requestId, tieredStoreException.getRequestId()); + + tieredStoreException.setPosition(position); + Assert.assertEquals(position, tieredStoreException.getPosition()); + Assert.assertNotNull(tieredStoreException.toString()); + } +} \ No newline at end of file diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/CompositeQueueFlatFileTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/CompositeQueueFlatFileTest.java deleted file mode 100644 index 58842430483..00000000000 --- a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/CompositeQueueFlatFileTest.java +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.rocketmq.tieredstore.file; - -import java.io.IOException; -import java.nio.ByteBuffer; -import org.apache.rocketmq.common.message.MessageQueue; -import org.apache.rocketmq.store.ConsumeQueue; -import org.apache.rocketmq.store.DispatchRequest; -import org.apache.rocketmq.tieredstore.TieredStoreTestUtil; -import org.apache.rocketmq.tieredstore.common.AppendResult; -import org.apache.rocketmq.tieredstore.common.FileSegmentType; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.common.TieredStoreExecutor; -import org.apache.rocketmq.tieredstore.metadata.QueueMetadata; -import org.apache.rocketmq.tieredstore.metadata.TieredMetadataStore; -import org.apache.rocketmq.tieredstore.provider.memory.MemoryFileSegment; -import org.apache.rocketmq.tieredstore.util.MessageBufferUtil; -import org.apache.rocketmq.tieredstore.util.MessageBufferUtilTest; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; -import org.apache.rocketmq.common.BoundaryType; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -public class CompositeQueueFlatFileTest { - - private final String storePath = TieredStoreTestUtil.getRandomStorePath(); - private TieredMessageStoreConfig storeConfig; - private TieredMetadataStore metadataStore; - private TieredFileAllocator tieredFileAllocator; - private MessageQueue mq; - - @Before - public void setUp() throws ClassNotFoundException, NoSuchMethodException { - storeConfig = new TieredMessageStoreConfig(); - storeConfig.setBrokerName("brokerName"); - storeConfig.setStorePathRootDir(storePath); - storeConfig.setTieredBackendServiceProvider("org.apache.rocketmq.tieredstore.provider.memory.MemoryFileSegment"); - storeConfig.setCommitLogRollingInterval(0); - storeConfig.setCommitLogRollingMinimumSize(999); - mq = new MessageQueue("CompositeQueueFlatFileTest", storeConfig.getBrokerName(), 0); - metadataStore = TieredStoreUtil.getMetadataStore(storeConfig); - tieredFileAllocator = new TieredFileAllocator(storeConfig); - TieredStoreExecutor.init(); - } - - @After - public void tearDown() throws IOException { - TieredStoreTestUtil.destroyCompositeFlatFileManager(); - TieredStoreTestUtil.destroyMetadataStore(); - TieredStoreTestUtil.destroyTempDir(storePath); - TieredStoreExecutor.shutdown(); - } - - @Test - public void testAppendCommitLog() { - CompositeQueueFlatFile flatFile = new CompositeQueueFlatFile(tieredFileAllocator, mq); - ByteBuffer message = MessageBufferUtilTest.buildMockedMessageBuffer(); - AppendResult result = flatFile.appendCommitLog(message); - Assert.assertEquals(AppendResult.SUCCESS, result); - Assert.assertEquals(123L, flatFile.commitLog.getFlatFile().getFileToWrite().getAppendPosition()); - Assert.assertEquals(0L, flatFile.commitLog.getFlatFile().getFileToWrite().getCommitPosition()); - - flatFile = new CompositeQueueFlatFile(tieredFileAllocator, mq); - 
flatFile.initOffset(6); - result = flatFile.appendCommitLog(message); - Assert.assertEquals(AppendResult.SUCCESS, result); - - message.putLong(MessageBufferUtil.QUEUE_OFFSET_POSITION, 7); - result = flatFile.appendCommitLog(message); - Assert.assertEquals(AppendResult.SUCCESS, result); - - flatFile.commit(true); - Assert.assertEquals(7, flatFile.getCommitLogDispatchCommitOffset()); - - flatFile.cleanExpiredFile(0); - flatFile.destroyExpiredFile(); - } - - @Test - public void testAppendConsumeQueue() { - CompositeQueueFlatFile file = new CompositeQueueFlatFile(tieredFileAllocator, mq); - DispatchRequest request = new DispatchRequest( - mq.getTopic(), mq.getQueueId(), 51, 2, 3, 4); - AppendResult result = file.appendConsumeQueue(request); - Assert.assertEquals(AppendResult.OFFSET_INCORRECT, result); - - // Create new segment in file queue - MemoryFileSegment segment = new MemoryFileSegment(FileSegmentType.CONSUME_QUEUE, mq, 20, storeConfig); - segment.initPosition(segment.getSize()); - file.consumeQueue.getFlatFile().setBaseOffset(20L); - file.consumeQueue.getFlatFile().getFileToWrite(); - - // Recreate will load metadata and build consume queue - file = new CompositeQueueFlatFile(tieredFileAllocator, mq); - segment.initPosition(ConsumeQueue.CQ_STORE_UNIT_SIZE); - result = file.appendConsumeQueue(request); - Assert.assertEquals(AppendResult.SUCCESS, result); - - request = new DispatchRequest( - mq.getTopic(), mq.getQueueId(), 52, 2, 3, 4); - result = file.appendConsumeQueue(request); - Assert.assertEquals(AppendResult.SUCCESS, result); - - file.commit(true); - file.flushMetadata(); - - QueueMetadata queueMetadata = metadataStore.getQueue(mq); - Assert.assertEquals(53, queueMetadata.getMaxOffset()); - } - - @Test - public void testBinarySearchInQueueByTime() throws ClassNotFoundException, NoSuchMethodException { - - // replace provider, need new factory again - storeConfig.setTieredBackendServiceProvider("org.apache.rocketmq.tieredstore.provider.memory.MemoryFileSegmentWithoutCheck"); - tieredFileAllocator = new TieredFileAllocator(storeConfig); - - // inject store time: 0, +100, +100, +100, +200 - CompositeQueueFlatFile flatFile = new CompositeQueueFlatFile(tieredFileAllocator, mq); - flatFile.initOffset(50); - long timestamp1 = System.currentTimeMillis(); - ByteBuffer buffer = MessageBufferUtilTest.buildMockedMessageBuffer(); - buffer.putLong(MessageBufferUtil.QUEUE_OFFSET_POSITION, 50); - buffer.putLong(MessageBufferUtil.STORE_TIMESTAMP_POSITION, timestamp1); - flatFile.appendCommitLog(buffer, true); - - long timestamp2 = timestamp1 + 100; - buffer = MessageBufferUtilTest.buildMockedMessageBuffer(); - buffer.putLong(MessageBufferUtil.QUEUE_OFFSET_POSITION, 51); - buffer.putLong(MessageBufferUtil.STORE_TIMESTAMP_POSITION, timestamp2); - flatFile.appendCommitLog(buffer, true); - buffer = MessageBufferUtilTest.buildMockedMessageBuffer(); - buffer.putLong(MessageBufferUtil.QUEUE_OFFSET_POSITION, 52); - buffer.putLong(MessageBufferUtil.STORE_TIMESTAMP_POSITION, timestamp2); - flatFile.appendCommitLog(buffer, true); - buffer = MessageBufferUtilTest.buildMockedMessageBuffer(); - buffer.putLong(MessageBufferUtil.QUEUE_OFFSET_POSITION, 53); - buffer.putLong(MessageBufferUtil.STORE_TIMESTAMP_POSITION, timestamp2); - flatFile.appendCommitLog(buffer, true); - - long timestamp3 = timestamp2 + 100; - buffer = MessageBufferUtilTest.buildMockedMessageBuffer(); - buffer.putLong(MessageBufferUtil.QUEUE_OFFSET_POSITION, 54); - buffer.putLong(MessageBufferUtil.STORE_TIMESTAMP_POSITION, timestamp3); - 
flatFile.appendCommitLog(buffer, true); - - // append message to consume queue - flatFile.consumeQueue.getFlatFile().setBaseOffset(50 * ConsumeQueue.CQ_STORE_UNIT_SIZE); - - for (int i = 0; i < 5; i++) { - AppendResult appendResult = flatFile.appendConsumeQueue(new DispatchRequest( - mq.getTopic(), mq.getQueueId(), MessageBufferUtilTest.MSG_LEN * i, - MessageBufferUtilTest.MSG_LEN, 0, timestamp1, 50 + i, - "", "", 0, 0, null), true); - Assert.assertEquals(AppendResult.SUCCESS, appendResult); - } - - // commit message will increase max consume queue offset - flatFile.commit(true); - - Assert.assertEquals(54, flatFile.getOffsetInConsumeQueueByTime(timestamp3 + 1, BoundaryType.UPPER)); - Assert.assertEquals(54, flatFile.getOffsetInConsumeQueueByTime(timestamp3, BoundaryType.UPPER)); - - Assert.assertEquals(50, flatFile.getOffsetInConsumeQueueByTime(timestamp1 - 1, BoundaryType.LOWER)); - Assert.assertEquals(50, flatFile.getOffsetInConsumeQueueByTime(timestamp1, BoundaryType.LOWER)); - - Assert.assertEquals(51, flatFile.getOffsetInConsumeQueueByTime(timestamp1 + 1, BoundaryType.LOWER)); - Assert.assertEquals(51, flatFile.getOffsetInConsumeQueueByTime(timestamp2, BoundaryType.LOWER)); - Assert.assertEquals(54, flatFile.getOffsetInConsumeQueueByTime(timestamp2 + 1, BoundaryType.LOWER)); - Assert.assertEquals(54, flatFile.getOffsetInConsumeQueueByTime(timestamp3, BoundaryType.LOWER)); - - Assert.assertEquals(50, flatFile.getOffsetInConsumeQueueByTime(timestamp1, BoundaryType.UPPER)); - Assert.assertEquals(50, flatFile.getOffsetInConsumeQueueByTime(timestamp1 + 1, BoundaryType.UPPER)); - Assert.assertEquals(53, flatFile.getOffsetInConsumeQueueByTime(timestamp2, BoundaryType.UPPER)); - Assert.assertEquals(53, flatFile.getOffsetInConsumeQueueByTime(timestamp2 + 1, BoundaryType.UPPER)); - - Assert.assertEquals(0, flatFile.getOffsetInConsumeQueueByTime(timestamp1 - 1, BoundaryType.UPPER)); - Assert.assertEquals(55, flatFile.getOffsetInConsumeQueueByTime(timestamp3 + 1, BoundaryType.LOWER)); - } -} diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/FlatAppendFileTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/FlatAppendFileTest.java new file mode 100644 index 00000000000..2e6943728e2 --- /dev/null +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/FlatAppendFileTest.java @@ -0,0 +1,215 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.rocketmq.tieredstore.file; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.concurrent.CompletionException; +import org.apache.rocketmq.common.message.MessageQueue; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; +import org.apache.rocketmq.tieredstore.common.FileSegmentType; +import org.apache.rocketmq.tieredstore.exception.TieredStoreErrorCode; +import org.apache.rocketmq.tieredstore.exception.TieredStoreException; +import org.apache.rocketmq.tieredstore.metadata.DefaultMetadataStore; +import org.apache.rocketmq.tieredstore.metadata.MetadataStore; +import org.apache.rocketmq.tieredstore.metadata.entity.FileSegmentMetadata; +import org.apache.rocketmq.tieredstore.provider.FileSegment; +import org.apache.rocketmq.tieredstore.provider.PosixFileSegment; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtil; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtilTest; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +public class FlatAppendFileTest { + + private final String storePath = MessageStoreUtilTest.getRandomStorePath(); + private MessageQueue queue; + private MetadataStore metadataStore; + private MessageStoreConfig storeConfig; + private FlatFileFactory flatFileFactory; + + @Before + public void init() throws ClassNotFoundException, NoSuchMethodException { + storeConfig = new MessageStoreConfig(); + storeConfig.setBrokerName("brokerName"); + storeConfig.setStorePathRootDir(storePath); + storeConfig.setTieredStoreFilePath(storePath); + storeConfig.setTieredBackendServiceProvider(PosixFileSegment.class.getName()); + storeConfig.setTieredStoreCommitLogMaxSize(2000L); + storeConfig.setTieredStoreConsumeQueueMaxSize(2000L); + queue = new MessageQueue("TieredFlatFileTest", storeConfig.getBrokerName(), 0); + metadataStore = new DefaultMetadataStore(storeConfig); + flatFileFactory = new FlatFileFactory(metadataStore, storeConfig); + } + + @After + public void shutdown() throws IOException { + MessageStoreUtilTest.deleteStoreDirectory(storePath); + } + + public ByteBuffer allocateBuffer(int size) { + byte[] byteArray = new byte[size]; + ByteBuffer buffer = ByteBuffer.wrap(byteArray); + Arrays.fill(byteArray, (byte) 0); + return buffer; + } + + @Test + public void recoverFileSizeTest() { + String filePath = MessageStoreUtil.toFilePath(queue); + FlatAppendFile flatFile = flatFileFactory.createFlatFileForConsumeQueue(filePath); + flatFile.rollingNewFile(500L); + + FileSegment fileSegment = flatFile.getFileToWrite(); + flatFile.append(allocateBuffer(1000), 1L); + flatFile.commitAsync().join(); + flatFile.flushFileSegmentMeta(fileSegment); + } + + @Test + public void testRecoverFile() { + String filePath = MessageStoreUtil.toFilePath(queue); + FlatAppendFile flatFile = flatFileFactory.createFlatFileForConsumeQueue(filePath); + flatFile.rollingNewFile(500L); + + FileSegment fileSegment = flatFile.getFileToWrite(); + flatFile.append(allocateBuffer(1000), 1L); + flatFile.commitAsync().join(); + flatFile.flushFileSegmentMeta(fileSegment); + + FileSegmentMetadata metadata = + metadataStore.getFileSegment(filePath, FileSegmentType.CONSUME_QUEUE, 500L); + Assert.assertEquals(fileSegment.getPath(), metadata.getPath()); + Assert.assertEquals(FileSegmentType.CONSUME_QUEUE, FileSegmentType.valueOf(metadata.getType())); + Assert.assertEquals(500L, metadata.getBaseOffset()); + Assert.assertEquals(1000L, metadata.getSize()); + Assert.assertEquals(0L, 
metadata.getSealTimestamp()); + + fileSegment.close(); + flatFile.rollingNewFile(flatFile.getAppendOffset()); + flatFile.append(allocateBuffer(200), 1L); + flatFile.commitAsync().join(); + flatFile.flushFileSegmentMeta(fileSegment); + Assert.assertEquals(2, flatFile.getFileSegmentList().size()); + flatFile.getFileToWrite().close(); + + metadata = metadataStore.getFileSegment(filePath, FileSegmentType.CONSUME_QUEUE, 1500L); + Assert.assertEquals(fileSegment.getPath(), metadata.getPath()); + Assert.assertEquals(FileSegmentType.CONSUME_QUEUE, FileSegmentType.valueOf(metadata.getType())); + Assert.assertEquals(1500L, metadata.getBaseOffset()); + Assert.assertEquals(200L, metadata.getSize()); + Assert.assertEquals(0L, metadata.getSealTimestamp()); + + // reference same file + flatFile = flatFileFactory.createFlatFileForConsumeQueue(filePath); + Assert.assertEquals(2, flatFile.fileSegmentTable.size()); + metadata = metadataStore.getFileSegment(filePath, FileSegmentType.CONSUME_QUEUE, 1500L); + Assert.assertEquals(fileSegment.getPath(), metadata.getPath()); + Assert.assertEquals(FileSegmentType.CONSUME_QUEUE, FileSegmentType.valueOf(metadata.getType())); + Assert.assertEquals(1500L, metadata.getBaseOffset()); + Assert.assertEquals(200L, metadata.getSize()); + Assert.assertEquals(0L, metadata.getSealTimestamp()); + flatFile.destroy(); + } + + @Test + public void testFileSegment() { + String filePath = MessageStoreUtil.toFilePath(queue); + FlatAppendFile flatFile = flatFileFactory.createFlatFileForConsumeQueue(filePath); + Assert.assertThrows(IllegalStateException.class, flatFile::getFileToWrite); + + flatFile.commitAsync().join(); + flatFile.rollingNewFile(0L); + Assert.assertEquals(0L, flatFile.getMinOffset()); + Assert.assertEquals(0L, flatFile.getCommitOffset()); + Assert.assertEquals(0L, flatFile.getAppendOffset()); + + flatFile.append(allocateBuffer(1000), 1L); + Assert.assertEquals(0L, flatFile.getMinOffset()); + Assert.assertEquals(0L, flatFile.getCommitOffset()); + Assert.assertEquals(1000L, flatFile.getAppendOffset()); + Assert.assertEquals(1L, flatFile.getMinTimestamp()); + Assert.assertEquals(1L, flatFile.getMaxTimestamp()); + + flatFile.commitAsync().join(); + Assert.assertEquals(filePath, flatFile.getFilePath()); + Assert.assertEquals(FileSegmentType.CONSUME_QUEUE, flatFile.getFileType()); + Assert.assertEquals(0L, flatFile.getMinOffset()); + Assert.assertEquals(1000L, flatFile.getCommitOffset()); + Assert.assertEquals(1000L, flatFile.getAppendOffset()); + Assert.assertEquals(1L, flatFile.getMinTimestamp()); + Assert.assertEquals(1L, flatFile.getMaxTimestamp()); + + // file full + flatFile.append(allocateBuffer(1000), 1L); + flatFile.append(allocateBuffer(1000), 1L); + flatFile.commitAsync().join(); + Assert.assertEquals(2, flatFile.fileSegmentTable.size()); + flatFile.destroy(); + } + + @Test + public void testAppendAndRead() { + FlatAppendFile flatFile = flatFileFactory.createFlatFileForConsumeQueue(MessageStoreUtil.toFilePath(queue)); + flatFile.rollingNewFile(500L); + Assert.assertEquals(500L, flatFile.getCommitOffset()); + Assert.assertEquals(500L, flatFile.getAppendOffset()); + + flatFile.append(allocateBuffer(1000), 1L); + + // no commit + CompletionException exception = Assert.assertThrows( + CompletionException.class, () -> flatFile.readAsync(500, 200).join()); + Assert.assertTrue(exception.getCause() instanceof TieredStoreException); + Assert.assertEquals(TieredStoreErrorCode.ILLEGAL_PARAM, + ((TieredStoreException) exception.getCause()).getErrorCode()); + 
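+        // Descriptive note (added for clarity): committing the pending append below makes the 500-1500 range readable, so the subsequent readAsync(500, 200) returns the requested 200 bytes.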
flatFile.commitAsync().join(); + Assert.assertEquals(200, flatFile.readAsync(500, 200).join().remaining()); + + // 500-1500, 1500-3000 + flatFile.append(allocateBuffer(1500), 1L); + flatFile.commitAsync().join(); + Assert.assertEquals(2, flatFile.fileSegmentTable.size()); + Assert.assertEquals(1000, flatFile.readAsync(1000, 1000).join().remaining()); + flatFile.destroy(); + } + + @Test + public void testCleanExpiredFile() { + FlatAppendFile flatFile = flatFileFactory.createFlatFileForConsumeQueue(MessageStoreUtil.toFilePath(queue)); + flatFile.destroyExpiredFile(1); + + flatFile.rollingNewFile(500L); + flatFile.append(allocateBuffer(1000), 2L); + flatFile.commitAsync().join(); + Assert.assertEquals(1, flatFile.fileSegmentTable.size()); + flatFile.destroyExpiredFile(1); + Assert.assertEquals(1, flatFile.fileSegmentTable.size()); + flatFile.destroyExpiredFile(3); + Assert.assertEquals(0, flatFile.fileSegmentTable.size()); + + flatFile.rollingNewFile(1500L); + flatFile.append(allocateBuffer(1000), 2L); + flatFile.append(allocateBuffer(1000), 2L); + flatFile.commitAsync().join(); + flatFile.destroy(); + Assert.assertEquals(0, flatFile.fileSegmentTable.size()); + } +} diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/FlatCommitLogFileTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/FlatCommitLogFileTest.java new file mode 100644 index 00000000000..7e030d305eb --- /dev/null +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/FlatCommitLogFileTest.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.rocketmq.tieredstore.file; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.concurrent.TimeUnit; +import org.apache.rocketmq.common.message.MessageQueue; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; +import org.apache.rocketmq.tieredstore.common.AppendResult; +import org.apache.rocketmq.tieredstore.metadata.DefaultMetadataStore; +import org.apache.rocketmq.tieredstore.metadata.MetadataStore; +import org.apache.rocketmq.tieredstore.provider.PosixFileSegment; +import org.apache.rocketmq.tieredstore.util.MessageFormatUtil; +import org.apache.rocketmq.tieredstore.util.MessageFormatUtilTest; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtil; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtilTest; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +public class FlatCommitLogFileTest { + + private final String storePath = MessageStoreUtilTest.getRandomStorePath(); + private MessageQueue queue; + private MetadataStore metadataStore; + private MessageStoreConfig storeConfig; + private FlatFileFactory flatFileFactory; + + @Before + public void init() throws ClassNotFoundException, NoSuchMethodException { + storeConfig = new MessageStoreConfig(); + storeConfig.setBrokerName("brokerName"); + storeConfig.setStorePathRootDir(storePath); + storeConfig.setTieredStoreFilePath(storePath); + storeConfig.setTieredBackendServiceProvider(PosixFileSegment.class.getName()); + storeConfig.setTieredStoreCommitLogMaxSize(2000L); + storeConfig.setTieredStoreConsumeQueueMaxSize(2000L); + queue = new MessageQueue("TieredFlatFileTest", storeConfig.getBrokerName(), 0); + metadataStore = new DefaultMetadataStore(storeConfig); + flatFileFactory = new FlatFileFactory(metadataStore, storeConfig); + } + + @After + public void shutdown() throws IOException { + MessageStoreUtilTest.deleteStoreDirectory(storePath); + } + + @Test + public void constructTest() { + String filePath = MessageStoreUtil.toFilePath(queue); + FlatAppendFile flatFile = flatFileFactory.createFlatFileForCommitLog(filePath); + Assert.assertEquals(1L, flatFile.fileSegmentTable.size()); + } + + @Test + public void tryRollingFileTest() throws InterruptedException { + String filePath = MessageStoreUtil.toFilePath(queue); + FlatCommitLogFile flatFile = flatFileFactory.createFlatFileForCommitLog(filePath); + for (int i = 0; i < 3; i++) { + ByteBuffer byteBuffer = MessageFormatUtilTest.buildMockedMessageBuffer(); + byteBuffer.putLong(MessageFormatUtil.QUEUE_OFFSET_POSITION, i); + Assert.assertEquals(AppendResult.SUCCESS, flatFile.append(byteBuffer, i)); + TimeUnit.MILLISECONDS.sleep(2); + Assert.assertTrue(flatFile.tryRollingFile(1)); + } + Assert.assertEquals(4, flatFile.fileSegmentTable.size()); + Assert.assertFalse(flatFile.tryRollingFile(1000)); + flatFile.destroy(); + } + + @Test + public void getMinOffsetFromFileAsyncTest() { + String filePath = MessageStoreUtil.toFilePath(queue); + FlatCommitLogFile flatFile = flatFileFactory.createFlatFileForCommitLog(filePath); + + // append some messages + for (int i = 6; i < 9; i++) { + ByteBuffer byteBuffer = MessageFormatUtilTest.buildMockedMessageBuffer(); + byteBuffer.putLong(MessageFormatUtil.QUEUE_OFFSET_POSITION, i); + Assert.assertEquals(AppendResult.SUCCESS, flatFile.append(byteBuffer, 1L)); + } + Assert.assertEquals(-1L, flatFile.getMinOffsetFromFileAsync().join().longValue()); + + // append some messages + for (int i = 9; i < 30; i++) { + ByteBuffer byteBuffer = 
MessageFormatUtilTest.buildMockedMessageBuffer(); + byteBuffer.putLong(MessageFormatUtil.QUEUE_OFFSET_POSITION, i); + Assert.assertEquals(AppendResult.SUCCESS, flatFile.append(byteBuffer, 1L)); + } + + flatFile.commitAsync().join(); + Assert.assertEquals(6L, flatFile.getMinOffsetFromFile()); + Assert.assertEquals(6L, flatFile.getMinOffsetFromFileAsync().join().longValue()); + } +} \ No newline at end of file diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/FlatConsumeQueueFileTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/FlatConsumeQueueFileTest.java new file mode 100644 index 00000000000..8dfc1553d50 --- /dev/null +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/FlatConsumeQueueFileTest.java @@ -0,0 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.rocketmq.tieredstore.file; + +public class FlatConsumeQueueFileTest { + +} \ No newline at end of file diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/FlatFileFactoryTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/FlatFileFactoryTest.java new file mode 100644 index 00000000000..bc8ebaf1cb6 --- /dev/null +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/FlatFileFactoryTest.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.rocketmq.tieredstore.file; + +import org.apache.rocketmq.tieredstore.MessageStoreConfig; +import org.apache.rocketmq.tieredstore.metadata.DefaultMetadataStore; +import org.apache.rocketmq.tieredstore.metadata.MetadataStore; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtilTest; +import org.junit.Assert; +import org.junit.Test; + +public class FlatFileFactoryTest { + + @Test + public void factoryTest() { + MessageStoreConfig storeConfig = new MessageStoreConfig(); + storeConfig.setTieredStoreFilePath(MessageStoreUtilTest.getRandomStorePath()); + MetadataStore metadataStore = new DefaultMetadataStore(storeConfig); + FlatFileFactory factory = new FlatFileFactory(metadataStore, storeConfig); + Assert.assertEquals(storeConfig, factory.getStoreConfig()); + Assert.assertEquals(metadataStore, factory.getMetadataStore()); + + FlatAppendFile flatFile1 = factory.createFlatFileForCommitLog("CommitLog"); + FlatAppendFile flatFile2 = factory.createFlatFileForConsumeQueue("ConsumeQueue"); + FlatAppendFile flatFile3 = factory.createFlatFileForIndexFile("IndexFile"); + + Assert.assertNotNull(flatFile1); + Assert.assertNotNull(flatFile2); + Assert.assertNotNull(flatFile3); + + flatFile1.destroy(); + flatFile2.destroy(); + flatFile3.destroy(); + } +} \ No newline at end of file diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/FlatFileStoreTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/FlatFileStoreTest.java new file mode 100644 index 00000000000..79647932dae --- /dev/null +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/FlatFileStoreTest.java @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.rocketmq.tieredstore.file; + +import java.io.IOException; +import java.util.concurrent.CompletableFuture; +import org.apache.rocketmq.common.message.MessageQueue; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; +import org.apache.rocketmq.tieredstore.MessageStoreExecutor; +import org.apache.rocketmq.tieredstore.exception.TieredStoreErrorCode; +import org.apache.rocketmq.tieredstore.exception.TieredStoreException; +import org.apache.rocketmq.tieredstore.metadata.DefaultMetadataStore; +import org.apache.rocketmq.tieredstore.metadata.MetadataStore; +import org.apache.rocketmq.tieredstore.provider.PosixFileSegment; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtilTest; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; + +import static org.mockito.ArgumentMatchers.any; + +public class FlatFileStoreTest { + + private final String storePath = MessageStoreUtilTest.getRandomStorePath(); + private MessageStoreConfig storeConfig; + private MetadataStore metadataStore; + + @Before + public void init() { + storeConfig = new MessageStoreConfig(); + storeConfig.setStorePathRootDir(storePath); + storeConfig.setTieredBackendServiceProvider(PosixFileSegment.class.getName()); + storeConfig.setBrokerName(storeConfig.getBrokerName()); + metadataStore = new DefaultMetadataStore(storeConfig); + } + + @After + public void shutdown() throws IOException { + MessageStoreUtilTest.deleteStoreDirectory(storePath); + } + + @Test + public void flatFileStoreTest() { + // Empty recover + MessageStoreExecutor executor = new MessageStoreExecutor(); + FlatFileStore fileStore = new FlatFileStore(storeConfig, metadataStore, executor); + Assert.assertTrue(fileStore.load()); + + Assert.assertEquals(storeConfig, fileStore.getStoreConfig()); + Assert.assertEquals(metadataStore, fileStore.getMetadataStore()); + Assert.assertNotNull(fileStore.getFlatFileFactory()); + + for (int i = 0; i < 4; i++) { + MessageQueue mq = new MessageQueue("flatFileStoreTest", storeConfig.getBrokerName(), i); + FlatMessageFile flatFile = fileStore.computeIfAbsent(mq); + FlatMessageFile flatFileGet = fileStore.getFlatFile(mq); + Assert.assertEquals(flatFile, flatFileGet); + } + Assert.assertEquals(4, fileStore.deepCopyFlatFileToList().size()); + fileStore.shutdown(); + + fileStore = new FlatFileStore(storeConfig, metadataStore, executor); + Assert.assertTrue(fileStore.load()); + Assert.assertEquals(4, fileStore.deepCopyFlatFileToList().size()); + + for (int i = 1; i < 3; i++) { + MessageQueue mq = new MessageQueue("flatFileStoreTest", storeConfig.getBrokerName(), i); + fileStore.destroyFile(mq); + } + Assert.assertEquals(2, fileStore.deepCopyFlatFileToList().size()); + fileStore.shutdown(); + + FlatFileStore fileStoreSpy = Mockito.spy(fileStore); + Mockito.when(fileStoreSpy.recoverAsync(any())).thenReturn(CompletableFuture.supplyAsync(() -> { + throw new TieredStoreException(TieredStoreErrorCode.ILLEGAL_PARAM, "Test"); + })); + Assert.assertFalse(fileStoreSpy.load()); + + Mockito.reset(fileStoreSpy); + fileStore.load(); + Assert.assertEquals(2, fileStore.deepCopyFlatFileToList().size()); + fileStore.destroy(); + Assert.assertEquals(0, fileStore.deepCopyFlatFileToList().size()); + } +} diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/FlatMessageFileTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/FlatMessageFileTest.java new file mode 100644 index 00000000000..95245aa27ef --- 
/dev/null +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/FlatMessageFileTest.java @@ -0,0 +1,212 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.rocketmq.tieredstore.file; + +import java.io.IOException; +import java.nio.ByteBuffer; +import org.apache.rocketmq.common.BoundaryType; +import org.apache.rocketmq.common.message.MessageQueue; +import org.apache.rocketmq.store.ConsumeQueue; +import org.apache.rocketmq.store.DispatchRequest; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; +import org.apache.rocketmq.tieredstore.common.AppendResult; +import org.apache.rocketmq.tieredstore.metadata.DefaultMetadataStore; +import org.apache.rocketmq.tieredstore.metadata.MetadataStore; +import org.apache.rocketmq.tieredstore.provider.PosixFileSegment; +import org.apache.rocketmq.tieredstore.util.MessageFormatUtil; +import org.apache.rocketmq.tieredstore.util.MessageFormatUtilTest; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtil; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtilTest; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +public class FlatMessageFileTest { + + private final String storePath = MessageStoreUtilTest.getRandomStorePath(); + private MessageStoreConfig storeConfig; + private MetadataStore metadataStore; + private FlatFileFactory flatFileFactory; + + @Before + public void init() throws ClassNotFoundException, NoSuchMethodException { + storeConfig = new MessageStoreConfig(); + storeConfig.setBrokerName("brokerName"); + storeConfig.setStorePathRootDir(storePath); + storeConfig.setTieredBackendServiceProvider(PosixFileSegment.class.getName()); + storeConfig.setCommitLogRollingInterval(0); + storeConfig.setCommitLogRollingMinimumSize(999); + metadataStore = new DefaultMetadataStore(storeConfig); + flatFileFactory = new FlatFileFactory(metadataStore, storeConfig); + } + + @After + public void shutdown() throws IOException { + MessageStoreUtilTest.deleteStoreDirectory(storePath); + } + + @Test + public void testAppendCommitLog() { + String topic = "CommitLogTest"; + FlatMessageFile flatFile = new FlatMessageFile(flatFileFactory, topic, 0); + Assert.assertTrue(flatFile.getTopicId() >= 0); + Assert.assertEquals(topic, flatFile.getMessageQueue().getTopic()); + Assert.assertEquals(0, flatFile.getMessageQueue().getQueueId()); + Assert.assertFalse(flatFile.isFlatFileInit()); + + flatFile.flushMetadata(); + Assert.assertNotNull(metadataStore.getQueue(flatFile.getMessageQueue())); + + long offset = 100; + flatFile.initOffset(offset); + for (int i = 0; i < 5; i++) { + ByteBuffer buffer = MessageFormatUtilTest.buildMockedMessageBuffer(); + DispatchRequest request = new DispatchRequest( + topic, 0, i, (long) 
buffer.remaining() * i, buffer.remaining(), 0L); + flatFile.appendCommitLog(buffer); + flatFile.appendConsumeQueue(request); + } + + Assert.assertNotNull(flatFile.getFileLock()); + + long time = MessageFormatUtil.getStoreTimeStamp(MessageFormatUtilTest.buildMockedMessageBuffer()); + Assert.assertEquals(time, flatFile.getMinStoreTimestamp()); + Assert.assertEquals(time, flatFile.getMaxStoreTimestamp()); + + long size = MessageFormatUtilTest.buildMockedMessageBuffer().remaining(); + Assert.assertEquals(-1L, flatFile.getFirstMessageOffset()); + Assert.assertEquals(0L, flatFile.getCommitLogMinOffset()); + Assert.assertEquals(0L, flatFile.getCommitLogCommitOffset()); + Assert.assertEquals(5 * size, flatFile.getCommitLogMaxOffset()); + + Assert.assertEquals(offset, flatFile.getConsumeQueueMinOffset()); + Assert.assertEquals(offset, flatFile.getConsumeQueueCommitOffset()); + Assert.assertEquals(offset + 5L, flatFile.getConsumeQueueMaxOffset()); + + Assert.assertTrue(flatFile.commitAsync().join()); + Assert.assertEquals(6L, flatFile.getFirstMessageOffset()); + Assert.assertEquals(0L, flatFile.getCommitLogMinOffset()); + Assert.assertEquals(5 * size, flatFile.getCommitLogCommitOffset()); + Assert.assertEquals(5 * size, flatFile.getCommitLogMaxOffset()); + + Assert.assertEquals(offset, flatFile.getConsumeQueueMinOffset()); + Assert.assertEquals(offset + 5L, flatFile.getConsumeQueueCommitOffset()); + Assert.assertEquals(offset + 5L, flatFile.getConsumeQueueMaxOffset()); + + // test read + ByteBuffer buffer = flatFile.getMessageAsync(offset).join(); + Assert.assertNotNull(buffer); + Assert.assertEquals(size, buffer.remaining()); + Assert.assertEquals(6L, MessageFormatUtil.getQueueOffset(buffer)); + + flatFile.destroyExpiredFile(0); + flatFile.destroy(); + } + + @Test + public void testEquals() { + String topic = "EqualsTest"; + FlatMessageFile flatFile1 = new FlatMessageFile(flatFileFactory, topic, 0); + FlatMessageFile flatFile2 = new FlatMessageFile(flatFileFactory, topic, 0); + FlatMessageFile flatFile3 = new FlatMessageFile(flatFileFactory, topic, 1); + Assert.assertEquals(flatFile1, flatFile2); + Assert.assertEquals(flatFile1.hashCode(), flatFile2.hashCode()); + Assert.assertNotEquals(flatFile1, flatFile3); + + flatFile1.shutdown(); + flatFile2.shutdown(); + flatFile3.shutdown(); + + flatFile1.destroy(); + flatFile2.destroy(); + flatFile3.destroy(); + } + + @Test + public void testBinarySearchInQueueByTime() { + + // replace provider, need new factory again + storeConfig.setTieredBackendServiceProvider(PosixFileSegment.class.getName()); + flatFileFactory = new FlatFileFactory(metadataStore, storeConfig); + + // inject store time: 0, +100, +100, +100, +200 + MessageQueue mq = new MessageQueue("TopicTest", "BrokerName", 1); + FlatMessageFile flatFile = new FlatMessageFile(flatFileFactory, MessageStoreUtil.toFilePath(mq)); + flatFile.initOffset(50); + long timestamp1 = 1000; + ByteBuffer buffer = MessageFormatUtilTest.buildMockedMessageBuffer(); + buffer.putLong(MessageFormatUtil.QUEUE_OFFSET_POSITION, 50); + buffer.putLong(MessageFormatUtil.STORE_TIMESTAMP_POSITION, timestamp1); + flatFile.appendCommitLog(buffer); + + long timestamp2 = timestamp1 + 100; + buffer = MessageFormatUtilTest.buildMockedMessageBuffer(); + buffer.putLong(MessageFormatUtil.QUEUE_OFFSET_POSITION, 51); + buffer.putLong(MessageFormatUtil.STORE_TIMESTAMP_POSITION, timestamp2); + flatFile.appendCommitLog(buffer); + buffer = MessageFormatUtilTest.buildMockedMessageBuffer(); + 
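+        // Descriptive note (added for clarity): offsets 52 and 53 reuse timestamp2, so the time-based lookup must resolve duplicate store timestamps via the LOWER/UPPER boundary type.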
buffer.putLong(MessageFormatUtil.QUEUE_OFFSET_POSITION, 52); + buffer.putLong(MessageFormatUtil.STORE_TIMESTAMP_POSITION, timestamp2); + flatFile.appendCommitLog(buffer); + buffer = MessageFormatUtilTest.buildMockedMessageBuffer(); + buffer.putLong(MessageFormatUtil.QUEUE_OFFSET_POSITION, 53); + buffer.putLong(MessageFormatUtil.STORE_TIMESTAMP_POSITION, timestamp2); + flatFile.appendCommitLog(buffer); + + long timestamp3 = timestamp2 + 100; + buffer = MessageFormatUtilTest.buildMockedMessageBuffer(); + buffer.putLong(MessageFormatUtil.QUEUE_OFFSET_POSITION, 54); + buffer.putLong(MessageFormatUtil.STORE_TIMESTAMP_POSITION, timestamp3); + flatFile.appendCommitLog(buffer); + + // append message to consume queue + flatFile.consumeQueue.initOffset(50 * ConsumeQueue.CQ_STORE_UNIT_SIZE); + + for (int i = 0; i < 5; i++) { + AppendResult appendResult = flatFile.appendConsumeQueue(new DispatchRequest( + mq.getTopic(), mq.getQueueId(), MessageFormatUtilTest.MSG_LEN * i, + MessageFormatUtilTest.MSG_LEN, 0, timestamp1, 50 + i, + "", "", 0, 0, null)); + Assert.assertEquals(AppendResult.SUCCESS, appendResult); + } + + // commit message will increase max consume queue offset + Assert.assertTrue(flatFile.commitAsync().join()); + + Assert.assertEquals(54, flatFile.getQueueOffsetByTimeAsync(timestamp3 + 1, BoundaryType.UPPER).join().longValue()); + Assert.assertEquals(54, flatFile.getQueueOffsetByTimeAsync(timestamp3, BoundaryType.UPPER).join().longValue()); + + Assert.assertEquals(50, flatFile.getQueueOffsetByTimeAsync(timestamp1 - 1, BoundaryType.LOWER).join().longValue()); + Assert.assertEquals(50, flatFile.getQueueOffsetByTimeAsync(timestamp1, BoundaryType.LOWER).join().longValue()); + + Assert.assertEquals(51, flatFile.getQueueOffsetByTimeAsync(timestamp1 + 1, BoundaryType.LOWER).join().longValue()); + Assert.assertEquals(51, flatFile.getQueueOffsetByTimeAsync(timestamp2, BoundaryType.LOWER).join().longValue()); + Assert.assertEquals(54, flatFile.getQueueOffsetByTimeAsync(timestamp2 + 1, BoundaryType.LOWER).join().longValue()); + Assert.assertEquals(54, flatFile.getQueueOffsetByTimeAsync(timestamp3, BoundaryType.LOWER).join().longValue()); + + Assert.assertEquals(50, flatFile.getQueueOffsetByTimeAsync(timestamp1, BoundaryType.UPPER).join().longValue()); + Assert.assertEquals(51, flatFile.getQueueOffsetByTimeAsync(timestamp1 + 1, BoundaryType.UPPER).join().longValue()); + Assert.assertEquals(53, flatFile.getQueueOffsetByTimeAsync(timestamp2, BoundaryType.UPPER).join().longValue()); + Assert.assertEquals(54, flatFile.getQueueOffsetByTimeAsync(timestamp2 + 1, BoundaryType.UPPER).join().longValue()); + + Assert.assertEquals(50, flatFile.getQueueOffsetByTimeAsync(timestamp1 - 1, BoundaryType.UPPER).join().longValue()); + Assert.assertEquals(54, flatFile.getQueueOffsetByTimeAsync(timestamp3 + 1, BoundaryType.LOWER).join().longValue()); + + flatFile.destroy(); + } +} diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/TieredCommitLogTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/TieredCommitLogTest.java deleted file mode 100644 index 6693d3cb790..00000000000 --- a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/TieredCommitLogTest.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.rocketmq.tieredstore.file; - -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.List; -import org.apache.rocketmq.common.message.MessageQueue; -import org.apache.rocketmq.tieredstore.TieredStoreTestUtil; -import org.apache.rocketmq.tieredstore.common.AppendResult; -import org.apache.rocketmq.tieredstore.common.FileSegmentType; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.common.TieredStoreExecutor; -import org.apache.rocketmq.tieredstore.metadata.FileSegmentMetadata; -import org.apache.rocketmq.tieredstore.metadata.TieredMetadataStore; -import org.apache.rocketmq.tieredstore.provider.TieredFileSegment; -import org.apache.rocketmq.tieredstore.util.MessageBufferUtil; -import org.apache.rocketmq.tieredstore.util.MessageBufferUtilTest; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -public class TieredCommitLogTest { - - private final String storePath = TieredStoreTestUtil.getRandomStorePath(); - private MessageQueue mq; - private TieredFileAllocator fileAllocator; - private TieredMetadataStore metadataStore; - - @Before - public void setUp() throws ClassNotFoundException, NoSuchMethodException { - TieredMessageStoreConfig storeConfig = new TieredMessageStoreConfig(); - storeConfig.setBrokerName("brokerName"); - storeConfig.setStorePathRootDir(storePath); - storeConfig.setTieredStoreFilePath(storePath + File.separator); - storeConfig.setTieredBackendServiceProvider("org.apache.rocketmq.tieredstore.provider.posix.PosixFileSegment"); - storeConfig.setCommitLogRollingInterval(0); - storeConfig.setTieredStoreCommitLogMaxSize(1000); - - metadataStore = TieredStoreUtil.getMetadataStore(storeConfig); - fileAllocator = new TieredFileAllocator(storeConfig); - mq = new MessageQueue("CommitLogTest", storeConfig.getBrokerName(), 0); - TieredStoreExecutor.init(); - } - - @After - public void tearDown() throws IOException { - TieredStoreTestUtil.destroyCompositeFlatFileManager(); - TieredStoreTestUtil.destroyMetadataStore(); - TieredStoreTestUtil.destroyTempDir(storePath); - TieredStoreExecutor.shutdown(); - } - - @Test - public void correctMinOffsetTest() { - String filePath = TieredStoreUtil.toPath(mq); - TieredCommitLog tieredCommitLog = new TieredCommitLog(fileAllocator, filePath); - Assert.assertEquals(0L, tieredCommitLog.getMinOffset()); - Assert.assertEquals(0L, tieredCommitLog.getCommitOffset()); - Assert.assertEquals(0L, tieredCommitLog.getDispatchCommitOffset()); - - // append some messages - for (int i = 6; i < 50; i++) { - ByteBuffer byteBuffer = MessageBufferUtilTest.buildMockedMessageBuffer(); - byteBuffer.putLong(MessageBufferUtil.QUEUE_OFFSET_POSITION, i); - Assert.assertEquals(AppendResult.SUCCESS, tieredCommitLog.append(byteBuffer)); - } - - 
tieredCommitLog.commit(true); - tieredCommitLog.correctMinOffset(); - - // single file store: 1000 / 122 = 8, file count: 44 / 8 = 5 - Assert.assertEquals(6, tieredCommitLog.getFlatFile().getFileSegmentCount()); - - metadataStore.iterateFileSegment(filePath, FileSegmentType.COMMIT_LOG, metadata -> { - if (metadata.getBaseOffset() < 1000) { - metadata.setStatus(FileSegmentMetadata.STATUS_DELETED); - metadataStore.updateFileSegment(metadata); - } - }); - - // manually delete file - List segmentList = tieredCommitLog.getFlatFile().getFileSegmentList(); - segmentList.remove(0).destroyFile(); - segmentList.remove(0).destroyFile(); - - tieredCommitLog.correctMinOffset(); - Assert.assertEquals(4, tieredCommitLog.getFlatFile().getFileSegmentCount()); - Assert.assertEquals(6 + 8 + 8, tieredCommitLog.getMinConsumeQueueOffset()); - } -} \ No newline at end of file diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/TieredFlatFileManagerTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/TieredFlatFileManagerTest.java deleted file mode 100644 index 20fe4dd7022..00000000000 --- a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/TieredFlatFileManagerTest.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.rocketmq.tieredstore.file; - -import java.io.IOException; -import java.util.concurrent.TimeUnit; -import org.apache.rocketmq.common.message.MessageQueue; -import org.apache.rocketmq.tieredstore.TieredStoreTestUtil; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.common.TieredStoreExecutor; -import org.apache.rocketmq.tieredstore.metadata.TieredMetadataStore; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; -import org.awaitility.Awaitility; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -public class TieredFlatFileManagerTest { - - private final String storePath = TieredStoreTestUtil.getRandomStorePath(); - private TieredMessageStoreConfig storeConfig; - private MessageQueue mq; - private TieredMetadataStore metadataStore; - - @Before - public void setUp() { - storeConfig = new TieredMessageStoreConfig(); - storeConfig.setStorePathRootDir(storePath); - storeConfig.setTieredBackendServiceProvider("org.apache.rocketmq.tieredstore.provider.memory.MemoryFileSegment"); - storeConfig.setBrokerName(storeConfig.getBrokerName()); - mq = new MessageQueue("TieredFlatFileManagerTest", storeConfig.getBrokerName(), 0); - metadataStore = TieredStoreUtil.getMetadataStore(storeConfig); - TieredStoreExecutor.init(); - } - - @After - public void tearDown() throws IOException { - TieredStoreTestUtil.destroyCompositeFlatFileManager(); - TieredStoreTestUtil.destroyMetadataStore(); - TieredStoreTestUtil.destroyTempDir(storePath); - TieredStoreExecutor.shutdown(); - } - - @Test - public void testLoadAndDestroy() { - metadataStore.addTopic(mq.getTopic(), 0); - metadataStore.addQueue(mq, 100); - MessageQueue mq1 = new MessageQueue(mq.getTopic(), mq.getBrokerName(), 1); - metadataStore.addQueue(mq1, 200); - TieredFlatFileManager flatFileManager = TieredFlatFileManager.getInstance(storeConfig); - boolean load = flatFileManager.load(); - Assert.assertTrue(load); - - Awaitility.await() - .atMost(3, TimeUnit.SECONDS) - .until(() -> flatFileManager.deepCopyFlatFileToList().size() == 2); - - CompositeFlatFile flatFile = flatFileManager.getFlatFile(mq); - Assert.assertNotNull(flatFile); - Assert.assertEquals(-1L, flatFile.getDispatchOffset()); - flatFile.initOffset(100L); - Assert.assertEquals(100L, flatFile.getDispatchOffset()); - flatFile.initOffset(200L); - Assert.assertEquals(100L, flatFile.getDispatchOffset()); - - CompositeFlatFile flatFile1 = flatFileManager.getFlatFile(mq1); - Assert.assertNotNull(flatFile1); - flatFile1.initOffset(200L); - Assert.assertEquals(200, flatFile1.getDispatchOffset()); - - flatFileManager.destroyCompositeFile(mq); - Assert.assertTrue(flatFile.isClosed()); - Assert.assertNull(flatFileManager.getFlatFile(mq)); - Assert.assertNull(metadataStore.getQueue(mq)); - - flatFileManager.destroy(); - Assert.assertTrue(flatFile1.isClosed()); - Assert.assertNull(flatFileManager.getFlatFile(mq1)); - Assert.assertNull(metadataStore.getQueue(mq1)); - } -} diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/TieredFlatFileTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/TieredFlatFileTest.java deleted file mode 100644 index 7e2fbf20136..00000000000 --- a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/TieredFlatFileTest.java +++ /dev/null @@ -1,342 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.rocketmq.tieredstore.file; - -import org.apache.rocketmq.common.BoundaryType; -import org.apache.rocketmq.common.message.MessageQueue; -import org.apache.rocketmq.tieredstore.TieredStoreTestUtil; -import org.apache.rocketmq.tieredstore.common.FileSegmentType; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.common.TieredStoreExecutor; -import org.apache.rocketmq.tieredstore.metadata.FileSegmentMetadata; -import org.apache.rocketmq.tieredstore.metadata.TieredMetadataStore; -import org.apache.rocketmq.tieredstore.provider.TieredFileSegment; -import org.apache.rocketmq.tieredstore.provider.memory.MemoryFileSegment; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; - -public class TieredFlatFileTest { - - private final String storePath = TieredStoreTestUtil.getRandomStorePath(); - private MessageQueue queue; - private TieredMessageStoreConfig storeConfig; - private TieredFileAllocator fileQueueFactory; - - @Before - public void setUp() throws ClassNotFoundException, NoSuchMethodException { - storeConfig = new TieredMessageStoreConfig(); - storeConfig.setBrokerName("brokerName"); - storeConfig.setStorePathRootDir(storePath); - storeConfig.setTieredBackendServiceProvider("org.apache.rocketmq.tieredstore.provider.memory.MemoryFileSegment"); - queue = new MessageQueue("TieredFlatFileTest", storeConfig.getBrokerName(), 0); - fileQueueFactory = new TieredFileAllocator(storeConfig); - } - - @After - public void tearDown() throws IOException { - TieredStoreTestUtil.destroyMetadataStore(); - TieredStoreTestUtil.destroyTempDir(storePath); - TieredStoreExecutor.shutdown(); - } - - private List getSegmentMetadataList(TieredMetadataStore metadataStore) { - List result = new ArrayList<>(); - metadataStore.iterateFileSegment(result::add); - return result; - } - - @Test - public void testFileSegment() { - MemoryFileSegment fileSegment = new MemoryFileSegment( - FileSegmentType.COMMIT_LOG, queue, 100, storeConfig); - fileSegment.initPosition(fileSegment.getSize()); - - String filePath = TieredStoreUtil.toPath(queue); - TieredFlatFile fileQueue = fileQueueFactory.createFlatFileForCommitLog(filePath); - fileQueue.updateFileSegment(fileSegment); - - TieredMetadataStore metadataStore = TieredStoreUtil.getMetadataStore(storeConfig); - FileSegmentMetadata metadata = - metadataStore.getFileSegment(filePath, FileSegmentType.COMMIT_LOG, 100); - Assert.assertEquals(fileSegment.getPath(), metadata.getPath()); - Assert.assertEquals(FileSegmentType.COMMIT_LOG, FileSegmentType.valueOf(metadata.getType())); - Assert.assertEquals(100, 
metadata.getBaseOffset()); - Assert.assertEquals(0, metadata.getSealTimestamp()); - - fileSegment.setFull(); - fileQueue.updateFileSegment(fileSegment); - metadata = metadataStore.getFileSegment(fileSegment.getPath(), FileSegmentType.COMMIT_LOG, 100); - Assert.assertEquals(1000, metadata.getSize()); - Assert.assertEquals(0, metadata.getSealTimestamp()); - - fileSegment.commit(); - fileQueue.updateFileSegment(fileSegment); - metadata = metadataStore.getFileSegment(fileSegment.getPath(), FileSegmentType.COMMIT_LOG, 100); - Assert.assertEquals(1000 + TieredCommitLog.CODA_SIZE, metadata.getSize()); - Assert.assertTrue(metadata.getSealTimestamp() > 0); - - MemoryFileSegment fileSegment2 = new MemoryFileSegment(FileSegmentType.COMMIT_LOG, - queue, 1100, storeConfig); - fileQueue.updateFileSegment(fileSegment2); - List list = getSegmentMetadataList(metadataStore); - Assert.assertEquals(2, list.size()); - Assert.assertEquals(100, list.get(0).getBaseOffset()); - Assert.assertEquals(1100, list.get(1).getBaseOffset()); - - Assert.assertNotNull(metadataStore.getFileSegment( - fileSegment.getPath(), fileSegment.getFileType(), fileSegment.getBaseOffset())); - metadataStore.deleteFileSegment(fileSegment.getPath(), fileSegment.getFileType()); - Assert.assertEquals(0L, getSegmentMetadataList(metadataStore).size()); - } - - /** - * Test whether the file is continuous after switching to write. - */ - @Test - public void testGetFileSegment() { - TieredFlatFile fileQueue = fileQueueFactory.createFlatFileForCommitLog(TieredStoreUtil.toPath(queue)); - fileQueue.setBaseOffset(0); - TieredFileSegment segment1 = fileQueue.getFileToWrite(); - segment1.initPosition(1000); - segment1.append(ByteBuffer.allocate(100), 0); - segment1.setFull(); - segment1.commit(); - - TieredFileSegment segment2 = fileQueue.getFileToWrite(); - Assert.assertNotSame(segment1, segment2); - Assert.assertEquals(1000 + 100 + TieredCommitLog.CODA_SIZE, segment1.getMaxOffset()); - Assert.assertEquals(1000 + 100 + TieredCommitLog.CODA_SIZE, segment2.getBaseOffset()); - - Assert.assertSame(fileQueue.getSegmentIndexByOffset(1000), 0); - Assert.assertSame(fileQueue.getSegmentIndexByOffset(1050), 0); - Assert.assertSame(fileQueue.getSegmentIndexByOffset(1100 + TieredCommitLog.CODA_SIZE), 1); - Assert.assertSame(fileQueue.getSegmentIndexByOffset(1150), -1); - } - - @Test - public void testAppendAndRead() { - TieredFlatFile fileQueue = fileQueueFactory.createFlatFileForConsumeQueue(TieredStoreUtil.toPath(queue)); - fileQueue.setBaseOffset(0); - Assert.assertEquals(0, fileQueue.getMinOffset()); - Assert.assertEquals(0, fileQueue.getDispatchCommitOffset()); - - TieredFileSegment segment1 = fileQueue.getFileToWrite(); - segment1.initPosition(segment1.getSize()); - Assert.assertEquals(0, segment1.getBaseOffset()); - Assert.assertEquals(1000, fileQueue.getCommitOffset()); - Assert.assertEquals(1000, fileQueue.getMaxOffset()); - - ByteBuffer buffer = ByteBuffer.allocate(100); - long currentTimeMillis = System.currentTimeMillis(); - buffer.putLong(currentTimeMillis); - buffer.rewind(); - fileQueue.append(buffer); - Assert.assertEquals(1100, segment1.getMaxOffset()); - - segment1.setFull(); - fileQueue.commit(true); - Assert.assertEquals(1100, segment1.getCommitOffset()); - - ByteBuffer readBuffer = fileQueue.readAsync(1000, 8).join(); - Assert.assertEquals(currentTimeMillis, readBuffer.getLong()); - - TieredFileSegment segment2 = fileQueue.getFileToWrite(); - Assert.assertNotEquals(segment1, segment2); - segment2.initPosition(segment2.getSize()); - 
buffer.rewind(); - fileQueue.append(buffer); - fileQueue.commit(true); - readBuffer = fileQueue.readAsync(1000, 1200).join(); - Assert.assertEquals(currentTimeMillis, readBuffer.getLong(1100)); - } - - @Test - public void testLoadFromMetadata() { - String filePath = TieredStoreUtil.toPath(queue); - TieredFlatFile fileQueue = fileQueueFactory.createFlatFileForCommitLog(filePath); - - MemoryFileSegment fileSegment1 = - new MemoryFileSegment(FileSegmentType.COMMIT_LOG, queue, 100, storeConfig); - fileSegment1.initPosition(fileSegment1.getSize()); - fileSegment1.setFull(); - - fileQueue.updateFileSegment(fileSegment1); - fileQueue.updateFileSegment(fileSegment1); - - MemoryFileSegment fileSegment2 = - new MemoryFileSegment(FileSegmentType.COMMIT_LOG, queue, 1100, storeConfig); - fileQueue.updateFileSegment(fileSegment2); - - // Set instance to null and reload from disk - TieredStoreUtil.metadataStoreInstance = null; - fileQueue = fileQueueFactory.createFlatFileForCommitLog(filePath); - Assert.assertEquals(2, fileQueue.getNeedCommitFileSegmentList().size()); - TieredFileSegment file1 = fileQueue.getFileByIndex(0); - Assert.assertNotNull(file1); - Assert.assertEquals(100, file1.getBaseOffset()); - Assert.assertFalse(file1.isFull()); - - TieredFileSegment file2 = fileQueue.getFileByIndex(1); - Assert.assertNotNull(file2); - Assert.assertEquals(1100, file2.getBaseOffset()); - Assert.assertFalse(file2.isFull()); - - TieredFileSegment file3 = fileQueue.getFileByIndex(2); - Assert.assertNull(file3); - } - - @Test - public void testCheckFileSize() { - String filePath = TieredStoreUtil.toPath(queue); - TieredFlatFile tieredFlatFile = fileQueueFactory.createFlatFileForCommitLog(filePath); - - TieredFileSegment fileSegment1 = new MemoryFileSegment( - FileSegmentType.CONSUME_QUEUE, queue, 100, storeConfig); - fileSegment1.initPosition(fileSegment1.getSize() - 100); - fileSegment1.setFull(); - tieredFlatFile.updateFileSegment(fileSegment1); - tieredFlatFile.updateFileSegment(fileSegment1); - - TieredFileSegment fileSegment2 = new MemoryFileSegment( - FileSegmentType.CONSUME_QUEUE, queue, 1100, storeConfig); - fileSegment2.initPosition(fileSegment2.getSize() - 100); - tieredFlatFile.updateFileSegment(fileSegment2); - tieredFlatFile.updateFileSegment(fileSegment2); - - TieredFlatFile fileQueue = fileQueueFactory.createFlatFileForConsumeQueue(filePath); - Assert.assertEquals(1, fileQueue.getNeedCommitFileSegmentList().size()); - - fileSegment1 = fileQueue.getFileByIndex(0); - Assert.assertTrue(fileSegment1.isFull()); - Assert.assertEquals(fileSegment1.getSize() + 100, fileSegment1.getCommitOffset()); - - fileSegment2 = fileQueue.getFileByIndex(1); - Assert.assertEquals(1000, fileSegment2.getCommitPosition()); - - fileSegment2.setFull(); - fileQueue.commit(true); - Assert.assertEquals(0, fileQueue.getNeedCommitFileSegmentList().size()); - - fileQueue.getFileToWrite(); - Assert.assertEquals(1, fileQueue.getNeedCommitFileSegmentList().size()); - } - - @Test - public void testCleanExpiredFile() { - String filePath = TieredStoreUtil.toPath(queue); - TieredFlatFile tieredFlatFile = fileQueueFactory.createFlatFileForCommitLog(filePath); - - TieredFileSegment fileSegment1 = new MemoryFileSegment( - FileSegmentType.CONSUME_QUEUE, queue, 100, storeConfig); - fileSegment1.initPosition(fileSegment1.getSize() - 100); - fileSegment1.setFull(false); - fileSegment1.setMaxTimestamp(System.currentTimeMillis() - 1); - tieredFlatFile.updateFileSegment(fileSegment1); - tieredFlatFile.updateFileSegment(fileSegment1); - - long 
file1CreateTimeStamp = System.currentTimeMillis(); - - TieredFileSegment fileSegment2 = new MemoryFileSegment( - FileSegmentType.CONSUME_QUEUE, queue, 1100, storeConfig); - fileSegment2.initPosition(fileSegment2.getSize()); - fileSegment2.setMaxTimestamp(System.currentTimeMillis() + 1); - tieredFlatFile.updateFileSegment(fileSegment2); - tieredFlatFile.updateFileSegment(fileSegment2); - - TieredFlatFile fileQueue = fileQueueFactory.createFlatFileForConsumeQueue(filePath); - Assert.assertEquals(2, fileQueue.getFileSegmentCount()); - - TieredMetadataStore metadataStore = TieredStoreUtil.getMetadataStore(storeConfig); - fileQueue.cleanExpiredFile(file1CreateTimeStamp); - fileQueue.destroyExpiredFile(); - Assert.assertEquals(1, fileQueue.getFileSegmentCount()); - Assert.assertNull(getMetadata(metadataStore, fileSegment1)); - Assert.assertNotNull(getMetadata(metadataStore, fileSegment2)); - - fileQueue.cleanExpiredFile(Long.MAX_VALUE); - fileQueue.destroyExpiredFile(); - Assert.assertEquals(0, fileQueue.getFileSegmentCount()); - Assert.assertNull(getMetadata(metadataStore, fileSegment1)); - Assert.assertNull(getMetadata(metadataStore, fileSegment2)); - } - - private FileSegmentMetadata getMetadata(TieredMetadataStore metadataStore, TieredFileSegment fileSegment) { - return metadataStore.getFileSegment( - fileSegment.getPath(), fileSegment.getFileType(), fileSegment.getBaseOffset()); - } - - @Test - public void testRollingNewFile() { - String filePath = TieredStoreUtil.toPath(queue); - TieredFlatFile tieredFlatFile = fileQueueFactory.createFlatFileForCommitLog(filePath); - - TieredFileSegment fileSegment1 = new MemoryFileSegment( - FileSegmentType.CONSUME_QUEUE, queue, 100, storeConfig); - fileSegment1.initPosition(fileSegment1.getSize() - 100); - tieredFlatFile.updateFileSegment(fileSegment1); - - TieredFlatFile fileQueue = fileQueueFactory.createFlatFileForConsumeQueue(filePath); - Assert.assertEquals(1, fileQueue.getFileSegmentCount()); - - fileQueue.rollingNewFile(); - Assert.assertEquals(2, fileQueue.getFileSegmentCount()); - } - - @Test - public void testGetFileByTime() { - String filePath = TieredStoreUtil.toPath(queue); - TieredFlatFile tieredFlatFile = fileQueueFactory.createFlatFileForCommitLog(filePath); - TieredFileSegment fileSegment1 = new MemoryFileSegment(FileSegmentType.CONSUME_QUEUE, queue, 1100, storeConfig); - fileSegment1.setMinTimestamp(100); - fileSegment1.setMaxTimestamp(200); - - TieredFileSegment fileSegment2 = new MemoryFileSegment(FileSegmentType.CONSUME_QUEUE, queue, 1100, storeConfig); - fileSegment2.setMinTimestamp(200); - fileSegment2.setMaxTimestamp(300); - - tieredFlatFile.getFileSegmentList().add(fileSegment1); - tieredFlatFile.getFileSegmentList().add(fileSegment2); - - TieredFileSegment segmentUpper = tieredFlatFile.getFileByTime(400, BoundaryType.UPPER); - Assert.assertEquals(fileSegment2, segmentUpper); - - TieredFileSegment segmentLower = tieredFlatFile.getFileByTime(400, BoundaryType.LOWER); - Assert.assertEquals(fileSegment2, segmentLower); - - - TieredFileSegment segmentUpper2 = tieredFlatFile.getFileByTime(0, BoundaryType.UPPER); - Assert.assertEquals(fileSegment1, segmentUpper2); - - TieredFileSegment segmentLower2 = tieredFlatFile.getFileByTime(0, BoundaryType.LOWER); - Assert.assertEquals(fileSegment1, segmentLower2); - - - TieredFileSegment segmentUpper3 = tieredFlatFile.getFileByTime(200, BoundaryType.UPPER); - Assert.assertEquals(fileSegment1, segmentUpper3); - - TieredFileSegment segmentLower3 = tieredFlatFile.getFileByTime(200, 
BoundaryType.LOWER); - Assert.assertEquals(fileSegment2, segmentLower3); - } -} diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/index/IndexStoreFileTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/index/IndexStoreFileTest.java index b408a7c3cf6..48bf9ba4c74 100644 --- a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/index/IndexStoreFileTest.java +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/index/IndexStoreFileTest.java @@ -28,13 +28,12 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import org.apache.rocketmq.common.ThreadFactoryImpl; -import org.apache.rocketmq.tieredstore.TieredStoreTestUtil; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; import org.apache.rocketmq.tieredstore.common.AppendResult; import org.apache.rocketmq.tieredstore.common.FileSegmentType; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.common.TieredStoreExecutor; -import org.apache.rocketmq.tieredstore.provider.TieredFileSegment; -import org.apache.rocketmq.tieredstore.provider.posix.PosixFileSegment; +import org.apache.rocketmq.tieredstore.provider.FileSegment; +import org.apache.rocketmq.tieredstore.provider.PosixFileSegment; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtilTest; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -51,20 +50,19 @@ public class IndexStoreFileTest { private static final Set KEY_SET = Collections.singleton(KEY); private String filePath; - private TieredMessageStoreConfig storeConfig; + private MessageStoreConfig storeConfig; private IndexStoreFile indexStoreFile; @Before public void init() throws IOException { - TieredStoreExecutor.init(); filePath = UUID.randomUUID().toString().replace("-", "").substring(0, 8); String directory = Paths.get(System.getProperty("user.home"), "store_test", filePath).toString(); - storeConfig = new TieredMessageStoreConfig(); + storeConfig = new MessageStoreConfig(); storeConfig.setStorePathRootDir(directory); storeConfig.setTieredStoreFilePath(directory); storeConfig.setTieredStoreIndexFileMaxHashSlotNum(5); storeConfig.setTieredStoreIndexFileMaxIndexNum(20); - storeConfig.setTieredBackendServiceProvider("org.apache.rocketmq.tieredstore.provider.posix.PosixFileSegment"); + storeConfig.setTieredBackendServiceProvider("org.apache.rocketmq.tieredstore.provider.PosixFileSegment"); indexStoreFile = new IndexStoreFile(storeConfig, System.currentTimeMillis()); } @@ -74,10 +72,7 @@ public void shutdown() { this.indexStoreFile.shutdown(); this.indexStoreFile.destroy(); } - TieredStoreTestUtil.destroyMetadataStore(); - TieredStoreTestUtil.destroyTempDir(storeConfig.getStorePathRootDir()); - TieredStoreTestUtil.destroyTempDir(storeConfig.getTieredStoreFilePath()); - TieredStoreExecutor.shutdown(); + MessageStoreUtilTest.deleteStoreDirectory(storeConfig.getTieredStoreFilePath()); } @Test @@ -215,7 +210,7 @@ public void recoverFileTest() throws IOException { } @Test - public void doCompactionTest() throws Exception { + public void doCompactionTest() { long timestamp = indexStoreFile.getTimestamp(); for (int i = 0; i < 10; i++) { Assert.assertEquals(AppendResult.SUCCESS, indexStoreFile.putKey( @@ -223,10 +218,10 @@ public void doCompactionTest() throws Exception { } ByteBuffer byteBuffer = indexStoreFile.doCompaction(); - TieredFileSegment fileSegment = new PosixFileSegment( + FileSegment fileSegment = new PosixFileSegment( storeConfig, 
FileSegmentType.INDEX, filePath, 0L); fileSegment.append(byteBuffer, timestamp); - fileSegment.commit(); + fileSegment.commitAsync().join(); Assert.assertEquals(byteBuffer.limit(), fileSegment.getSize()); fileSegment.destroyFile(); } @@ -256,10 +251,10 @@ public void queryAsyncFromSegmentFileTest() throws ExecutionException, Interrupt } ByteBuffer byteBuffer = indexStoreFile.doCompaction(); - TieredFileSegment fileSegment = new PosixFileSegment( + FileSegment fileSegment = new PosixFileSegment( storeConfig, FileSegmentType.INDEX, filePath, 0L); fileSegment.append(byteBuffer, timestamp); - fileSegment.commit(); + fileSegment.commitAsync().join(); Assert.assertEquals(byteBuffer.limit(), fileSegment.getSize()); indexStoreFile.destroy(); diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/index/IndexStoreServiceBenchTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/index/IndexStoreServiceBenchTest.java index 57d00eefe15..fcb28402ea9 100644 --- a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/index/IndexStoreServiceBenchTest.java +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/index/IndexStoreServiceBenchTest.java @@ -27,13 +27,12 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.LongAdder; import org.apache.rocketmq.common.UtilAll; -import org.apache.rocketmq.logging.org.slf4j.Logger; -import org.apache.rocketmq.logging.org.slf4j.LoggerFactory; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; import org.apache.rocketmq.tieredstore.common.AppendResult; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.common.TieredStoreExecutor; -import org.apache.rocketmq.tieredstore.file.TieredFileAllocator; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; +import org.apache.rocketmq.tieredstore.file.FlatFileFactory; +import org.apache.rocketmq.tieredstore.metadata.DefaultMetadataStore; +import org.apache.rocketmq.tieredstore.metadata.MetadataStore; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtil; import org.junit.Assert; import org.junit.Ignore; import org.openjdk.jmh.annotations.BenchmarkMode; @@ -51,15 +50,17 @@ import org.openjdk.jmh.runner.Runner; import org.openjdk.jmh.runner.options.Options; import org.openjdk.jmh.runner.options.OptionsBuilder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @Ignore @State(Scope.Benchmark) @Fork(value = 1, jvmArgs = {"-Djava.net.preferIPv4Stack=true", "-Djmh.rmi.port=1099"}) public class IndexStoreServiceBenchTest { - private static final Logger log = LoggerFactory.getLogger(TieredStoreUtil.TIERED_STORE_LOGGER_NAME); + private static final Logger log = LoggerFactory.getLogger(MessageStoreUtil.TIERED_STORE_LOGGER_NAME); private static final String TOPIC_NAME = "TopicTest"; - private TieredMessageStoreConfig storeConfig; + private MessageStoreConfig storeConfig; private IndexStoreService indexStoreService; private final LongAdder failureCount = new LongAdder(); @@ -68,25 +69,23 @@ public void init() throws ClassNotFoundException, NoSuchMethodException { String storePath = Paths.get(System.getProperty("user.home"), "store_test", "index").toString(); UtilAll.deleteFile(new File(storePath)); UtilAll.deleteFile(new File("./e96d41b2_IndexService")); - storeConfig = new TieredMessageStoreConfig(); + storeConfig = new MessageStoreConfig(); storeConfig.setBrokerClusterName("IndexService"); storeConfig.setBrokerName("IndexServiceBroker"); 
storeConfig.setStorePathRootDir(storePath); - storeConfig.setTieredBackendServiceProvider("org.apache.rocketmq.tieredstore.provider.posix.PosixFileSegment"); + storeConfig.setTieredBackendServiceProvider("org.apache.rocketmq.tieredstore.provider.PosixFileSegment"); storeConfig.setTieredStoreIndexFileMaxHashSlotNum(500 * 1000); storeConfig.setTieredStoreIndexFileMaxIndexNum(2000 * 1000); - TieredStoreUtil.getMetadataStore(storeConfig); - TieredStoreExecutor.init(); - TieredFileAllocator tieredFileAllocator = new TieredFileAllocator(storeConfig); - indexStoreService = new IndexStoreService(tieredFileAllocator, storePath); + MetadataStore metadataStore = new DefaultMetadataStore(storeConfig); + FlatFileFactory flatFileFactory = new FlatFileFactory(metadataStore, storeConfig); + indexStoreService = new IndexStoreService(flatFileFactory, storePath); indexStoreService.start(); } - @TearDown + @TearDown() public void shutdown() throws IOException { indexStoreService.shutdown(); indexStoreService.destroy(); - TieredStoreExecutor.shutdown(); } //@Benchmark diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/index/IndexStoreServiceTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/index/IndexStoreServiceTest.java index 20b4acbfa11..ec55a028bb9 100644 --- a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/index/IndexStoreServiceTest.java +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/index/IndexStoreServiceTest.java @@ -34,25 +34,26 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import org.apache.rocketmq.common.ThreadFactoryImpl; -import org.apache.rocketmq.logging.org.slf4j.Logger; -import org.apache.rocketmq.logging.org.slf4j.LoggerFactory; import org.apache.rocketmq.store.logfile.DefaultMappedFile; -import org.apache.rocketmq.tieredstore.TieredStoreTestUtil; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; import org.apache.rocketmq.tieredstore.common.AppendResult; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.common.TieredStoreExecutor; -import org.apache.rocketmq.tieredstore.file.TieredFileAllocator; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; +import org.apache.rocketmq.tieredstore.file.FlatFileFactory; +import org.apache.rocketmq.tieredstore.metadata.DefaultMetadataStore; +import org.apache.rocketmq.tieredstore.metadata.MetadataStore; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtil; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtilTest; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.awaitility.Awaitility.await; public class IndexStoreServiceTest { - private static final Logger log = LoggerFactory.getLogger(TieredStoreUtil.TIERED_STORE_LOGGER_NAME); + private static final Logger log = LoggerFactory.getLogger(MessageStoreUtil.TIERED_STORE_LOGGER_NAME); private static final String TOPIC_NAME = "TopicTest"; private static final int TOPIC_ID = 123; @@ -62,22 +63,22 @@ public class IndexStoreServiceTest { private static final Set KEY_SET = Collections.singleton("MessageKey"); private String filePath; - private TieredMessageStoreConfig storeConfig; - private TieredFileAllocator fileAllocator; + private MessageStoreConfig storeConfig; + private FlatFileFactory fileAllocator; private IndexStoreService indexService; @Before public void 
init() throws IOException, ClassNotFoundException, NoSuchMethodException { - TieredStoreExecutor.init(); filePath = UUID.randomUUID().toString().replace("-", "").substring(0, 8); String directory = Paths.get(System.getProperty("user.home"), "store_test", filePath).toString(); - storeConfig = new TieredMessageStoreConfig(); + storeConfig = new MessageStoreConfig(); storeConfig.setStorePathRootDir(directory); storeConfig.setTieredStoreFilePath(directory); storeConfig.setTieredStoreIndexFileMaxHashSlotNum(5); storeConfig.setTieredStoreIndexFileMaxIndexNum(20); - storeConfig.setTieredBackendServiceProvider("org.apache.rocketmq.tieredstore.provider.posix.PosixFileSegment"); - fileAllocator = new TieredFileAllocator(storeConfig); + storeConfig.setTieredBackendServiceProvider("org.apache.rocketmq.tieredstore.provider.PosixFileSegment"); + MetadataStore metadataStore = new DefaultMetadataStore(storeConfig); + fileAllocator = new FlatFileFactory(metadataStore, storeConfig); } @After @@ -86,10 +87,7 @@ public void shutdown() { indexService.shutdown(); indexService.destroy(); } - TieredStoreTestUtil.destroyMetadataStore(); - TieredStoreTestUtil.destroyTempDir(storeConfig.getStorePathRootDir()); - TieredStoreTestUtil.destroyTempDir(storeConfig.getTieredStoreFilePath()); - TieredStoreExecutor.shutdown(); + MessageStoreUtilTest.deleteStoreDirectory(storeConfig.getTieredStoreFilePath()); } @Test diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/metadata/TieredMetadataManagerTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/metadata/DefaultMetadataStoreTest.java similarity index 77% rename from tieredstore/src/test/java/org/apache/rocketmq/tieredstore/metadata/TieredMetadataManagerTest.java rename to tieredstore/src/test/java/org/apache/rocketmq/tieredstore/metadata/DefaultMetadataStoreTest.java index f7d2c352a2b..7a33903d84f 100644 --- a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/metadata/TieredMetadataManagerTest.java +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/metadata/DefaultMetadataStoreTest.java @@ -24,39 +24,42 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import org.apache.rocketmq.common.message.MessageQueue; -import org.apache.rocketmq.tieredstore.TieredStoreTestUtil; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; import org.apache.rocketmq.tieredstore.common.FileSegmentType; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; +import org.apache.rocketmq.tieredstore.metadata.entity.FileSegmentMetadata; +import org.apache.rocketmq.tieredstore.metadata.entity.QueueMetadata; +import org.apache.rocketmq.tieredstore.metadata.entity.TopicMetadata; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtil; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtilTest; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; -public class TieredMetadataManagerTest { +public class DefaultMetadataStoreTest { - private final String storePath = TieredStoreTestUtil.getRandomStorePath(); + private final String storePath = MessageStoreUtilTest.getRandomStorePath(); private MessageQueue mq0; private MessageQueue mq1; private MessageQueue mq2; - private TieredMessageStoreConfig storeConfig; - private TieredMetadataStore metadataStore; + private MessageStoreConfig storeConfig; + private MetadataStore metadataStore; @Before - public void setUp() 
{ - storeConfig = new TieredMessageStoreConfig(); + public void init() { + storeConfig = new MessageStoreConfig(); storeConfig.setBrokerName("brokerName"); storeConfig.setStorePathRootDir(storePath); mq0 = new MessageQueue("MetadataStoreTest0", storeConfig.getBrokerName(), 0); mq1 = new MessageQueue("MetadataStoreTest1", storeConfig.getBrokerName(), 0); mq2 = new MessageQueue("MetadataStoreTest1", storeConfig.getBrokerName(), 1); - metadataStore = new TieredMetadataManager(storeConfig); + metadataStore = new DefaultMetadataStore(storeConfig); } @After - public void tearDown() throws IOException { - TieredStoreTestUtil.destroyMetadataStore(); - TieredStoreTestUtil.destroyTempDir(storePath); + public void shutdown() throws IOException { + metadataStore.destroy(); + MessageStoreUtilTest.deleteStoreDirectory(storePath); } @Test @@ -142,13 +145,13 @@ public void testTopic() { Assert.assertNotNull(metadataStore.getTopic(topic1)); } - private long countFileSegment(TieredMetadataStore metadataStore) { + private long countFileSegment(MetadataStore metadataStore) { AtomicLong count = new AtomicLong(); metadataStore.iterateFileSegment(segmentMetadata -> count.incrementAndGet()); return count.get(); } - private long countFileSegment(TieredMetadataStore metadataStore, String filePath) { + private long countFileSegment(MetadataStore metadataStore, String filePath) { AtomicLong count = new AtomicLong(); metadataStore.iterateFileSegment( filePath, FileSegmentType.COMMIT_LOG, segmentMetadata -> count.incrementAndGet()); @@ -157,14 +160,14 @@ private long countFileSegment(TieredMetadataStore metadataStore, String filePath @Test public void testFileSegment() { - String filePath = TieredStoreUtil.toPath(mq0); + String filePath = MessageStoreUtil.toFilePath(mq0); FileSegmentMetadata segmentMetadata1 = new FileSegmentMetadata( - filePath, 0L, FileSegmentType.COMMIT_LOG.getType()); + filePath, 0L, FileSegmentType.COMMIT_LOG.getCode()); metadataStore.updateFileSegment(segmentMetadata1); Assert.assertEquals(1L, countFileSegment(metadataStore)); FileSegmentMetadata segmentMetadata2 = new FileSegmentMetadata( - filePath, 100, FileSegmentType.COMMIT_LOG.getType()); + filePath, 100, FileSegmentType.COMMIT_LOG.getCode()); metadataStore.updateFileSegment(segmentMetadata2); Assert.assertEquals(2L, countFileSegment(metadataStore)); @@ -186,15 +189,15 @@ public void testFileSegment() { @Test public void testFileSegmentDelete() { - String filePath0 = TieredStoreUtil.toPath(mq0); - String filePath1 = TieredStoreUtil.toPath(mq1); + String filePath0 = MessageStoreUtil.toFilePath(mq0); + String filePath1 = MessageStoreUtil.toFilePath(mq1); for (int i = 0; i < 10; i++) { FileSegmentMetadata segmentMetadata = new FileSegmentMetadata( - filePath0, i * 1000L * 1000L, FileSegmentType.COMMIT_LOG.getType()); + filePath0, i * 1000L * 1000L, FileSegmentType.COMMIT_LOG.getCode()); metadataStore.updateFileSegment(segmentMetadata); segmentMetadata = new FileSegmentMetadata( - filePath1, i * 1000L * 1000L, FileSegmentType.COMMIT_LOG.getType()); + filePath1, i * 1000L * 1000L, FileSegmentType.COMMIT_LOG.getCode()); metadataStore.updateFileSegment(segmentMetadata); } Assert.assertEquals(20, countFileSegment(metadataStore)); @@ -213,52 +216,52 @@ public void testFileSegmentDelete() { @Test public void testReload() { - TieredMetadataManager metadataManager = (TieredMetadataManager) metadataStore; - metadataManager.addTopic(mq0.getTopic(), 1); - metadataManager.addTopic(mq1.getTopic(), 2); + DefaultMetadataStore defaultMetadataStore = 
(DefaultMetadataStore) metadataStore; + defaultMetadataStore.addTopic(mq0.getTopic(), 1); + defaultMetadataStore.addTopic(mq1.getTopic(), 2); - metadataManager.addQueue(mq0, 2); - metadataManager.addQueue(mq1, 4); - metadataManager.addQueue(mq2, 8); + defaultMetadataStore.addQueue(mq0, 2); + defaultMetadataStore.addQueue(mq1, 4); + defaultMetadataStore.addQueue(mq2, 8); - String filePath0 = TieredStoreUtil.toPath(mq0); + String filePath0 = MessageStoreUtil.toFilePath(mq0); FileSegmentMetadata segmentMetadata = - new FileSegmentMetadata(filePath0, 100, FileSegmentType.COMMIT_LOG.getType()); + new FileSegmentMetadata(filePath0, 100, FileSegmentType.COMMIT_LOG.getCode()); metadataStore.updateFileSegment(segmentMetadata); segmentMetadata = - new FileSegmentMetadata(filePath0, 200, FileSegmentType.COMMIT_LOG.getType()); + new FileSegmentMetadata(filePath0, 200, FileSegmentType.COMMIT_LOG.getCode()); metadataStore.updateFileSegment(segmentMetadata); - Assert.assertTrue(new File(metadataManager.configFilePath()).exists()); + Assert.assertTrue(new File(defaultMetadataStore.configFilePath()).exists()); // Reload from disk - metadataManager = new TieredMetadataManager(storeConfig); - metadataManager.load(); - TopicMetadata topicMetadata = metadataManager.getTopic(mq0.getTopic()); + defaultMetadataStore = new DefaultMetadataStore(storeConfig); + defaultMetadataStore.load(); + TopicMetadata topicMetadata = defaultMetadataStore.getTopic(mq0.getTopic()); Assert.assertNotNull(topicMetadata); Assert.assertEquals(topicMetadata.getReserveTime(), 1); - topicMetadata = metadataManager.getTopic(mq1.getTopic()); + topicMetadata = defaultMetadataStore.getTopic(mq1.getTopic()); Assert.assertNotNull(topicMetadata); Assert.assertEquals(topicMetadata.getReserveTime(), 2); - QueueMetadata queueMetadata = metadataManager.getQueue(mq0); + QueueMetadata queueMetadata = defaultMetadataStore.getQueue(mq0); Assert.assertNotNull(queueMetadata); Assert.assertEquals(mq0, queueMetadata.getQueue()); Assert.assertEquals(queueMetadata.getMinOffset(), 2); - queueMetadata = metadataManager.getQueue(mq1); + queueMetadata = defaultMetadataStore.getQueue(mq1); Assert.assertNotNull(queueMetadata); Assert.assertEquals(mq1, queueMetadata.getQueue()); Assert.assertEquals(queueMetadata.getMinOffset(), 4); - queueMetadata = metadataManager.getQueue(mq2); + queueMetadata = defaultMetadataStore.getQueue(mq2); Assert.assertNotNull(queueMetadata); Assert.assertEquals(mq2, queueMetadata.getQueue()); Assert.assertEquals(queueMetadata.getMinOffset(), 8); Map map = new HashMap<>(); - metadataManager.iterateFileSegment(metadata -> map.put(metadata.getBaseOffset(), metadata)); + defaultMetadataStore.iterateFileSegment(metadata -> map.put(metadata.getBaseOffset(), metadata)); FileSegmentMetadata fileSegmentMetadata = map.get(100L); Assert.assertNotNull(fileSegmentMetadata); Assert.assertEquals(filePath0, fileSegmentMetadata.getPath()); @@ -267,4 +270,15 @@ public void testReload() { Assert.assertNotNull(fileSegmentMetadata); Assert.assertEquals(filePath0, fileSegmentMetadata.getPath()); } + + @Test + public void basicTest() { + this.testTopic(); + this.testQueue(); + this.testFileSegment(); + + ((DefaultMetadataStore) metadataStore).encode(); + ((DefaultMetadataStore) metadataStore).encode(false); + ((DefaultMetadataStore) metadataStore).encode(true); + } } diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/metrics/TieredStoreMetricsManagerTest.java 
b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/metrics/TieredStoreMetricsManagerTest.java index 26b38b9706e..cc4d9e2c68b 100644 --- a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/metrics/TieredStoreMetricsManagerTest.java +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/metrics/TieredStoreMetricsManagerTest.java @@ -17,23 +17,17 @@ package org.apache.rocketmq.tieredstore.metrics; import io.opentelemetry.sdk.OpenTelemetrySdk; -import java.io.IOException; -import org.apache.rocketmq.tieredstore.TieredMessageFetcher; -import org.apache.rocketmq.tieredstore.TieredStoreTestUtil; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.common.TieredStoreExecutor; -import org.junit.After; +import org.apache.rocketmq.store.DefaultMessageStore; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; +import org.apache.rocketmq.tieredstore.TieredMessageStore; +import org.apache.rocketmq.tieredstore.core.MessageStoreFetcherImpl; +import org.apache.rocketmq.tieredstore.file.FlatFileStore; +import org.apache.rocketmq.tieredstore.provider.PosixFileSegment; import org.junit.Test; +import org.mockito.Mockito; public class TieredStoreMetricsManagerTest { - @After - public void tearDown() throws IOException { - TieredStoreTestUtil.destroyCompositeFlatFileManager(); - TieredStoreTestUtil.destroyMetadataStore(); - TieredStoreExecutor.shutdown(); - } - @Test public void getMetricsView() { TieredStoreMetricsManager.getMetricsView(); @@ -41,11 +35,17 @@ public void getMetricsView() { @Test public void init() { - TieredStoreExecutor.init(); - TieredMessageStoreConfig storeConfig = new TieredMessageStoreConfig(); - storeConfig.setTieredBackendServiceProvider("org.apache.rocketmq.tieredstore.provider.memory.MemoryFileSegment"); - TieredStoreMetricsManager.init(OpenTelemetrySdk.builder().build().getMeter(""), - null, storeConfig, new TieredMessageFetcher(storeConfig), null); + MessageStoreConfig storeConfig = new MessageStoreConfig(); + storeConfig.setTieredBackendServiceProvider(PosixFileSegment.class.getName()); + TieredMessageStore messageStore = Mockito.mock(TieredMessageStore.class); + Mockito.when(messageStore.getStoreConfig()).thenReturn(storeConfig); + Mockito.when(messageStore.getFlatFileStore()).thenReturn(Mockito.mock(FlatFileStore.class)); + MessageStoreFetcherImpl fetcher = Mockito.spy(new MessageStoreFetcherImpl(messageStore)); + + TieredStoreMetricsManager.init( + OpenTelemetrySdk.builder().build().getMeter(""), + null, storeConfig, fetcher, + Mockito.mock(FlatFileStore.class), Mockito.mock(DefaultMessageStore.class)); } @Test diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/FileSegmentFactoryTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/FileSegmentFactoryTest.java new file mode 100644 index 00000000000..1efbc3f9ee3 --- /dev/null +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/FileSegmentFactoryTest.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.rocketmq.tieredstore.provider; + +import org.apache.rocketmq.tieredstore.MessageStoreConfig; +import org.apache.rocketmq.tieredstore.common.FileSegmentType; +import org.apache.rocketmq.tieredstore.metadata.DefaultMetadataStore; +import org.apache.rocketmq.tieredstore.metadata.MetadataStore; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtilTest; +import org.junit.Assert; +import org.junit.Test; + +public class FileSegmentFactoryTest { + + @Test + public void fileSegmentInstanceTest() throws ClassNotFoundException, NoSuchMethodException { + int baseOffset = 1000; + String filePath = "FileSegmentFactoryPath"; + String storePath = MessageStoreUtilTest.getRandomStorePath(); + MessageStoreConfig storeConfig = new MessageStoreConfig(); + storeConfig.setTieredStoreCommitLogMaxSize(1024); + storeConfig.setTieredStoreFilePath(storePath); + + MetadataStore metadataStore = new DefaultMetadataStore(storeConfig); + FileSegmentFactory factory = new FileSegmentFactory(metadataStore, storeConfig); + + Assert.assertEquals(metadataStore, factory.getMetadataStore()); + Assert.assertEquals(storeConfig, factory.getStoreConfig()); + + FileSegment fileSegment = factory.createCommitLogFileSegment(filePath, baseOffset); + Assert.assertEquals(1000, fileSegment.getBaseOffset()); + Assert.assertEquals(FileSegmentType.COMMIT_LOG, fileSegment.getFileType()); + fileSegment.destroyFile(); + + fileSegment = factory.createConsumeQueueFileSegment(filePath, baseOffset); + Assert.assertEquals(1000, fileSegment.getBaseOffset()); + Assert.assertEquals(FileSegmentType.CONSUME_QUEUE, fileSegment.getFileType()); + fileSegment.destroyFile(); + + fileSegment = factory.createIndexServiceFileSegment(filePath, baseOffset); + Assert.assertEquals(1000, fileSegment.getBaseOffset()); + Assert.assertEquals(FileSegmentType.INDEX, fileSegment.getFileType()); + fileSegment.destroyFile(); + + Assert.assertThrows(RuntimeException.class, + () -> factory.createSegment(null, null, 0L)); + storeConfig.setTieredBackendServiceProvider(null); + Assert.assertThrows(RuntimeException.class, + () -> new FileSegmentFactory(metadataStore, storeConfig)); + MessageStoreUtilTest.deleteStoreDirectory(storePath); + } +} diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/FileSegmentTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/FileSegmentTest.java new file mode 100644 index 00000000000..2bba3d01370 --- /dev/null +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/FileSegmentTest.java @@ -0,0 +1,469 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.rocketmq.tieredstore.provider; + +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; +import java.util.concurrent.TimeUnit; +import org.apache.rocketmq.common.message.MessageQueue; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; +import org.apache.rocketmq.tieredstore.MessageStoreExecutor; +import org.apache.rocketmq.tieredstore.common.AppendResult; +import org.apache.rocketmq.tieredstore.common.FileSegmentType; +import org.apache.rocketmq.tieredstore.exception.TieredStoreErrorCode; +import org.apache.rocketmq.tieredstore.exception.TieredStoreException; +import org.apache.rocketmq.tieredstore.metadata.DefaultMetadataStore; +import org.apache.rocketmq.tieredstore.metadata.MetadataStore; +import org.apache.rocketmq.tieredstore.util.MessageFormatUtil; +import org.apache.rocketmq.tieredstore.util.MessageFormatUtilTest; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtil; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtilTest; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyLong; + +public class FileSegmentTest { + + public int baseOffset = 1000; + private final String storePath = MessageStoreUtilTest.getRandomStorePath(); + private MessageStoreConfig storeConfig; + private MessageQueue mq; + private MessageStoreExecutor storeExecutor; + + @Before + public void init() { + storeConfig = new MessageStoreConfig(); + storeConfig.setTieredStoreCommitLogMaxSize(2000); + storeConfig.setTieredStoreFilePath(storePath); + storeConfig.setTieredBackendServiceProvider(PosixFileSegment.class.getName()); + mq = new MessageQueue("FileSegmentTest", "brokerName", 0); + storeExecutor = new MessageStoreExecutor(); + } + + @After + public void shutdown() { + MessageStoreUtilTest.deleteStoreDirectory(storePath); + storeExecutor.shutdown(); + } + + @Test + public void fileAttributesTest() { + int baseOffset = 1000; + FileSegment fileSegment = new PosixFileSegment( + storeConfig, FileSegmentType.COMMIT_LOG, MessageStoreUtil.toFilePath(mq), baseOffset); + + // for default value check + Assert.assertEquals(baseOffset, fileSegment.getBaseOffset()); + Assert.assertEquals(0L, fileSegment.getCommitPosition()); + Assert.assertEquals(0L, fileSegment.getAppendPosition()); + Assert.assertEquals(baseOffset, fileSegment.getCommitOffset()); + Assert.assertEquals(baseOffset, fileSegment.getAppendOffset()); + Assert.assertEquals(FileSegmentType.COMMIT_LOG, fileSegment.getFileType()); + Assert.assertEquals(Long.MAX_VALUE, fileSegment.getMinTimestamp()); + Assert.assertEquals(Long.MAX_VALUE, fileSegment.getMaxTimestamp()); + + // for recover + long timestamp = System.currentTimeMillis(); + fileSegment.setMinTimestamp(timestamp); + 
fileSegment.setMaxTimestamp(timestamp); + Assert.assertEquals(timestamp, fileSegment.getMinTimestamp()); + Assert.assertEquals(timestamp, fileSegment.getMaxTimestamp()); + + // for file status change + Assert.assertFalse(fileSegment.isClosed()); + fileSegment.close(); + Assert.assertTrue(fileSegment.isClosed()); + + fileSegment.destroyFile(); + } + + @Test + public void fileSortByOffsetTest() { + FileSegment fileSegment1 = new PosixFileSegment( + storeConfig, FileSegmentType.COMMIT_LOG, MessageStoreUtil.toFilePath(mq), 200L); + FileSegment fileSegment2 = new PosixFileSegment( + storeConfig, FileSegmentType.COMMIT_LOG, MessageStoreUtil.toFilePath(mq), 100L); + FileSegment[] fileSegments = new FileSegment[] {fileSegment1, fileSegment2}; + Arrays.sort(fileSegments); + Assert.assertEquals(fileSegments[0], fileSegment2); + Assert.assertEquals(fileSegments[1], fileSegment1); + } + + @Test + public void fileMaxSizeTest() { + FileSegment fileSegment = new PosixFileSegment( + storeConfig, FileSegmentType.COMMIT_LOG, MessageStoreUtil.toFilePath(mq), 100L); + Assert.assertEquals(storeConfig.getTieredStoreCommitLogMaxSize(), fileSegment.getMaxSize()); + fileSegment.destroyFile(); + + fileSegment = new PosixFileSegment( + storeConfig, FileSegmentType.CONSUME_QUEUE, MessageStoreUtil.toFilePath(mq), 100L); + Assert.assertEquals(storeConfig.getTieredStoreConsumeQueueMaxSize(), fileSegment.getMaxSize()); + fileSegment.destroyFile(); + + fileSegment = new PosixFileSegment( + storeConfig, FileSegmentType.INDEX, MessageStoreUtil.toFilePath(mq), 100L); + Assert.assertEquals(Long.MAX_VALUE, fileSegment.getMaxSize()); + fileSegment.destroyFile(); + } + + @Test + public void unexpectedCaseTest() { + MetadataStore metadataStore = new DefaultMetadataStore(storeConfig); + FileSegmentFactory factory = new FileSegmentFactory(metadataStore, storeConfig); + FileSegment fileSegment = factory.createCommitLogFileSegment(MessageStoreUtil.toFilePath(mq), baseOffset); + + fileSegment.initPosition(fileSegment.getSize()); + Assert.assertFalse(fileSegment.needCommit()); + Assert.assertTrue(fileSegment.commitAsync().join()); + + fileSegment.append(ByteBuffer.allocate(0), 0L); + Assert.assertTrue(fileSegment.commitAsync().join()); + + ByteBuffer byteBuffer = ByteBuffer.allocate(8); + byteBuffer.putLong(0L); + byteBuffer.flip(); + fileSegment.append(byteBuffer, 0L); + + byteBuffer.getLong(); + Assert.assertTrue(fileSegment.commitAsync().join()); + fileSegment.destroyFile(); + } + + @Test + public void commitLogTest() throws InterruptedException { + MetadataStore metadataStore = new DefaultMetadataStore(storeConfig); + FileSegmentFactory factory = new FileSegmentFactory(metadataStore, storeConfig); + FileSegment fileSegment = factory.createCommitLogFileSegment(MessageStoreUtil.toFilePath(mq), baseOffset); + long lastSize = fileSegment.getSize(); + fileSegment.initPosition(fileSegment.getSize()); + Assert.assertFalse(fileSegment.needCommit()); + Assert.assertTrue(fileSegment.commitAsync().join()); + + fileSegment.append(MessageFormatUtilTest.buildMockedMessageBuffer(), 0L); + fileSegment.append(MessageFormatUtilTest.buildMockedMessageBuffer(), 0L); + Assert.assertTrue(fileSegment.needCommit()); + + fileSegment.commitLock.acquire(); + Assert.assertFalse(fileSegment.commitAsync().join()); + fileSegment.commitLock.release(); + + long storeTimestamp = System.currentTimeMillis(); + ByteBuffer buffer = MessageFormatUtilTest.buildMockedMessageBuffer(); + buffer.putLong(MessageFormatUtil.STORE_TIMESTAMP_POSITION, storeTimestamp); + 
buffer.putLong(MessageFormatUtil.QUEUE_OFFSET_POSITION, 100L); + fileSegment.append(buffer, storeTimestamp); + + Assert.assertTrue(fileSegment.needCommit()); + Assert.assertEquals(baseOffset, fileSegment.getBaseOffset()); + Assert.assertEquals(baseOffset + lastSize + MessageFormatUtilTest.MSG_LEN * 3, fileSegment.getAppendOffset()); + Assert.assertEquals(0L, fileSegment.getMinTimestamp()); + Assert.assertEquals(storeTimestamp, fileSegment.getMaxTimestamp()); + + List buffers = fileSegment.borrowBuffer(); + Assert.assertEquals(3, buffers.size()); + fileSegment.bufferList.addAll(buffers); + + fileSegment.commitAsync().join(); + Assert.assertFalse(fileSegment.needCommit()); + Assert.assertEquals(fileSegment.getCommitOffset(), fileSegment.getAppendOffset()); + + // offset will change when type is commitLog + ByteBuffer msg1 = fileSegment.read(lastSize, MessageFormatUtilTest.MSG_LEN); + Assert.assertEquals(baseOffset + lastSize, MessageFormatUtil.getCommitLogOffset(msg1)); + + ByteBuffer msg2 = fileSegment.read(lastSize + MessageFormatUtilTest.MSG_LEN, MessageFormatUtilTest.MSG_LEN); + Assert.assertEquals(baseOffset + lastSize + MessageFormatUtilTest.MSG_LEN, MessageFormatUtil.getCommitLogOffset(msg2)); + + ByteBuffer msg3 = fileSegment.read(lastSize + MessageFormatUtilTest.MSG_LEN * 2, MessageFormatUtilTest.MSG_LEN); + Assert.assertEquals(baseOffset + lastSize + MessageFormatUtilTest.MSG_LEN * 2, MessageFormatUtil.getCommitLogOffset(msg3)); + + // buffer full + fileSegment.bufferList.addAll(buffers); + storeConfig.setTieredStoreMaxGroupCommitCount(3); + Assert.assertEquals(AppendResult.BUFFER_FULL, + fileSegment.append(MessageFormatUtilTest.buildMockedMessageBuffer(), 0L)); + + // file full + fileSegment.initPosition(storeConfig.getTieredStoreCommitLogMaxSize() - MessageFormatUtilTest.MSG_LEN + 1); + Assert.assertEquals(AppendResult.FILE_FULL, + fileSegment.append(MessageFormatUtilTest.buildMockedMessageBuffer(), 0L)); + + // file close + fileSegment.close(); + Assert.assertEquals(AppendResult.FILE_CLOSED, + fileSegment.append(MessageFormatUtilTest.buildMockedMessageBuffer(), 0L)); + Assert.assertFalse(fileSegment.commitAsync().join()); + + fileSegment.destroyFile(); + } + + @Test + public void consumeQueueTest() throws ClassNotFoundException, NoSuchMethodException { + MetadataStore metadataStore = new DefaultMetadataStore(storeConfig); + FileSegmentFactory factory = new FileSegmentFactory(metadataStore, storeConfig); + FileSegment fileSegment = factory.createConsumeQueueFileSegment(MessageStoreUtil.toFilePath(mq), baseOffset); + + long storeTimestamp = System.currentTimeMillis(); + int messageSize = MessageFormatUtilTest.MSG_LEN; + int unitSize = MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE; + long initPosition = 5 * unitSize; + + fileSegment.initPosition(initPosition); + fileSegment.append(MessageFormatUtilTest.buildMockedConsumeQueueBuffer().putLong(0, baseOffset), 0); + fileSegment.append(MessageFormatUtilTest.buildMockedConsumeQueueBuffer().putLong(0, baseOffset + messageSize), 0); + fileSegment.append(MessageFormatUtilTest.buildMockedConsumeQueueBuffer().putLong(0, baseOffset + messageSize * 2), storeTimestamp); + + Assert.assertEquals(initPosition + unitSize * 3, fileSegment.getAppendPosition()); + Assert.assertEquals(0, fileSegment.getMinTimestamp()); + Assert.assertEquals(storeTimestamp, fileSegment.getMaxTimestamp()); + + fileSegment.commitAsync().join(); + Assert.assertEquals(fileSegment.getAppendOffset(), fileSegment.getCommitOffset()); + + ByteBuffer cqItem1 = 
fileSegment.read(initPosition, unitSize); + Assert.assertEquals(baseOffset, cqItem1.getLong()); + + ByteBuffer cqItem2 = fileSegment.read(initPosition + unitSize, unitSize); + Assert.assertEquals(baseOffset + messageSize, cqItem2.getLong()); + + ByteBuffer cqItem3 = fileSegment.read(initPosition + unitSize * 2, unitSize); + Assert.assertEquals(baseOffset + messageSize * 2, cqItem3.getLong()); + } + + @Test + public void fileSegmentReadTest() throws ClassNotFoundException, NoSuchMethodException { + MetadataStore metadataStore = new DefaultMetadataStore(storeConfig); + FileSegmentFactory factory = new FileSegmentFactory(metadataStore, storeConfig); + FileSegment fileSegment = factory.createConsumeQueueFileSegment(MessageStoreUtil.toFilePath(mq), baseOffset); + + long storeTimestamp = System.currentTimeMillis(); + int messageSize = MessageFormatUtilTest.MSG_LEN; + int unitSize = MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE; + long initPosition = 5 * unitSize; + + fileSegment.initPosition(initPosition); + fileSegment.append(MessageFormatUtilTest.buildMockedConsumeQueueBuffer().putLong(0, baseOffset), 0); + fileSegment.append(MessageFormatUtilTest.buildMockedConsumeQueueBuffer().putLong(0, baseOffset + messageSize), 0); + fileSegment.append(MessageFormatUtilTest.buildMockedConsumeQueueBuffer().putLong(0, baseOffset + messageSize * 2), storeTimestamp); + fileSegment.commitAsync().join(); + + CompletionException exception = Assert.assertThrows( + CompletionException.class, () -> fileSegment.read(-1, -1)); + Assert.assertTrue(exception.getCause() instanceof TieredStoreException); + Assert.assertEquals(TieredStoreErrorCode.ILLEGAL_PARAM, ((TieredStoreException) exception.getCause()).getErrorCode()); + + exception = Assert.assertThrows( + CompletionException.class, () -> fileSegment.read(100, 0)); + Assert.assertTrue(exception.getCause() instanceof TieredStoreException); + Assert.assertEquals(TieredStoreErrorCode.ILLEGAL_PARAM, ((TieredStoreException) exception.getCause()).getErrorCode()); + + // at most three messages + Assert.assertEquals(unitSize * 3, + fileSegment.read(100, messageSize * 3).remaining()); + Assert.assertEquals(unitSize * 3, + fileSegment.read(100, messageSize * 5).remaining()); + } + + @Test + public void commitFailedThenSuccessTest() { + MemoryFileSegment segment = new MemoryFileSegment( + storeConfig, FileSegmentType.COMMIT_LOG, MessageStoreUtil.toFilePath(mq), baseOffset); + + long lastSize = segment.getSize(); + segment.setCheckSize(false); + segment.initPosition(lastSize); + segment.setSize((int) lastSize); + + int messageSize = MessageFormatUtilTest.MSG_LEN; + ByteBuffer buffer1 = MessageFormatUtilTest.buildMockedMessageBuffer().putLong( + MessageFormatUtil.PHYSICAL_OFFSET_POSITION, baseOffset + lastSize); + ByteBuffer buffer2 = MessageFormatUtilTest.buildMockedMessageBuffer().putLong( + MessageFormatUtil.PHYSICAL_OFFSET_POSITION, baseOffset + lastSize + messageSize); + Assert.assertEquals(AppendResult.SUCCESS, segment.append(buffer1, 0)); + Assert.assertEquals(AppendResult.SUCCESS, segment.append(buffer2, 0)); + + // Mock new message arrive + long timestamp = System.currentTimeMillis(); + segment.blocker = new CompletableFuture<>(); + new Thread(() -> { + try { + TimeUnit.SECONDS.sleep(1); + } catch (InterruptedException e) { + Assert.fail(e.getMessage()); + } + ByteBuffer buffer = MessageFormatUtilTest.buildMockedMessageBuffer(); + buffer.putLong(MessageFormatUtil.PHYSICAL_OFFSET_POSITION, messageSize * 2); + buffer.putLong(MessageFormatUtil.STORE_TIMESTAMP_POSITION, 
timestamp); + segment.append(buffer, 0); + segment.blocker.complete(false); + }).start(); + + // Commit failed + segment.commitAsync().join(); + segment.blocker.join(); + segment.blocker = null; + + // Copy data and assume commit success + segment.getMemStore().put(buffer1); + segment.getMemStore().put(buffer2); + segment.setSize((int) (lastSize + messageSize * 2)); + + segment.commitAsync().join(); + Assert.assertEquals(lastSize + messageSize * 3, segment.getCommitPosition()); + Assert.assertEquals(baseOffset + lastSize + messageSize * 3, segment.getCommitOffset()); + Assert.assertEquals(baseOffset + lastSize + messageSize * 3, segment.getAppendOffset()); + + ByteBuffer msg1 = segment.read(lastSize, messageSize); + Assert.assertEquals(baseOffset + lastSize, MessageFormatUtil.getCommitLogOffset(msg1)); + + ByteBuffer msg2 = segment.read(lastSize + messageSize, messageSize); + Assert.assertEquals(baseOffset + lastSize + messageSize, MessageFormatUtil.getCommitLogOffset(msg2)); + + ByteBuffer msg3 = segment.read(lastSize + messageSize * 2, messageSize); + Assert.assertEquals(baseOffset + lastSize + messageSize * 2, MessageFormatUtil.getCommitLogOffset(msg3)); + } + + @Test + public void commitFailedMoreTimes() { + long startTime = System.currentTimeMillis(); + MemoryFileSegment segment = new MemoryFileSegment( + storeConfig, FileSegmentType.COMMIT_LOG, MessageStoreUtil.toFilePath(mq), baseOffset); + + long lastSize = segment.getSize(); + segment.setCheckSize(false); + segment.initPosition(lastSize); + segment.setSize((int) lastSize); + + ByteBuffer buffer1 = MessageFormatUtilTest.buildMockedMessageBuffer().putLong( + MessageFormatUtil.PHYSICAL_OFFSET_POSITION, baseOffset + lastSize); + ByteBuffer buffer2 = MessageFormatUtilTest.buildMockedMessageBuffer().putLong( + MessageFormatUtil.PHYSICAL_OFFSET_POSITION, baseOffset + lastSize + MessageFormatUtilTest.MSG_LEN); + segment.append(buffer1, 0); + segment.append(buffer2, 0); + + // Mock new message arrive + segment.blocker = new CompletableFuture<>(); + new Thread(() -> { + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + Assert.fail(e.getMessage()); + } + ByteBuffer buffer = MessageFormatUtilTest.buildMockedMessageBuffer(); + buffer.putLong(MessageFormatUtil.PHYSICAL_OFFSET_POSITION, MessageFormatUtilTest.MSG_LEN * 2); + buffer.putLong(MessageFormatUtil.STORE_TIMESTAMP_POSITION, startTime); + segment.append(buffer, 0); + segment.blocker.complete(false); + }).start(); + + for (int i = 0; i < 3; i++) { + Assert.assertFalse(segment.commitAsync().join()); + } + + FileSegment fileSpySegment = Mockito.spy(segment); + Mockito.when(fileSpySegment.getSize()).thenReturn(-1L); + Assert.assertFalse(fileSpySegment.commitAsync().join()); + + Assert.assertEquals(lastSize, segment.getCommitPosition()); + Assert.assertEquals(baseOffset + lastSize, segment.getCommitOffset()); + Assert.assertEquals(baseOffset + lastSize + MessageFormatUtilTest.MSG_LEN * 3, segment.getAppendOffset()); + + segment.blocker.join(); + segment.blocker = null; + + segment.commitAsync().join(); + Assert.assertEquals(lastSize + MessageFormatUtilTest.MSG_LEN * 2, segment.getCommitPosition()); + Assert.assertEquals(baseOffset + lastSize + MessageFormatUtilTest.MSG_LEN * 2, segment.getCommitOffset()); + Assert.assertEquals(baseOffset + lastSize + MessageFormatUtilTest.MSG_LEN * 3, segment.getAppendOffset()); + + segment.commitAsync().join(); + Assert.assertEquals(lastSize + MessageFormatUtilTest.MSG_LEN * 3, segment.getCommitPosition()); + Assert.assertEquals(baseOffset + 
lastSize + MessageFormatUtilTest.MSG_LEN * 3, segment.getCommitOffset()); + Assert.assertEquals(baseOffset + lastSize + MessageFormatUtilTest.MSG_LEN * 3, segment.getAppendOffset()); + + ByteBuffer msg1 = segment.read(lastSize, MessageFormatUtilTest.MSG_LEN); + Assert.assertEquals(baseOffset + lastSize, MessageFormatUtil.getCommitLogOffset(msg1)); + + ByteBuffer msg2 = segment.read(lastSize + MessageFormatUtilTest.MSG_LEN, MessageFormatUtilTest.MSG_LEN); + Assert.assertEquals(baseOffset + lastSize + MessageFormatUtilTest.MSG_LEN, MessageFormatUtil.getCommitLogOffset(msg2)); + + ByteBuffer msg3 = segment.read(lastSize + MessageFormatUtilTest.MSG_LEN * 2, MessageFormatUtilTest.MSG_LEN); + Assert.assertEquals(baseOffset + lastSize + MessageFormatUtilTest.MSG_LEN * 2, MessageFormatUtil.getCommitLogOffset(msg3)); + } + + @Test + public void handleCommitExceptionTest() { + MetadataStore metadataStore = new DefaultMetadataStore(storeConfig); + FileSegmentFactory factory = new FileSegmentFactory(metadataStore, storeConfig); + + { + FileSegment fileSegment = factory.createCommitLogFileSegment(MessageStoreUtil.toFilePath(mq), baseOffset); + FileSegment fileSpySegment = Mockito.spy(fileSegment); + fileSpySegment.append(MessageFormatUtilTest.buildMockedMessageBuffer(), 0L); + fileSpySegment.append(MessageFormatUtilTest.buildMockedMessageBuffer(), 0L); + + Mockito.when(fileSpySegment.commit0(any(), anyLong(), anyInt(), anyBoolean())) + .thenReturn(CompletableFuture.supplyAsync(() -> { + throw new TieredStoreException(TieredStoreErrorCode.IO_ERROR, "Test"); + })); + Assert.assertFalse(fileSpySegment.commitAsync().join()); + fileSegment.destroyFile(); + } + + { + FileSegment fileSegment = factory.createCommitLogFileSegment(MessageStoreUtil.toFilePath(mq), baseOffset); + FileSegment fileSpySegment = Mockito.spy(fileSegment); + fileSpySegment.append(MessageFormatUtilTest.buildMockedMessageBuffer(), 0L); + fileSpySegment.append(MessageFormatUtilTest.buildMockedMessageBuffer(), 0L); + + Mockito.when(fileSpySegment.commit0(any(), anyLong(), anyInt(), anyBoolean())) + .thenReturn(CompletableFuture.supplyAsync(() -> { + long size = MessageFormatUtilTest.buildMockedMessageBuffer().remaining(); + TieredStoreException exception = new TieredStoreException(TieredStoreErrorCode.IO_ERROR, "Test"); + exception.setPosition(size * 2L); + throw exception; + })); + Assert.assertTrue(fileSpySegment.commitAsync().join()); + fileSegment.destroyFile(); + } + + { + FileSegment fileSegment = factory.createCommitLogFileSegment(MessageStoreUtil.toFilePath(mq), baseOffset); + FileSegment fileSpySegment = Mockito.spy(fileSegment); + fileSpySegment.append(MessageFormatUtilTest.buildMockedMessageBuffer(), 0L); + fileSpySegment.append(MessageFormatUtilTest.buildMockedMessageBuffer(), 0L); + + Mockito.when(fileSpySegment.commit0(any(), anyLong(), anyInt(), anyBoolean())) + .thenReturn(CompletableFuture.supplyAsync(() -> { + throw new RuntimeException("Runtime Error for Test"); + })); + Mockito.when(fileSpySegment.getSize()).thenReturn(0L); + Assert.assertFalse(fileSpySegment.commitAsync().join()); + } + } +} diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/MemoryFileSegmentTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/MemoryFileSegmentTest.java new file mode 100644 index 00000000000..cc9793dc886 --- /dev/null +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/MemoryFileSegmentTest.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation 
(ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.rocketmq.tieredstore.provider; + +import java.io.IOException; +import org.apache.rocketmq.common.message.MessageQueue; +import org.apache.rocketmq.tieredstore.MessageStoreConfig; +import org.apache.rocketmq.tieredstore.common.FileSegmentType; +import org.apache.rocketmq.tieredstore.stream.FileSegmentInputStream; +import org.apache.rocketmq.tieredstore.util.MessageStoreUtil; +import org.junit.Assert; +import org.junit.Test; +import org.mockito.Mockito; + +import static org.mockito.ArgumentMatchers.any; + +public class MemoryFileSegmentTest { + + @Test + public void memoryTest() throws IOException { + MemoryFileSegment fileSegment = new MemoryFileSegment( + new MessageStoreConfig(), FileSegmentType.COMMIT_LOG, + MessageStoreUtil.toFilePath(new MessageQueue()), 0L); + Assert.assertFalse(fileSegment.exists()); + fileSegment.createFile(); + MemoryFileSegment fileSpySegment = Mockito.spy(fileSegment); + FileSegmentInputStream inputStream = Mockito.mock(FileSegmentInputStream.class); + Mockito.when(inputStream.read(any())).thenThrow(new RuntimeException()); + Assert.assertFalse(fileSpySegment.commit0(inputStream, 0L, 0, false).join()); + fileSegment.destroyFile(); + } +} \ No newline at end of file diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/MockFileSegmentInputStream.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/MockFileSegmentInputStream.java deleted file mode 100644 index 3bbe41dd4b6..00000000000 --- a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/MockFileSegmentInputStream.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.rocketmq.tieredstore.provider; - -import java.io.InputStream; -import java.nio.ByteBuffer; -import java.util.List; -import org.apache.rocketmq.tieredstore.provider.stream.FileSegmentInputStream; - -public class MockFileSegmentInputStream extends FileSegmentInputStream { - - private final InputStream inputStream; - - public MockFileSegmentInputStream(InputStream inputStream) { - super(null, null, Integer.MAX_VALUE); - this.inputStream = inputStream; - } - - @Override - public int read() { - int res = -1; - try { - res = inputStream.read(); - } catch (Exception e) { - return -1; - } - return res; - } - - @Override - public List getBufferList() { - return null; - } - - @Override - public ByteBuffer getCodaBuffer() { - return null; - } -} diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/PosixFileSegmentTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/PosixFileSegmentTest.java new file mode 100644 index 00000000000..e74e46a5431 --- /dev/null +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/PosixFileSegmentTest.java @@ -0,0 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.rocketmq.tieredstore.provider; + +public class PosixFileSegmentTest { + +} diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/TieredFileSegmentTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/TieredFileSegmentTest.java deleted file mode 100644 index a655710a500..00000000000 --- a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/TieredFileSegmentTest.java +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.rocketmq.tieredstore.provider; - -import java.nio.ByteBuffer; -import java.util.concurrent.CompletableFuture; -import org.apache.rocketmq.common.message.MessageQueue; -import org.apache.rocketmq.tieredstore.common.FileSegmentType; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.file.TieredCommitLog; -import org.apache.rocketmq.tieredstore.file.TieredConsumeQueue; -import org.apache.rocketmq.tieredstore.provider.memory.MemoryFileSegment; -import org.apache.rocketmq.tieredstore.util.MessageBufferUtil; -import org.apache.rocketmq.tieredstore.util.MessageBufferUtilTest; -import org.junit.Assert; -import org.junit.Test; - -public class TieredFileSegmentTest { - - public int baseOffset = 1000; - - public TieredFileSegment createFileSegment(FileSegmentType fileType) { - String brokerName = new TieredMessageStoreConfig().getBrokerName(); - return new MemoryFileSegment(fileType, new MessageQueue("TieredFileSegmentTest", brokerName, 0), - baseOffset, new TieredMessageStoreConfig()); - } - - @Test - public void testCommitLog() { - TieredFileSegment segment = createFileSegment(FileSegmentType.COMMIT_LOG); - segment.initPosition(segment.getSize()); - long lastSize = segment.getSize(); - segment.append(MessageBufferUtilTest.buildMockedMessageBuffer(), 0); - segment.append(MessageBufferUtilTest.buildMockedMessageBuffer(), 0); - Assert.assertTrue(segment.needCommit()); - - ByteBuffer buffer = MessageBufferUtilTest.buildMockedMessageBuffer(); - long msg3StoreTime = System.currentTimeMillis(); - buffer.putLong(MessageBufferUtil.STORE_TIMESTAMP_POSITION, msg3StoreTime); - long queueOffset = baseOffset * 1000L; - buffer.putLong(MessageBufferUtil.QUEUE_OFFSET_POSITION, queueOffset); - segment.append(buffer, msg3StoreTime); - - Assert.assertEquals(baseOffset, segment.getBaseOffset()); - Assert.assertEquals(baseOffset + lastSize + MessageBufferUtilTest.MSG_LEN * 3, segment.getMaxOffset()); - Assert.assertEquals(0, segment.getMinTimestamp()); - Assert.assertEquals(msg3StoreTime, segment.getMaxTimestamp()); - - segment.setFull(); - segment.commit(); - Assert.assertFalse(segment.needCommit()); - Assert.assertEquals(segment.getMaxOffset(), segment.getCommitOffset()); - Assert.assertEquals(queueOffset, segment.getDispatchCommitOffset()); - - ByteBuffer msg1 = segment.read(lastSize, MessageBufferUtilTest.MSG_LEN); - Assert.assertEquals(baseOffset + lastSize, MessageBufferUtil.getCommitLogOffset(msg1)); - - ByteBuffer msg2 = segment.read(lastSize + MessageBufferUtilTest.MSG_LEN, MessageBufferUtilTest.MSG_LEN); - Assert.assertEquals(baseOffset + lastSize + MessageBufferUtilTest.MSG_LEN, MessageBufferUtil.getCommitLogOffset(msg2)); - - ByteBuffer msg3 = segment.read(lastSize + MessageBufferUtilTest.MSG_LEN * 2, MessageBufferUtilTest.MSG_LEN); - Assert.assertEquals(baseOffset + lastSize + MessageBufferUtilTest.MSG_LEN * 2, MessageBufferUtil.getCommitLogOffset(msg3)); - - ByteBuffer coda = segment.read(lastSize + MessageBufferUtilTest.MSG_LEN * 3, TieredCommitLog.CODA_SIZE); - Assert.assertEquals(msg3StoreTime, coda.getLong(4 + 4)); - } - - private ByteBuffer buildConsumeQueue(long commitLogOffset) { - ByteBuffer cqItem = ByteBuffer.allocate(TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE); - cqItem.putLong(commitLogOffset); - cqItem.putInt(2); - cqItem.putLong(3); - cqItem.flip(); - return cqItem; - } - - @Test - public void testConsumeQueue() { - TieredFileSegment segment = createFileSegment(FileSegmentType.CONSUME_QUEUE); - 
segment.initPosition(segment.getSize()); - long lastSize = segment.getSize(); - segment.append(buildConsumeQueue(baseOffset), 0); - segment.append(buildConsumeQueue(baseOffset + MessageBufferUtilTest.MSG_LEN), 0); - long cqItem3Timestamp = System.currentTimeMillis(); - segment.append(buildConsumeQueue(baseOffset + MessageBufferUtilTest.MSG_LEN * 2), cqItem3Timestamp); - - Assert.assertEquals(baseOffset + lastSize + TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE * 3, segment.getMaxOffset()); - Assert.assertEquals(0, segment.getMinTimestamp()); - Assert.assertEquals(cqItem3Timestamp, segment.getMaxTimestamp()); - - segment.commit(); - Assert.assertEquals(segment.getMaxOffset(), segment.getCommitOffset()); - - ByteBuffer cqItem1 = segment.read(lastSize, TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE); - Assert.assertEquals(baseOffset, cqItem1.getLong()); - - ByteBuffer cqItem2 = segment.read(lastSize + TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE, TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE); - Assert.assertEquals(baseOffset + MessageBufferUtilTest.MSG_LEN, cqItem2.getLong()); - - ByteBuffer cqItem3 = segment.read(lastSize + TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE * 2, TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE); - Assert.assertEquals(baseOffset + MessageBufferUtilTest.MSG_LEN * 2, cqItem3.getLong()); - } - - @Test - public void testCommitFailedThenSuccess() { - long startTime = System.currentTimeMillis(); - MemoryFileSegment segment = (MemoryFileSegment) createFileSegment(FileSegmentType.COMMIT_LOG); - long lastSize = segment.getSize(); - segment.setCheckSize(false); - segment.initPosition(lastSize); - segment.setSize((int) lastSize); - - ByteBuffer buffer1 = MessageBufferUtilTest.buildMockedMessageBuffer().putLong( - MessageBufferUtil.PHYSICAL_OFFSET_POSITION, baseOffset + lastSize); - ByteBuffer buffer2 = MessageBufferUtilTest.buildMockedMessageBuffer().putLong( - MessageBufferUtil.PHYSICAL_OFFSET_POSITION, baseOffset + lastSize + MessageBufferUtilTest.MSG_LEN); - segment.append(buffer1, 0); - segment.append(buffer2, 0); - - // Mock new message arrive - segment.blocker = new CompletableFuture<>(); - new Thread(() -> { - try { - Thread.sleep(1000); - } catch (InterruptedException e) { - Assert.fail(e.getMessage()); - } - ByteBuffer buffer = MessageBufferUtilTest.buildMockedMessageBuffer(); - buffer.putLong(MessageBufferUtil.PHYSICAL_OFFSET_POSITION, MessageBufferUtilTest.MSG_LEN * 2); - buffer.putLong(MessageBufferUtil.STORE_TIMESTAMP_POSITION, startTime); - segment.append(buffer, 0); - segment.blocker.complete(false); - }).start(); - - // Commit failed - segment.commit(); - segment.blocker.join(); - segment.blocker = null; - - // Copy data and assume commit success - segment.getMemStore().put(buffer1); - segment.getMemStore().put(buffer2); - segment.setSize((int) (lastSize + MessageBufferUtilTest.MSG_LEN * 2)); - - segment.commit(); - Assert.assertEquals(lastSize + MessageBufferUtilTest.MSG_LEN * 3, segment.getCommitPosition()); - Assert.assertEquals(baseOffset + lastSize + MessageBufferUtilTest.MSG_LEN * 3, segment.getCommitOffset()); - Assert.assertEquals(baseOffset + lastSize + MessageBufferUtilTest.MSG_LEN * 3, segment.getMaxOffset()); - - ByteBuffer msg1 = segment.read(lastSize, MessageBufferUtilTest.MSG_LEN); - Assert.assertEquals(baseOffset + lastSize, MessageBufferUtil.getCommitLogOffset(msg1)); - - ByteBuffer msg2 = segment.read(lastSize + MessageBufferUtilTest.MSG_LEN, MessageBufferUtilTest.MSG_LEN); - Assert.assertEquals(baseOffset + lastSize + 
MessageBufferUtilTest.MSG_LEN, MessageBufferUtil.getCommitLogOffset(msg2)); - - ByteBuffer msg3 = segment.read(lastSize + MessageBufferUtilTest.MSG_LEN * 2, MessageBufferUtilTest.MSG_LEN); - Assert.assertEquals(baseOffset + lastSize + MessageBufferUtilTest.MSG_LEN * 2, MessageBufferUtil.getCommitLogOffset(msg3)); - } - - @Test - public void testCommitFailed3Times() { - long startTime = System.currentTimeMillis(); - MemoryFileSegment segment = (MemoryFileSegment) createFileSegment(FileSegmentType.COMMIT_LOG); - long lastSize = segment.getSize(); - segment.setCheckSize(false); - segment.initPosition(lastSize); - segment.setSize((int) lastSize); - - ByteBuffer buffer1 = MessageBufferUtilTest.buildMockedMessageBuffer().putLong( - MessageBufferUtil.PHYSICAL_OFFSET_POSITION, baseOffset + lastSize); - ByteBuffer buffer2 = MessageBufferUtilTest.buildMockedMessageBuffer().putLong( - MessageBufferUtil.PHYSICAL_OFFSET_POSITION, baseOffset + lastSize + MessageBufferUtilTest.MSG_LEN); - segment.append(buffer1, 0); - segment.append(buffer2, 0); - - // Mock new message arrive - segment.blocker = new CompletableFuture<>(); - new Thread(() -> { - try { - Thread.sleep(3000); - } catch (InterruptedException e) { - Assert.fail(e.getMessage()); - } - ByteBuffer buffer = MessageBufferUtilTest.buildMockedMessageBuffer(); - buffer.putLong(MessageBufferUtil.PHYSICAL_OFFSET_POSITION, MessageBufferUtilTest.MSG_LEN * 2); - buffer.putLong(MessageBufferUtil.STORE_TIMESTAMP_POSITION, startTime); - segment.append(buffer, 0); - segment.blocker.complete(false); - }).start(); - - for (int i = 0; i < 3; i++) { - segment.commit(); - } - - Assert.assertEquals(lastSize, segment.getCommitPosition()); - Assert.assertEquals(baseOffset + lastSize, segment.getCommitOffset()); - Assert.assertEquals(baseOffset + lastSize + MessageBufferUtilTest.MSG_LEN * 3, segment.getMaxOffset()); - - segment.blocker.join(); - segment.blocker = null; - - segment.commit(); - Assert.assertEquals(lastSize + MessageBufferUtilTest.MSG_LEN * 2, segment.getCommitPosition()); - Assert.assertEquals(baseOffset + lastSize + MessageBufferUtilTest.MSG_LEN * 2, segment.getCommitOffset()); - Assert.assertEquals(baseOffset + lastSize + MessageBufferUtilTest.MSG_LEN * 3, segment.getMaxOffset()); - - segment.commit(); - Assert.assertEquals(lastSize + MessageBufferUtilTest.MSG_LEN * 3, segment.getCommitPosition()); - Assert.assertEquals(baseOffset + lastSize + MessageBufferUtilTest.MSG_LEN * 3, segment.getCommitOffset()); - Assert.assertEquals(baseOffset + lastSize + MessageBufferUtilTest.MSG_LEN * 3, segment.getMaxOffset()); - - ByteBuffer msg1 = segment.read(lastSize, MessageBufferUtilTest.MSG_LEN); - Assert.assertEquals(baseOffset + lastSize, MessageBufferUtil.getCommitLogOffset(msg1)); - - ByteBuffer msg2 = segment.read(lastSize + MessageBufferUtilTest.MSG_LEN, MessageBufferUtilTest.MSG_LEN); - Assert.assertEquals(baseOffset + lastSize + MessageBufferUtilTest.MSG_LEN, MessageBufferUtil.getCommitLogOffset(msg2)); - - ByteBuffer msg3 = segment.read(lastSize + MessageBufferUtilTest.MSG_LEN * 2, MessageBufferUtilTest.MSG_LEN); - Assert.assertEquals(baseOffset + lastSize + MessageBufferUtilTest.MSG_LEN * 2, MessageBufferUtil.getCommitLogOffset(msg3)); - } -} diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/memory/MemoryFileSegmentWithoutCheck.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/memory/MemoryFileSegmentWithoutCheck.java deleted file mode 100644 index 630fd22236c..00000000000 --- 
a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/memory/MemoryFileSegmentWithoutCheck.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.rocketmq.tieredstore.provider.memory; - -import java.io.File; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import org.apache.rocketmq.common.message.MessageQueue; -import org.apache.rocketmq.tieredstore.common.FileSegmentType; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.provider.stream.FileSegmentInputStream; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; -import org.junit.Assert; - -public class MemoryFileSegmentWithoutCheck extends MemoryFileSegment { - - public MemoryFileSegmentWithoutCheck(FileSegmentType fileType, - MessageQueue messageQueue, long baseOffset, TieredMessageStoreConfig storeConfig) { - super(storeConfig, fileType, - storeConfig.getStorePathRootDir() + File.separator + TieredStoreUtil.toPath(messageQueue), - baseOffset); - } - - public MemoryFileSegmentWithoutCheck(TieredMessageStoreConfig storeConfig, - FileSegmentType fileType, String filePath, long baseOffset) { - super(storeConfig, fileType, filePath, baseOffset); - } - - @Override - public long getSize() { - return 0; - } - - @Override - public CompletableFuture commit0(FileSegmentInputStream inputStream, long position, int length, - boolean append) { - try { - if (blocker != null && !blocker.get()) { - throw new IllegalStateException(); - } - } catch (InterruptedException | ExecutionException e) { - Assert.fail(e.getMessage()); - } - - byte[] buffer = new byte[1024]; - - int startPos = memStore.position(); - try { - int len; - while ((len = inputStream.read(buffer)) > 0) { - memStore.put(buffer, 0, len); - } - Assert.assertEquals(length, memStore.position() - startPos); - } catch (Exception e) { - Assert.fail(e.getMessage()); - return CompletableFuture.completedFuture(false); - } - return CompletableFuture.completedFuture(true); - } -} diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/posix/PosixFileSegmentTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/posix/PosixFileSegmentTest.java deleted file mode 100644 index db33ae847f2..00000000000 --- a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/posix/PosixFileSegmentTest.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.rocketmq.tieredstore.provider.posix; - -import com.google.common.io.ByteStreams; -import com.google.common.io.Files; -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Random; -import org.apache.rocketmq.common.message.MessageQueue; -import org.apache.rocketmq.tieredstore.TieredStoreTestUtil; -import org.apache.rocketmq.tieredstore.common.FileSegmentType; -import org.apache.rocketmq.tieredstore.common.TieredMessageStoreConfig; -import org.apache.rocketmq.tieredstore.common.TieredStoreExecutor; -import org.apache.rocketmq.tieredstore.util.TieredStoreUtil; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -public class PosixFileSegmentTest { - - private final String storePath = TieredStoreTestUtil.getRandomStorePath(); - private TieredMessageStoreConfig storeConfig; - private MessageQueue mq; - - @Before - public void setUp() { - storeConfig = new TieredMessageStoreConfig(); - storeConfig.setTieredStoreFilePath(storePath); - mq = new MessageQueue("OSSFileSegmentTest", "broker", 0); - TieredStoreExecutor.init(); - } - - @After - public void tearDown() throws IOException { - TieredStoreTestUtil.destroyCompositeFlatFileManager(); - TieredStoreTestUtil.destroyMetadataStore(); - TieredStoreTestUtil.destroyTempDir(storePath); - TieredStoreExecutor.shutdown(); - } - - @Test - public void testCommitAndRead() throws IOException { - PosixFileSegment fileSegment = new PosixFileSegment( - storeConfig, FileSegmentType.CONSUME_QUEUE, TieredStoreUtil.toPath(mq), 0); - byte[] source = new byte[4096]; - new Random().nextBytes(source); - ByteBuffer buffer = ByteBuffer.wrap(source); - fileSegment.append(buffer, 0); - fileSegment.commit(); - - File file = new File(fileSegment.getPath()); - Assert.assertTrue(file.exists()); - byte[] result = new byte[4096]; - ByteStreams.read(Files.asByteSource(file).openStream(), result, 0, 4096); - Assert.assertArrayEquals(source, result); - - ByteBuffer read = fileSegment.read(0, 4096); - Assert.assertArrayEquals(source, read.array()); - } -} diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/TieredFileSegmentInputStreamTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/stream/FileSegmentInputStreamTest.java similarity index 87% rename from tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/TieredFileSegmentInputStreamTest.java rename to tieredstore/src/test/java/org/apache/rocketmq/tieredstore/stream/FileSegmentInputStreamTest.java index 743d9182ce3..3d0dd57a8b9 100644 --- a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/provider/TieredFileSegmentInputStreamTest.java +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/stream/FileSegmentInputStreamTest.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.rocketmq.tieredstore.provider; +package org.apache.rocketmq.tieredstore.stream; import com.google.common.base.Supplier; import java.io.IOException; @@ -26,20 +26,16 @@ import java.util.List; import java.util.Random; import org.apache.rocketmq.tieredstore.common.FileSegmentType; -import org.apache.rocketmq.tieredstore.file.TieredCommitLog; -import org.apache.rocketmq.tieredstore.file.TieredConsumeQueue; -import org.apache.rocketmq.tieredstore.provider.stream.FileSegmentInputStream; -import org.apache.rocketmq.tieredstore.provider.stream.FileSegmentInputStreamFactory; -import org.apache.rocketmq.tieredstore.util.MessageBufferUtil; -import org.apache.rocketmq.tieredstore.util.MessageBufferUtilTest; +import org.apache.rocketmq.tieredstore.util.MessageFormatUtil; +import org.apache.rocketmq.tieredstore.util.MessageFormatUtilTest; import org.junit.Assert; import org.junit.Test; -public class TieredFileSegmentInputStreamTest { +public class FileSegmentInputStreamTest { private final static long COMMIT_LOG_START_OFFSET = 13131313; - private final static int MSG_LEN = MessageBufferUtilTest.MSG_LEN; + private final static int MSG_LEN = MessageFormatUtilTest.MSG_LEN; private final static int MSG_NUM = 10; @@ -52,7 +48,7 @@ public void testCommitLogTypeInputStream() { List uploadBufferList = new ArrayList<>(); int bufferSize = 0; for (int i = 0; i < MSG_NUM; i++) { - ByteBuffer byteBuffer = MessageBufferUtilTest.buildMockedMessageBuffer(); + ByteBuffer byteBuffer = MessageFormatUtilTest.buildMockedMessageBuffer(); uploadBufferList.add(byteBuffer); bufferSize += byteBuffer.remaining(); } @@ -66,13 +62,13 @@ public void testCommitLogTypeInputStream() { // set real physical offset for (int i = 0; i < MSG_NUM; i++) { long physicalOffset = COMMIT_LOG_START_OFFSET + i * MSG_LEN; - int position = i * MSG_LEN + MessageBufferUtil.PHYSICAL_OFFSET_POSITION; + int position = i * MSG_LEN + MessageFormatUtil.PHYSICAL_OFFSET_POSITION; expectedByteBuffer.putLong(position, physicalOffset); } int finalBufferSize = bufferSize; int[] batchReadSizeTestSet = { - MessageBufferUtil.PHYSICAL_OFFSET_POSITION - 1, MessageBufferUtil.PHYSICAL_OFFSET_POSITION, MessageBufferUtil.PHYSICAL_OFFSET_POSITION + 1, MSG_LEN - 1, MSG_LEN, MSG_LEN + 1 + MessageFormatUtil.PHYSICAL_OFFSET_POSITION - 1, MessageFormatUtil.PHYSICAL_OFFSET_POSITION, MessageFormatUtil.PHYSICAL_OFFSET_POSITION + 1, MSG_LEN - 1, MSG_LEN, MSG_LEN + 1 }; verifyReadAndReset(expectedByteBuffer, () -> FileSegmentInputStreamFactory.build( FileSegmentType.COMMIT_LOG, COMMIT_LOG_START_OFFSET, uploadBufferList, null, finalBufferSize), finalBufferSize, batchReadSizeTestSet); @@ -84,14 +80,14 @@ public void testCommitLogTypeInputStreamWithCoda() { List uploadBufferList = new ArrayList<>(); int bufferSize = 0; for (int i = 0; i < MSG_NUM; i++) { - ByteBuffer byteBuffer = MessageBufferUtilTest.buildMockedMessageBuffer(); + ByteBuffer byteBuffer = MessageFormatUtilTest.buildMockedMessageBuffer(); uploadBufferList.add(byteBuffer); bufferSize += byteBuffer.remaining(); } - ByteBuffer codaBuffer = ByteBuffer.allocate(TieredCommitLog.CODA_SIZE); - codaBuffer.putInt(TieredCommitLog.CODA_SIZE); - codaBuffer.putInt(TieredCommitLog.BLANK_MAGIC_CODE); + ByteBuffer codaBuffer = ByteBuffer.allocate(MessageFormatUtil.COMMIT_LOG_CODA_SIZE); + codaBuffer.putInt(MessageFormatUtil.COMMIT_LOG_CODA_SIZE); + codaBuffer.putInt(MessageFormatUtil.BLANK_MAGIC_CODE); long timeMillis = System.currentTimeMillis(); codaBuffer.putLong(timeMillis); codaBuffer.flip(); @@ -109,13 +105,13 
@@ public void testCommitLogTypeInputStreamWithCoda() { // set real physical offset for (int i = 0; i < MSG_NUM; i++) { long physicalOffset = COMMIT_LOG_START_OFFSET + i * MSG_LEN; - int position = i * MSG_LEN + MessageBufferUtil.PHYSICAL_OFFSET_POSITION; + int position = i * MSG_LEN + MessageFormatUtil.PHYSICAL_OFFSET_POSITION; expectedByteBuffer.putLong(position, physicalOffset); } int finalBufferSize = bufferSize; int[] batchReadSizeTestSet = { - MessageBufferUtil.PHYSICAL_OFFSET_POSITION - 1, MessageBufferUtil.PHYSICAL_OFFSET_POSITION, MessageBufferUtil.PHYSICAL_OFFSET_POSITION + 1, + MessageFormatUtil.PHYSICAL_OFFSET_POSITION - 1, MessageFormatUtil.PHYSICAL_OFFSET_POSITION, MessageFormatUtil.PHYSICAL_OFFSET_POSITION + 1, MSG_LEN - 1, MSG_LEN, MSG_LEN + 1, bufferSize - 1, bufferSize, bufferSize + 1 }; @@ -129,7 +125,7 @@ public void testConsumeQueueTypeInputStream() { List uploadBufferList = new ArrayList<>(); int bufferSize = 0; for (int i = 0; i < MSG_NUM; i++) { - ByteBuffer byteBuffer = MessageBufferUtilTest.buildMockedConsumeQueueBuffer(); + ByteBuffer byteBuffer = MessageFormatUtilTest.buildMockedConsumeQueueBuffer(); uploadBufferList.add(byteBuffer); bufferSize += byteBuffer.remaining(); } @@ -142,7 +138,7 @@ public void testConsumeQueueTypeInputStream() { } int finalBufferSize = bufferSize; - int[] batchReadSizeTestSet = {TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE - 1, TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE, TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE + 1}; + int[] batchReadSizeTestSet = {MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE - 1, MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE, MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE + 1}; verifyReadAndReset(expectedByteBuffer, () -> FileSegmentInputStreamFactory.build( FileSegmentType.CONSUME_QUEUE, COMMIT_LOG_START_OFFSET, uploadBufferList, null, finalBufferSize), bufferSize, batchReadSizeTestSet); } diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/util/MessageBufferUtilTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/util/MessageFormatUtilTest.java similarity index 54% rename from tieredstore/src/test/java/org/apache/rocketmq/tieredstore/util/MessageBufferUtilTest.java rename to tieredstore/src/test/java/org/apache/rocketmq/tieredstore/util/MessageFormatUtilTest.java index a0b43894817..be4805be833 100644 --- a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/util/MessageBufferUtilTest.java +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/util/MessageFormatUtilTest.java @@ -25,70 +25,37 @@ import org.apache.rocketmq.common.message.MessageConst; import org.apache.rocketmq.common.message.MessageDecoder; import org.apache.rocketmq.tieredstore.common.SelectBufferResult; -import org.apache.rocketmq.tieredstore.file.TieredCommitLog; -import org.apache.rocketmq.tieredstore.file.TieredConsumeQueue; import org.junit.Assert; import org.junit.Test; -public class MessageBufferUtilTest { - public static final int MSG_LEN = 4 //TOTALSIZE - + 4 //MAGICCODE - + 4 //BODYCRC - + 4 //QUEUEID - + 4 //FLAG - + 8 //QUEUEOFFSET - + 8 //PHYSICALOFFSET - + 4 //SYSFLAG - + 8 //BORNTIMESTAMP - + 8 //BORNHOST - + 8 //STORETIMESTAMP - + 8 //STOREHOSTADDRESS - + 4 //RECONSUMETIMES - + 8 //Prepared Transaction Offset - + 4 + 0 //BODY - + 2 + 0 //TOPIC - + 2 + 31 //properties - + 0; +import static org.apache.rocketmq.tieredstore.util.MessageFormatUtil.COMMIT_LOG_CODA_SIZE; + +public class MessageFormatUtilTest { + + public static final int MSG_LEN = 123; public static ByteBuffer 
buildMockedMessageBuffer() { - // Initialization of storage space ByteBuffer buffer = ByteBuffer.allocate(MSG_LEN); - // 1 TOTALSIZE buffer.putInt(MSG_LEN); - // 2 MAGICCODE buffer.putInt(MessageDecoder.MESSAGE_MAGIC_CODE_V2); - // 3 BODYCRC buffer.putInt(3); - // 4 QUEUEID buffer.putInt(4); - // 5 FLAG buffer.putInt(5); - // 6 QUEUEOFFSET buffer.putLong(6); - // 7 PHYSICALOFFSET buffer.putLong(7); - // 8 SYSFLAG buffer.putInt(8); - // 9 BORNTIMESTAMP buffer.putLong(9); - // 10 BORNHOST buffer.putLong(10); - // 11 STORETIMESTAMP buffer.putLong(11); - // 12 STOREHOSTADDRESS buffer.putLong(10); - // 13 RECONSUMETIMES buffer.putInt(13); - // 14 Prepared Transaction Offset buffer.putLong(14); - // 15 BODY buffer.putInt(0); - // 16 TOPIC buffer.putShort((short) 0); - // 17 PROPERTIES + Map map = new HashMap<>(); map.put(MessageConst.PROPERTY_UNIQ_CLIENT_MESSAGE_ID_KEYIDX, "uk"); - map.put("userkey", "uservalue0"); + map.put("UserKey", "UserValue0"); String properties = MessageDecoder.messageProperties2String(map); byte[] propertiesBytes = properties.getBytes(StandardCharsets.UTF_8); buffer.putShort((short) propertiesBytes.length); @@ -99,19 +66,9 @@ public static ByteBuffer buildMockedMessageBuffer() { return buffer; } - public static ByteBuffer buildMockedConsumeQueueBuffer() { - ByteBuffer byteBuffer = ByteBuffer.allocate(TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE); - // 1 COMMIT_LOG_OFFSET - byteBuffer.putLong(1); - // 2 MESSAGE_SIZE - byteBuffer.putInt(2); - // 3 TAG_HASH_CODE - byteBuffer.putLong(3); - byteBuffer.flip(); - return byteBuffer; - } - - public static void verifyMockedMessageBuffer(ByteBuffer buffer, int phyOffset) { + @Test + public void verifyMockedMessageBuffer() { + ByteBuffer buffer = buildMockedMessageBuffer(); Assert.assertEquals(MSG_LEN, buffer.remaining()); Assert.assertEquals(MSG_LEN, buffer.getInt()); Assert.assertEquals(MessageDecoder.MESSAGE_MAGIC_CODE_V2, buffer.getInt()); @@ -119,7 +76,7 @@ public static void verifyMockedMessageBuffer(ByteBuffer buffer, int phyOffset) { Assert.assertEquals(4, buffer.getInt()); Assert.assertEquals(5, buffer.getInt()); Assert.assertEquals(6, buffer.getLong()); - Assert.assertEquals(phyOffset, buffer.getLong()); + Assert.assertEquals(7, buffer.getLong()); Assert.assertEquals(8, buffer.getInt()); Assert.assertEquals(9, buffer.getLong()); Assert.assertEquals(10, buffer.getLong()); @@ -130,38 +87,79 @@ public static void verifyMockedMessageBuffer(ByteBuffer buffer, int phyOffset) { Assert.assertEquals(0, buffer.getInt()); Assert.assertEquals(0, buffer.getShort()); buffer.rewind(); - Map properties = MessageBufferUtil.getProperties(buffer); + Map properties = MessageFormatUtil.getProperties(buffer); Assert.assertEquals("uk", properties.get(MessageConst.PROPERTY_UNIQ_CLIENT_MESSAGE_ID_KEYIDX)); - Assert.assertEquals("uservalue0", properties.get("userkey")); + Assert.assertEquals("UserValue0", properties.get("UserKey")); + } + + public static ByteBuffer buildMockedConsumeQueueBuffer() { + ByteBuffer buffer = ByteBuffer.allocate(MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE); + buffer.putLong(1L); + buffer.putInt(2); + buffer.putLong(3L); + buffer.flip(); + return buffer; } @Test - public void testGetTotalSize() { + public void verifyMockedConsumeQueueBuffer() { + ByteBuffer buffer = buildMockedConsumeQueueBuffer(); + Assert.assertEquals(1L, MessageFormatUtil.getCommitLogOffsetFromItem(buffer)); + Assert.assertEquals(2, MessageFormatUtil.getSizeFromItem(buffer)); + Assert.assertEquals(3L, MessageFormatUtil.getTagCodeFromItem(buffer)); + 
} + + @Test + public void messageFormatBasicTest() { ByteBuffer buffer = buildMockedMessageBuffer(); - int totalSize = MessageBufferUtil.getTotalSize(buffer); - Assert.assertEquals(MSG_LEN, totalSize); + Assert.assertEquals(MSG_LEN, MessageFormatUtil.getTotalSize(buffer)); + Assert.assertEquals(MessageDecoder.MESSAGE_MAGIC_CODE_V2, MessageFormatUtil.getMagicCode(buffer)); + Assert.assertEquals(6L, MessageFormatUtil.getQueueOffset(buffer)); + Assert.assertEquals(7L, MessageFormatUtil.getCommitLogOffset(buffer)); + Assert.assertEquals(11L, MessageFormatUtil.getStoreTimeStamp(buffer)); } @Test - public void testGetMagicCode() { + public void getOffsetIdTest() { ByteBuffer buffer = buildMockedMessageBuffer(); - int magicCode = MessageBufferUtil.getMagicCode(buffer); - Assert.assertEquals(MessageDecoder.MESSAGE_MAGIC_CODE_V2, magicCode); + InetSocketAddress inetSocketAddress = new InetSocketAddress("127.0.0.1", 65535); + ByteBuffer address = ByteBuffer.allocate(Long.BYTES); + address.put(inetSocketAddress.getAddress().getAddress(), 0, 4); + address.putInt(inetSocketAddress.getPort()); + address.flip(); + for (int i = 0; i < address.remaining(); i++) { + buffer.put(MessageFormatUtil.STORE_HOST_POSITION + i, address.get(i)); + } + String excepted = MessageDecoder.createMessageId( + ByteBuffer.allocate(MessageFormatUtil.MSG_ID_LENGTH), address, 7); + String offsetId = MessageFormatUtil.getOffsetId(buffer); + Assert.assertEquals(excepted, offsetId); + } + + @Test + public void getPropertiesTest() { + ByteBuffer buffer = buildMockedMessageBuffer(); + Map properties = MessageFormatUtil.getProperties(buffer); + Assert.assertEquals(2, properties.size()); + Assert.assertTrue(properties.containsKey(MessageConst.PROPERTY_UNIQ_CLIENT_MESSAGE_ID_KEYIDX)); + Assert.assertEquals("uk", properties.get(MessageConst.PROPERTY_UNIQ_CLIENT_MESSAGE_ID_KEYIDX)); + Assert.assertTrue(properties.containsKey("UserKey")); + Assert.assertEquals("UserValue0", properties.get("UserKey")); } @Test public void testSplitMessages() { ByteBuffer msgBuffer1 = buildMockedMessageBuffer(); - msgBuffer1.putLong(MessageBufferUtil.QUEUE_OFFSET_POSITION, 10); + msgBuffer1.putLong(MessageFormatUtil.QUEUE_OFFSET_POSITION, 10); - ByteBuffer msgBuffer2 = ByteBuffer.allocate(TieredCommitLog.CODA_SIZE); - msgBuffer2.putInt(TieredCommitLog.CODA_SIZE); - msgBuffer2.putInt(TieredCommitLog.BLANK_MAGIC_CODE); + ByteBuffer msgBuffer2 = ByteBuffer.allocate(COMMIT_LOG_CODA_SIZE); + msgBuffer2.putInt(MessageFormatUtil.COMMIT_LOG_CODA_SIZE); + msgBuffer2.putInt(MessageFormatUtil.BLANK_MAGIC_CODE); msgBuffer2.putLong(System.currentTimeMillis()); msgBuffer2.flip(); ByteBuffer msgBuffer3 = buildMockedMessageBuffer(); - msgBuffer3.putLong(MessageBufferUtil.QUEUE_OFFSET_POSITION, 11); + msgBuffer3.putLong(MessageFormatUtil.QUEUE_OFFSET_POSITION, 11); ByteBuffer msgBuffer = ByteBuffer.allocate( msgBuffer1.remaining() + msgBuffer2.remaining() + msgBuffer3.remaining()); @@ -170,116 +168,99 @@ public void testSplitMessages() { msgBuffer.put(msgBuffer3); msgBuffer.flip(); - ByteBuffer cqBuffer1 = ByteBuffer.allocate(TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE); + ByteBuffer cqBuffer1 = ByteBuffer.allocate(MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE); cqBuffer1.putLong(1000); cqBuffer1.putInt(MSG_LEN); cqBuffer1.putLong(0); cqBuffer1.flip(); - ByteBuffer cqBuffer2 = ByteBuffer.allocate(TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE); - cqBuffer2.putLong(1000 + TieredCommitLog.CODA_SIZE + MSG_LEN); + ByteBuffer cqBuffer2 = 
ByteBuffer.allocate(MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE); + cqBuffer2.putLong(1000 + MessageFormatUtil.COMMIT_LOG_CODA_SIZE + MSG_LEN); cqBuffer2.putInt(MSG_LEN); cqBuffer2.putLong(0); cqBuffer2.flip(); - ByteBuffer cqBuffer3 = ByteBuffer.allocate(TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE); + ByteBuffer cqBuffer3 = ByteBuffer.allocate(MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE); cqBuffer3.putLong(1000 + MSG_LEN); cqBuffer3.putInt(MSG_LEN); cqBuffer3.putLong(0); cqBuffer3.flip(); - ByteBuffer cqBuffer4 = ByteBuffer.allocate(TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE); - cqBuffer4.putLong(1000 + TieredCommitLog.CODA_SIZE + MSG_LEN); + ByteBuffer cqBuffer4 = ByteBuffer.allocate(MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE); + cqBuffer4.putLong(1000 + MessageFormatUtil.COMMIT_LOG_CODA_SIZE + MSG_LEN); cqBuffer4.putInt(MSG_LEN - 10); cqBuffer4.putLong(0); cqBuffer4.flip(); - ByteBuffer cqBuffer5 = ByteBuffer.allocate(TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE); - cqBuffer5.putLong(1000 + TieredCommitLog.CODA_SIZE + MSG_LEN); + ByteBuffer cqBuffer5 = ByteBuffer.allocate(MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE); + cqBuffer5.putLong(1000 + MessageFormatUtil.COMMIT_LOG_CODA_SIZE + MSG_LEN); cqBuffer5.putInt(MSG_LEN * 10); cqBuffer5.putLong(0); cqBuffer5.flip(); - ByteBuffer cqBuffer = ByteBuffer.allocate(TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE * 2); + // Message buffer size is 0 or consume queue buffer size is 0 + Assert.assertEquals(0, + MessageFormatUtil.splitMessageBuffer(null, ByteBuffer.allocate(0)).size()); + Assert.assertEquals(0, + MessageFormatUtil.splitMessageBuffer(cqBuffer1, null).size()); + Assert.assertEquals(0, + MessageFormatUtil.splitMessageBuffer(cqBuffer1, ByteBuffer.allocate(0)).size()); + Assert.assertEquals(0, + MessageFormatUtil.splitMessageBuffer(ByteBuffer.allocate(0), msgBuffer).size()); + Assert.assertEquals(0, + MessageFormatUtil.splitMessageBuffer(ByteBuffer.allocate(10), msgBuffer).size()); + + ByteBuffer cqBuffer = ByteBuffer.allocate(MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE * 2); cqBuffer.put(cqBuffer1); cqBuffer.put(cqBuffer2); cqBuffer.flip(); cqBuffer1.rewind(); cqBuffer2.rewind(); - List msgList = MessageBufferUtil.splitMessageBuffer(cqBuffer, msgBuffer); + List msgList = MessageFormatUtil.splitMessageBuffer(cqBuffer, msgBuffer); Assert.assertEquals(2, msgList.size()); Assert.assertEquals(0, msgList.get(0).getStartOffset()); Assert.assertEquals(MSG_LEN, msgList.get(0).getSize()); - Assert.assertEquals(MSG_LEN + TieredCommitLog.CODA_SIZE, msgList.get(1).getStartOffset()); + Assert.assertEquals(MSG_LEN + MessageFormatUtil.COMMIT_LOG_CODA_SIZE, msgList.get(1).getStartOffset()); Assert.assertEquals(MSG_LEN, msgList.get(1).getSize()); - cqBuffer = ByteBuffer.allocate(TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE * 2); + cqBuffer = ByteBuffer.allocate(MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE * 2); cqBuffer.put(cqBuffer1); cqBuffer.put(cqBuffer4); cqBuffer.flip(); cqBuffer1.rewind(); cqBuffer4.rewind(); - msgList = MessageBufferUtil.splitMessageBuffer(cqBuffer, msgBuffer); + msgList = MessageFormatUtil.splitMessageBuffer(cqBuffer, msgBuffer); Assert.assertEquals(1, msgList.size()); Assert.assertEquals(0, msgList.get(0).getStartOffset()); Assert.assertEquals(MSG_LEN, msgList.get(0).getSize()); - cqBuffer = ByteBuffer.allocate(TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE * 3); + cqBuffer = ByteBuffer.allocate(MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE * 3); cqBuffer.put(cqBuffer1); cqBuffer.put(cqBuffer3); cqBuffer.flip(); - msgList 
= MessageBufferUtil.splitMessageBuffer(cqBuffer, msgBuffer); + cqBuffer1.rewind(); + cqBuffer3.rewind(); + msgList = MessageFormatUtil.splitMessageBuffer(cqBuffer, msgBuffer); Assert.assertEquals(2, msgList.size()); Assert.assertEquals(0, msgList.get(0).getStartOffset()); Assert.assertEquals(MSG_LEN, msgList.get(0).getSize()); - Assert.assertEquals(MSG_LEN + TieredCommitLog.CODA_SIZE, msgList.get(1).getStartOffset()); + Assert.assertEquals(MSG_LEN + MessageFormatUtil.COMMIT_LOG_CODA_SIZE, msgList.get(1).getStartOffset()); Assert.assertEquals(MSG_LEN, msgList.get(1).getSize()); - cqBuffer = ByteBuffer.allocate(TieredConsumeQueue.CONSUME_QUEUE_STORE_UNIT_SIZE); + cqBuffer = ByteBuffer.allocate(MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE); cqBuffer.put(cqBuffer5); cqBuffer.flip(); - msgList = MessageBufferUtil.splitMessageBuffer(cqBuffer, msgBuffer); + msgList = MessageFormatUtil.splitMessageBuffer(cqBuffer, msgBuffer); Assert.assertEquals(0, msgList.size()); - } - - @Test - public void testGetQueueOffset() { - ByteBuffer buffer = buildMockedMessageBuffer(); - long queueOffset = MessageBufferUtil.getQueueOffset(buffer); - Assert.assertEquals(6, queueOffset); - } - - @Test - public void testGetStoreTimeStamp() { - ByteBuffer buffer = buildMockedMessageBuffer(); - long storeTimeStamp = MessageBufferUtil.getStoreTimeStamp(buffer); - Assert.assertEquals(11, storeTimeStamp); - } - - @Test - public void testGetOffsetId() { - ByteBuffer buffer = buildMockedMessageBuffer(); - InetSocketAddress inetSocketAddress = new InetSocketAddress("255.255.255.255", 65535); - ByteBuffer addr = ByteBuffer.allocate(Long.BYTES); - addr.put(inetSocketAddress.getAddress().getAddress(), 0, 4); - addr.putInt(inetSocketAddress.getPort()); - addr.flip(); - for (int i = 0; i < addr.remaining(); i++) { - buffer.put(MessageBufferUtil.STORE_HOST_POSITION + i, addr.get(i)); - } - String excepted = MessageDecoder.createMessageId(ByteBuffer.allocate(TieredStoreUtil.MSG_ID_LENGTH), addr, 7); - String offsetId = MessageBufferUtil.getOffsetId(buffer); - Assert.assertEquals(excepted, offsetId); - } - @Test - public void testGetProperties() { - ByteBuffer buffer = buildMockedMessageBuffer(); - Map properties = MessageBufferUtil.getProperties(buffer); - Assert.assertEquals(2, properties.size()); - Assert.assertTrue(properties.containsKey(MessageConst.PROPERTY_UNIQ_CLIENT_MESSAGE_ID_KEYIDX)); - Assert.assertEquals("uk", properties.get(MessageConst.PROPERTY_UNIQ_CLIENT_MESSAGE_ID_KEYIDX)); - Assert.assertTrue(properties.containsKey("userkey")); - Assert.assertEquals("uservalue0", properties.get("userkey")); + // Wrong magic code, it will destroy the mocked message buffer + msgBuffer.putInt(MessageFormatUtil.MAGIC_CODE_POSITION, -1); + cqBuffer = ByteBuffer.allocate(MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE * 2); + cqBuffer.put(cqBuffer1); + cqBuffer.put(cqBuffer2); + cqBuffer.flip(); + cqBuffer1.rewind(); + cqBuffer2.rewind(); + Assert.assertEquals(1, MessageFormatUtil.splitMessageBuffer(cqBuffer, msgBuffer).size()); } } diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/util/MessageStoreUtilTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/util/MessageStoreUtilTest.java new file mode 100644 index 00000000000..cadaef8708f --- /dev/null +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/util/MessageStoreUtilTest.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.rocketmq.tieredstore.util; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Paths; +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; +import org.apache.commons.io.FileUtils; +import org.apache.rocketmq.common.message.MessageQueue; +import org.junit.Assert; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class MessageStoreUtilTest { + + private static final Logger log = LoggerFactory.getLogger(MessageStoreUtil.TIERED_STORE_LOGGER_NAME); + private static final String TIERED_STORE_PATH = "tiered_store_test"; + + public static String getRandomStorePath() { + return Paths.get(System.getProperty("user.home"), TIERED_STORE_PATH, + UUID.randomUUID().toString().replace("-", "").toUpperCase().substring(0, 16)).toString(); + } + + public static void deleteStoreDirectory(String storePath) { + try { + FileUtils.deleteDirectory(new File(storePath)); + } catch (IOException e) { + log.error("Delete store directory failed, filePath: {}", storePath, e); + } + } + + @Test + public void toHumanReadableTest() { + Map capacityTable = new HashMap() { + { + put(-1L, "-1"); + put(0L, "0B"); + put(1023L, "1023B"); + put(1024L, "1KB"); + put(12_345L, "12.06KB"); + put(10_123_456L, "9.65MB"); + put(10_123_456_798L, "9.43GB"); + put(123 * 1024L * 1024L * 1024L * 1024L, "123TB"); + put(123 * 1024L * 1024L * 1024L * 1024L * 1024L, "123PB"); + put(1_777_777_777_777_777_777L, "1.54EB"); + } + }; + capacityTable.forEach((in, expected) -> + Assert.assertEquals(expected, MessageStoreUtil.toHumanReadable(in))); + } + + @Test + public void getHashTest() { + Assert.assertEquals("161c08ff", MessageStoreUtil.getHash("TieredStorageDailyTest")); + } + + @Test + public void filePathTest() { + MessageQueue mq = new MessageQueue(); + mq.setBrokerName("BrokerName"); + mq.setTopic("topicName"); + mq.setQueueId(2); + Assert.assertEquals("BrokerName/topicName/2", MessageStoreUtil.toFilePath(mq)); + } + + @Test + public void offset2FileNameTest() { + Assert.assertEquals("cfcd208400000000000000000000", MessageStoreUtil.offset2FileName(0)); + Assert.assertEquals("b10da56800000000004294937144", MessageStoreUtil.offset2FileName(4294937144L)); + } + + @Test + public void fileName2OffsetTest() { + Assert.assertEquals(0, MessageStoreUtil.fileName2Offset("cfcd208400000000000000000000")); + Assert.assertEquals(4294937144L, MessageStoreUtil.fileName2Offset("b10da56800000000004294937144")); + } + + @Test + public void indexServicePathTest() { + Assert.assertEquals("brokerName/rmq_sys_INDEX/0", MessageStoreUtil.getIndexFilePath("brokerName")); + } +} diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/util/TieredStoreUtilTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/util/TieredStoreUtilTest.java deleted file 
mode 100644 index 82e11252485..00000000000 --- a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/util/TieredStoreUtilTest.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.rocketmq.tieredstore.util; - -import java.util.HashMap; -import java.util.Map; -import org.junit.Assert; -import org.junit.Test; - -public class TieredStoreUtilTest { - - private static final Map DATA_MAP = new HashMap() { - { - put(0L, "0Bytes"); - put(1023L, "1023Bytes"); - put(1024L, "1KB"); - put(12_345L, "12.06KB"); - put(10_123_456L, "9.65MB"); - put(10_123_456_798L, "9.43GB"); - put(1_777_777_777_777_777_777L, "1.54EB"); - } - }; - - @Test - public void getHash() { - Assert.assertEquals("161c08ff", TieredStoreUtil.getHash("TieredStorageDailyTest")); - } - - @Test - public void testOffset2FileName() { - Assert.assertEquals("cfcd208400000000000000000000", TieredStoreUtil.offset2FileName(0)); - Assert.assertEquals("b10da56800000000004294937144", TieredStoreUtil.offset2FileName(4294937144L)); - } - - @Test - public void testFileName2Offset() { - Assert.assertEquals(0, TieredStoreUtil.fileName2Offset("cfcd208400000000000000000000")); - Assert.assertEquals(4294937144L, TieredStoreUtil.fileName2Offset("b10da56800000000004294937144")); - } - - @Test - public void testToHumanReadable() { - DATA_MAP.forEach((in, expected) -> Assert.assertEquals(expected, TieredStoreUtil.toHumanReadable(in))); - } -} diff --git a/tieredstore/src/test/resources/rmq.logback-test.xml b/tieredstore/src/test/resources/rmq.logback-test.xml index ac0895e05e4..070bf134cb9 100644 --- a/tieredstore/src/test/resources/rmq.logback-test.xml +++ b/tieredstore/src/test/resources/rmq.logback-test.xml @@ -24,7 +24,7 @@ + value="%d{yyyy-MM-dd HH:mm:ss.SSS,GMT+8} ${LOG_LEVEL_PATTERN:-%5p} [%20.20thread] %m%n"/>