From 047fa33704cb13ee438636a9ad6c5c1171f1c9c2 Mon Sep 17 00:00:00 2001 From: OpenIM-Robot Date: Wed, 30 Oct 2024 11:47:21 +0800 Subject: [PATCH] deps: Merge #2804 #2805 #2806 #2808 PRs into pre-release-v3.8.2 (#2812) * fix: improve time condition check method. (#2804) * fix: improve time condition check method. * fix * fix: webhook before online push (#2805) * fix: solve err Notification when setGroupInfo. (#2806) * fix: solve err Notification when setGroupInfo. * build: update checkout version. * fix: update notification contents. * fix: set own read seq in MongoDB when sender sends a message. (#2808) --------- Co-authored-by: Monet Lee Co-authored-by: icey-yu <119291641+icey-yu@users.noreply.github.com> Co-authored-by: OpenIM-Gordon <46924906+FGadvancer@users.noreply.github.com> --- .../cleanup-after-milestone-prs-merged.yml | 2 +- config/notification.yml | 23 +---- internal/msgtransfer/init.go | 3 + .../msgtransfer/online_history_msg_handler.go | 99 +++++++++++++------ internal/push/callback.go | 3 +- internal/rpc/group/group.go | 7 +- pkg/common/storage/controller/msg_transfer.go | 35 ++++--- 7 files changed, 99 insertions(+), 73 deletions(-) diff --git a/.github/workflows/cleanup-after-milestone-prs-merged.yml b/.github/workflows/cleanup-after-milestone-prs-merged.yml index ff4e8029da..8a3e381d60 100644 --- a/.github/workflows/cleanup-after-milestone-prs-merged.yml +++ b/.github/workflows/cleanup-after-milestone-prs-merged.yml @@ -11,7 +11,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4.2.0 - name: Get the PR title and extract PR numbers id: extract_pr_numbers diff --git a/config/notification.yml b/config/notification.yml index 85ca91af18..ba5ca1c21a 100644 --- a/config/notification.yml +++ b/config/notification.yml @@ -1,20 +1,3 @@ -# Copyright © 2023 OpenIM. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Determines if a message should be sent. If set to false, it triggers a silent sync without a message. If true, it requires triggering a conversation. -# For rpc notification, send twice: once as a message and once as a notification. -# The options field 'isNotification' indicates if it's a notification. groupCreated: isSendMsg: true # Reliability level of the message sending.
@@ -309,9 +292,9 @@ userInfoUpdated: unreadCount: false offlinePush: enable: true - title: Remove a blocked user - desc: Remove a blocked user - ext: Remove a blocked user + title: userInfo updated + desc: userInfo updated + ext: userInfo updated userStatusChanged: isSendMsg: false diff --git a/internal/msgtransfer/init.go b/internal/msgtransfer/init.go index 7dc2ebeea0..f11cfde1af 100644 --- a/internal/msgtransfer/init.go +++ b/internal/msgtransfer/init.go @@ -128,6 +128,7 @@ func (m *MsgTransfer) Start(index int, config *Config) error { go m.historyCH.historyConsumerGroup.RegisterHandleAndConsumer(m.ctx, m.historyCH) go m.historyMongoCH.historyConsumerGroup.RegisterHandleAndConsumer(m.ctx, m.historyMongoCH) + go m.historyCH.HandleUserHasReadSeqMessages(m.ctx) err := m.historyCH.redisMessageBatches.Start() if err != nil { return err @@ -157,12 +158,14 @@ func (m *MsgTransfer) Start(index int, config *Config) error { // graceful close kafka client. m.cancel() m.historyCH.redisMessageBatches.Close() + m.historyCH.Close() m.historyCH.historyConsumerGroup.Close() m.historyMongoCH.historyConsumerGroup.Close() return nil case <-netDone: m.cancel() m.historyCH.redisMessageBatches.Close() + m.historyCH.Close() m.historyCH.historyConsumerGroup.Close() m.historyMongoCH.historyConsumerGroup.Close() close(netDone) diff --git a/internal/msgtransfer/online_history_msg_handler.go b/internal/msgtransfer/online_history_msg_handler.go index b0078649cb..84453c8df4 100644 --- a/internal/msgtransfer/online_history_msg_handler.go +++ b/internal/msgtransfer/online_history_msg_handler.go @@ -18,8 +18,10 @@ import ( "context" "encoding/json" "errors" + "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" "strconv" "strings" + "sync" "time" "github.com/IBM/sarama" @@ -40,11 +42,12 @@ import ( ) const ( - size = 500 - mainDataBuffer = 500 - subChanBuffer = 50 - worker = 50 - interval = 100 * time.Millisecond + size = 500 + mainDataBuffer = 500 + subChanBuffer = 50 + worker = 50 + interval = 100 * time.Millisecond + hasReadChanBuffer = 1000 ) type ContextMsg struct { @@ -52,14 +55,23 @@ type ContextMsg struct { ctx context.Context } +// This structure is used for asynchronously writing the sender’s read sequence (seq) regarding a message into MongoDB. +// For example, if the sender sends a message with a seq of 10, then their own read seq for this conversation should be set to 10. 
+type userHasReadSeq struct { + conversationID string + userHasReadMap map[string]int64 +} + type OnlineHistoryRedisConsumerHandler struct { historyConsumerGroup *kafka.MConsumerGroup redisMessageBatches *batcher.Batcher[sarama.ConsumerMessage] - msgTransferDatabase controller.MsgTransferDatabase - conversationRpcClient *rpcclient.ConversationRpcClient - groupRpcClient *rpcclient.GroupRpcClient + msgTransferDatabase controller.MsgTransferDatabase + conversationRpcClient *rpcclient.ConversationRpcClient + groupRpcClient *rpcclient.GroupRpcClient + conversationUserHasReadChan chan *userHasReadSeq + wg sync.WaitGroup } func NewOnlineHistoryRedisConsumerHandler(kafkaConf *config.Kafka, database controller.MsgTransferDatabase, @@ -70,6 +82,8 @@ func NewOnlineHistoryRedisConsumerHandler(kafkaConf *config.Kafka, database cont } var och OnlineHistoryRedisConsumerHandler och.msgTransferDatabase = database + och.conversationUserHasReadChan = make(chan *userHasReadSeq, hasReadChanBuffer) + och.wg.Add(1) b := batcher.New[sarama.ConsumerMessage]( batcher.WithSize(size), @@ -115,25 +129,25 @@ func (och *OnlineHistoryRedisConsumerHandler) do(ctx context.Context, channelID } func (och *OnlineHistoryRedisConsumerHandler) doSetReadSeq(ctx context.Context, msgs []*ContextMsg) { - type seqKey struct { - conversationID string - userID string - } - var readSeq map[seqKey]int64 + + var conversationID string + var userSeqMap map[string]int64 for _, msg := range msgs { if msg.message.ContentType != constant.HasReadReceipt { continue } var elem sdkws.NotificationElem if err := json.Unmarshal(msg.message.Content, &elem); err != nil { - log.ZError(ctx, "handlerConversationRead Unmarshal NotificationElem msg err", err, "msg", msg) + log.ZWarn(ctx, "handlerConversationRead Unmarshal NotificationElem msg err", err, "msg", msg) continue } var tips sdkws.MarkAsReadTips if err := json.Unmarshal([]byte(elem.Detail), &tips); err != nil { - log.ZError(ctx, "handlerConversationRead Unmarshal MarkAsReadTips msg err", err, "msg", msg) + log.ZWarn(ctx, "handlerConversationRead Unmarshal MarkAsReadTips msg err", err, "msg", msg) continue } + //The conversation ID for each batch of messages processed by the batcher is the same. 
+ conversationID = tips.ConversationID if len(tips.Seqs) > 0 { for _, seq := range tips.Seqs { if tips.HasReadSeq < seq { @@ -146,26 +160,25 @@ func (och *OnlineHistoryRedisConsumerHandler) doSetReadSeq(ctx context.Context, if tips.HasReadSeq < 0 { continue } - if readSeq == nil { - readSeq = make(map[seqKey]int64) - } - key := seqKey{ - conversationID: tips.ConversationID, - userID: tips.MarkAsReadUserID, + if userSeqMap == nil { + userSeqMap = make(map[string]int64) } - if readSeq[key] > tips.HasReadSeq { + + if userSeqMap[tips.MarkAsReadUserID] > tips.HasReadSeq { continue } - readSeq[key] = tips.HasReadSeq + userSeqMap[tips.MarkAsReadUserID] = tips.HasReadSeq } - if readSeq == nil { + if userSeqMap == nil { return } - for key, seq := range readSeq { - if err := och.msgTransferDatabase.SetHasReadSeqToDB(ctx, key.userID, key.conversationID, seq); err != nil { - log.ZError(ctx, "set read seq to db error", err, "userID", key.userID, "conversationID", key.conversationID, "seq", seq) - } + if len(conversationID) == 0 { + log.ZWarn(ctx, "conversation err", nil, "conversationID", conversationID) } + if err := och.msgTransferDatabase.SetHasReadSeqToDB(ctx, conversationID, userSeqMap); err != nil { + log.ZWarn(ctx, "set read seq to db error", err, "conversationID", conversationID, "userSeqMap", userSeqMap) + } + } func (och *OnlineHistoryRedisConsumerHandler) parseConsumerMessages(ctx context.Context, consumerMessages []*sarama.ConsumerMessage) []*ContextMsg { @@ -250,12 +263,21 @@ func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key } if len(storageMessageList) > 0 { msg := storageMessageList[0] - lastSeq, isNewConversation, err := och.msgTransferDatabase.BatchInsertChat2Cache(ctx, conversationID, storageMessageList) + lastSeq, isNewConversation, userSeqMap, err := och.msgTransferDatabase.BatchInsertChat2Cache(ctx, conversationID, storageMessageList) if err != nil && !errors.Is(errs.Unwrap(err), redis.Nil) { - log.ZError(ctx, "batch data insert to redis err", err, "storageMsgList", storageMessageList) + log.ZWarn(ctx, "batch data insert to redis err", err, "storageMsgList", storageMessageList) return } log.ZInfo(ctx, "BatchInsertChat2Cache end") + err = och.msgTransferDatabase.SetHasReadSeqs(ctx, conversationID, userSeqMap) + if err != nil { + log.ZWarn(ctx, "SetHasReadSeqs error", err, "userSeqMap", userSeqMap, "conversationID", conversationID) + prommetrics.SeqSetFailedCounter.Inc() + } + och.conversationUserHasReadChan <- &userHasReadSeq{ + conversationID: conversationID, + userHasReadMap: userSeqMap, + } if isNewConversation { switch msg.SessionType { @@ -308,7 +330,7 @@ func (och *OnlineHistoryRedisConsumerHandler) handleNotification(ctx context.Con storageMessageList = append(storageMessageList, msg.message) } if len(storageMessageList) > 0 { - lastSeq, _, err := och.msgTransferDatabase.BatchInsertChat2Cache(ctx, conversationID, storageMessageList) + lastSeq, _, _, err := och.msgTransferDatabase.BatchInsertChat2Cache(ctx, conversationID, storageMessageList) if err != nil { log.ZError(ctx, "notification batch insert to redis error", err, "conversationID", conversationID, "storageList", storageMessageList) @@ -323,6 +345,21 @@ func (och *OnlineHistoryRedisConsumerHandler) handleNotification(ctx context.Con och.toPushTopic(ctx, key, conversationID, storageList) } } +func (och *OnlineHistoryRedisConsumerHandler) HandleUserHasReadSeqMessages(ctx context.Context) { + defer och.wg.Done() + + for msg := range och.conversationUserHasReadChan { + if err := 
och.msgTransferDatabase.SetHasReadSeqToDB(ctx, msg.conversationID, msg.userHasReadMap); err != nil { + log.ZWarn(ctx, "set read seq to db error", err, "conversationID", msg.conversationID, "userSeqMap", msg.userHasReadMap) + } + } + + log.ZInfo(ctx, "Channel closed, exiting handleUserHasReadSeqMessages") +} +func (och *OnlineHistoryRedisConsumerHandler) Close() { + close(och.conversationUserHasReadChan) + och.wg.Wait() +} func (och *OnlineHistoryRedisConsumerHandler) toPushTopic(ctx context.Context, key, conversationID string, msgs []*ContextMsg) { for _, v := range msgs { diff --git a/internal/push/callback.go b/internal/push/callback.go index 8897295827..f8e17bb8c0 100644 --- a/internal/push/callback.go +++ b/internal/push/callback.go @@ -24,7 +24,6 @@ import ( "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/tools/mcontext" - "github.com/openimsdk/tools/utils/datautil" ) func (c *ConsumerHandler) webhookBeforeOfflinePush(ctx context.Context, before *config.BeforeConfig, userIDs []string, msg *sdkws.MsgData, offlinePushUserIDs *[]string) error { @@ -70,7 +69,7 @@ func (c *ConsumerHandler) webhookBeforeOfflinePush(ctx context.Context, before * func (c *ConsumerHandler) webhookBeforeOnlinePush(ctx context.Context, before *config.BeforeConfig, userIDs []string, msg *sdkws.MsgData) error { return webhook.WithCondition(ctx, before, func(ctx context.Context) error { - if datautil.Contain(msg.SendID, userIDs...) || msg.ContentType == constant.Typing { + if msg.ContentType == constant.Typing { return nil } req := callbackstruct.CallbackBeforePushReq{ diff --git a/internal/rpc/group/group.go b/internal/rpc/group/group.go index ef917d5395..30ac19a76a 100644 --- a/internal/rpc/group/group.go +++ b/internal/rpc/group/group.go @@ -1026,7 +1026,7 @@ func (g *groupServer) SetGroupInfo(ctx context.Context, req *pbgroup.SetGroupInf } num := len(update) if req.GroupInfoForSet.Notification != "" { - num-- + num -= 3 func() { conversation := &pbconversation.ConversationReq{ ConversationID: msgprocessor.GetConversationIDBySessionType(constant.ReadGroupChatType, req.GroupInfoForSet.GroupID), @@ -1133,8 +1133,9 @@ func (g *groupServer) SetGroupInfoEx(ctx context.Context, req *pbgroup.SetGroupI } num := len(updatedData) + if req.Notification != nil { - num-- + num -= 3 if req.Notification.Value != "" { func() { @@ -1219,7 +1220,7 @@ func (g *groupServer) TransferGroupOwner(ctx context.Context, req *pbgroup.Trans } } - if newOwner.MuteEndTime != time.Unix(0, 0) { + if newOwner.MuteEndTime.After(time.Now()) { if _, err := g.CancelMuteGroupMember(ctx, &pbgroup.CancelMuteGroupMemberReq{ GroupID: group.GroupID, UserID: req.NewOwnerUserID}); err != nil { diff --git a/pkg/common/storage/controller/msg_transfer.go b/pkg/common/storage/controller/msg_transfer.go index c6013dbc12..1ecd786aa3 100644 --- a/pkg/common/storage/controller/msg_transfer.go +++ b/pkg/common/storage/controller/msg_transfer.go @@ -24,8 +24,11 @@ type MsgTransferDatabase interface { DeleteMessagesFromCache(ctx context.Context, conversationID string, seqs []int64) error // BatchInsertChat2Cache increments the sequence number and then batch inserts messages into the cache. 
- BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNewConversation bool, err error) - SetHasReadSeqToDB(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error + BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNewConversation bool, userHasReadMap map[string]int64, err error) + + SetHasReadSeqs(ctx context.Context, conversationID string, userSeqMap map[string]int64) error + + SetHasReadSeqToDB(ctx context.Context, conversationID string, userSeqMap map[string]int64) error // to mq MsgToPushMQ(ctx context.Context, key, conversationID string, msg2mq *sdkws.MsgData) (int32, int64, error) @@ -219,18 +222,18 @@ func (db *msgTransferDatabase) DeleteMessagesFromCache(ctx context.Context, conv return db.msg.DeleteMessagesFromCache(ctx, conversationID, seqs) } -func (db *msgTransferDatabase) BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNew bool, err error) { +func (db *msgTransferDatabase) BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNew bool, userHasReadMap map[string]int64, err error) { lenList := len(msgs) if int64(lenList) > db.msgTable.GetSingleGocMsgNum() { - return 0, false, errs.New("message count exceeds limit", "limit", db.msgTable.GetSingleGocMsgNum()).Wrap() + return 0, false, nil, errs.New("message count exceeds limit", "limit", db.msgTable.GetSingleGocMsgNum()).Wrap() } if lenList < 1 { - return 0, false, errs.New("no messages to insert", "minCount", 1).Wrap() + return 0, false, nil, errs.New("no messages to insert", "minCount", 1).Wrap() } currentMaxSeq, err := db.seqConversation.Malloc(ctx, conversationID, int64(len(msgs))) if err != nil { log.ZError(ctx, "storage.seq.Malloc", err) - return 0, false, err + return 0, false, nil, err } isNew = currentMaxSeq == 0 lastMaxSeq := currentMaxSeq @@ -248,25 +251,25 @@ func (db *msgTransferDatabase) BatchInsertChat2Cache(ctx context.Context, conver } else { prommetrics.MsgInsertRedisSuccessCounter.Inc() } - err = db.setHasReadSeqs(ctx, conversationID, userSeqMap) - if err != nil { - log.ZError(ctx, "SetHasReadSeqs error", err, "userSeqMap", userSeqMap, "conversationID", conversationID) - prommetrics.SeqSetFailedCounter.Inc() - } - return lastMaxSeq, isNew, errs.Wrap(err) + return lastMaxSeq, isNew, userSeqMap, errs.Wrap(err) } -func (db *msgTransferDatabase) setHasReadSeqs(ctx context.Context, conversationID string, userSeqMap map[string]int64) error { +func (db *msgTransferDatabase) SetHasReadSeqs(ctx context.Context, conversationID string, userSeqMap map[string]int64) error { for userID, seq := range userSeqMap { - if err := db.seqUser.SetUserReadSeqToDB(ctx, conversationID, userID, seq); err != nil { + if err := db.seqUser.SetUserReadSeq(ctx, conversationID, userID, seq); err != nil { return err } } return nil } -func (db *msgTransferDatabase) SetHasReadSeqToDB(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error { - return db.seqUser.SetUserReadSeqToDB(ctx, conversationID, userID, hasReadSeq) +func (db *msgTransferDatabase) SetHasReadSeqToDB(ctx context.Context, conversationID string, userSeqMap map[string]int64) error { + for userID, seq := range userSeqMap { + if err := db.seqUser.SetUserReadSeqToDB(ctx, conversationID, userID, seq); err != nil { + return err + } + } + return nil } func (db *msgTransferDatabase) MsgToPushMQ(ctx context.Context, key, 
conversationID string, msg2mq *sdkws.MsgData) (int32, int64, error) {