# nim-sds/src/reliability.nim
import std/[times, locks, tables, sets, options]
import chronos, results, chronicles
import ./[message, protobuf, reliability_utils, rolling_bloom_filter]
proc newReliabilityManager*(
    channelId: Option[SdsChannelID], config: ReliabilityConfig = defaultConfig()
): Result[ReliabilityManager, ReliabilityError] =
  ## Creates a new ReliabilityManager with the specified channel ID and configuration.
  ##
  ## Parameters:
  ##   - channelId: A unique identifier for the communication channel; must be present.
  ##   - config: Configuration options for the ReliabilityManager. If not provided,
  ##     default configuration is used.
  ##
  ## Returns:
  ##   A Result containing either a new ReliabilityManager instance or an error
  ##   (`reInvalidArgument` when channelId is absent, `reOutOfMemory` on
  ##   construction failure).
  if channelId.isNone():
    return err(ReliabilityError.reInvalidArgument)
  try:
    let bloomFilter =
      newRollingBloomFilter(config.bloomFilterCapacity, config.bloomFilterErrorRate)
    let rm = ReliabilityManager(
      lamportTimestamp: 0,
      messageHistory: @[],
      bloomFilter: bloomFilter,
      outgoingBuffer: @[],
      incomingBuffer: initTable[SdsMessageID, IncomingMessage](),
      channelId: channelId,
      config: config,
    )
    # The lock guards all mutable manager state; initialize before first use.
    initLock(rm.lock)
    return ok(rm)
  except CatchableError:
    # Catch only recoverable errors; `except Exception` would also swallow
    # Defects (programming bugs), which should crash instead.
    error "Failed to create ReliabilityManager", msg = getCurrentExceptionMsg()
    return err(ReliabilityError.reOutOfMemory)
proc isAcknowledged*(
    msg: UnacknowledgedMessage,
    causalHistory: seq[SdsMessageID],
    rbf: Option[RollingBloomFilter],
): bool =
  ## Returns true when `msg` is acknowledged either explicitly (its ID appears
  ## in `causalHistory`) or probabilistically (the peer's bloom filter, when
  ## present, contains its ID).
  let id = msg.message.messageId
  if id in causalHistory:
    return true
  rbf.isSome() and rbf.get().contains(id)
proc reviewAckStatus(rm: ReliabilityManager, msg: SdsMessage) {.gcsafe.} =
  ## Reviews outgoing messages against the acknowledgement information carried
  ## by an incoming message (its causal history and serialized bloom filter),
  ## fires `onMessageSent` for each newly acknowledged message, and removes
  ## those messages from the outgoing buffer.
  # Parse the peer's bloom filter, if one was attached.
  var rbf = none[RollingBloomFilter]()
  if msg.bloomFilter.len > 0:
    let bfResult = deserializeBloomFilter(msg.bloomFilter)
    if bfResult.isOk():
      let filter = bfResult.get()
      rbf = some(
        RollingBloomFilter(
          filter: filter,
          capacity: filter.capacity,
          minCapacity:
            (filter.capacity.float * (100 - CapacityFlexPercent).float / 100.0).int,
          maxCapacity:
            (filter.capacity.float * (100 + CapacityFlexPercent).float / 100.0).int,
          messages: @[],
        )
      )
    else:
      # Treat an unparsable filter as absent; causal history still applies.
      error "Failed to deserialize bloom filter", error = bfResult.error

  # Keep only unacknowledged messages. A single filtering pass replaces the
  # previous collect-indices-then-delete approach, which was O(n^2) because
  # each seq delete shifts the tail.
  var remaining: seq[UnacknowledgedMessage] = @[]
  for outMsg in rm.outgoingBuffer:
    if outMsg.isAcknowledged(msg.causalHistory, rbf):
      if not rm.onMessageSent.isNil():
        rm.onMessageSent(outMsg.message.messageId)
    else:
      remaining.add(outMsg)
  rm.outgoingBuffer = remaining
proc wrapOutgoingMessage*(
    rm: ReliabilityManager, message: seq[byte], messageId: SdsMessageID
): Result[seq[byte], ReliabilityError] =
  ## Wraps an outgoing message with reliability metadata (Lamport timestamp,
  ## causal history, and the local bloom filter), and registers it in the
  ## outgoing buffer for acknowledgement tracking.
  ##
  ## Parameters:
  ##   - message: The content of the message to be sent; must be non-empty and
  ##     at most MaxMessageSize bytes.
  ##   - messageId: Unique identifier for the message.
  ##
  ## Returns:
  ##   A Result containing either the wrapped message bytes or an error.
  if message.len == 0:
    return err(ReliabilityError.reInvalidArgument)
  if message.len > MaxMessageSize:
    return err(ReliabilityError.reMessageTooLarge)

  withLock rm.lock:
    try:
      rm.updateLamportTimestamp(getTime().toUnix)

      let bfResult = serializeBloomFilter(rm.bloomFilter.filter)
      if bfResult.isErr:
        error "Failed to serialize bloom filter"
        return err(ReliabilityError.reSerializationError)

      let msg = SdsMessage(
        messageId: messageId,
        lamportTimestamp: rm.lamportTimestamp,
        causalHistory: rm.getRecentSdsMessageIDs(rm.config.maxCausalHistory),
        channelId: rm.channelId,
        content: message,
        bloomFilter: bfResult.get(),
      )

      # Track until acknowledged so the periodic sweep can schedule resends.
      rm.outgoingBuffer.add(
        UnacknowledgedMessage(message: msg, sendTime: getTime(), resendAttempts: 0)
      )

      # Record locally so later incoming messages can acknowledge this one.
      rm.bloomFilter.add(msg.messageId)
      rm.addToHistory(msg.messageId)
      return serializeMessage(msg)
    except CatchableError:
      # Catch only recoverable errors; `except Exception` would also swallow
      # Defects (programming bugs), which should crash instead.
      error "Failed to wrap message", msg = getCurrentExceptionMsg()
      return err(ReliabilityError.reSerializationError)
2025-02-17 14:47:01 +05:30
2025-02-17 16:16:08 +05:30
proc processIncomingBuffer(rm: ReliabilityManager) {.gcsafe.} =
2025-02-17 14:47:01 +05:30
withLock rm.lock:
if rm.incomingBuffer.len == 0:
return
2025-02-17 16:16:08 +05:30
var processed = initHashSet[SdsMessageID]()
2025-03-10 16:07:00 +05:30
var readyToProcess = newSeq[SdsMessageID]()
2025-02-17 14:47:01 +05:30
2025-03-10 16:07:00 +05:30
# Find initially ready messages
for msgId, entry in rm.incomingBuffer:
if entry.missingDeps.len == 0:
readyToProcess.add(msgId)
2025-02-17 14:47:01 +05:30
while readyToProcess.len > 0:
let msgId = readyToProcess.pop()
if msgId in processed:
continue
2025-03-10 16:07:00 +05:30
if msgId in rm.incomingBuffer:
rm.addToHistory(msgId)
if not rm.onMessageReady.isNil():
rm.onMessageReady(msgId)
processed.incl(msgId)
# Update dependencies for remaining messages
for remainingId, entry in rm.incomingBuffer:
if remainingId notin processed:
if msgId in entry.missingDeps:
rm.incomingBuffer[remainingId].missingDeps.excl(msgId)
if rm.incomingBuffer[remainingId].missingDeps.len == 0:
readyToProcess.add(remainingId)
2025-02-17 14:47:01 +05:30
2025-03-10 16:07:00 +05:30
# Remove processed messages
for msgId in processed:
rm.incomingBuffer.del(msgId)
proc unwrapReceivedMessage*(
    rm: ReliabilityManager, message: seq[byte]
): Result[tuple[message: seq[byte], missingDeps: seq[SdsMessageID]], ReliabilityError] =
  ## Unwraps a received message and processes its reliability metadata:
  ## updates the Lamport clock and bloom filter, acknowledges outgoing
  ## messages, and either delivers the message or buffers it until its
  ## causal dependencies arrive.
  ##
  ## Parameters:
  ##   - message: The received message bytes.
  ##
  ## Returns:
  ##   A Result containing either a tuple of (message content, missing
  ##   dependency IDs) or an error.
  # NOTE(review): unlike wrapOutgoingMessage, this proc does not acquire
  # rm.lock itself (processIncomingBuffer takes it, and the lock is not
  # reentrant) — confirm callers provide external synchronization.
  try:
    let msg = deserializeMessage(message).valueOr:
      return err(ReliabilityError.reDeserializationError)

    # Duplicate: already delivered, nothing further to do.
    if msg.messageId in rm.messageHistory:
      return ok((msg.content, @[]))

    rm.bloomFilter.add(msg.messageId)
    # Update Lamport timestamp from the sender's clock.
    rm.updateLamportTimestamp(msg.lamportTimestamp)
    # Use the message's causal history / bloom filter to ACK outgoing messages.
    rm.reviewAckStatus(msg)

    var missingDeps = rm.checkDependencies(msg.causalHistory)
    if missingDeps.len == 0:
      # All dependencies are known, but some may still be queued in the
      # incoming buffer; delivery must then wait for those to drain first.
      var depsInBuffer = false
      for msgId, entry in rm.incomingBuffer.pairs():
        if msgId in msg.causalHistory:
          depsInBuffer = true
          break

      if depsInBuffer:
        rm.incomingBuffer[msg.messageId] =
          IncomingMessage(message: msg, missingDeps: initHashSet[SdsMessageID]())
      else:
        # All dependencies met: record, drain anything this unblocks, notify.
        rm.addToHistory(msg.messageId)
        rm.processIncomingBuffer()
        if not rm.onMessageReady.isNil():
          rm.onMessageReady(msg.messageId)
    else:
      rm.incomingBuffer[msg.messageId] =
        IncomingMessage(message: msg, missingDeps: missingDeps.toHashSet())
      if not rm.onMissingDependencies.isNil():
        rm.onMissingDependencies(msg.messageId, missingDeps)
    return ok((msg.content, missingDeps))
  except CatchableError:
    # Catch only recoverable errors; `except Exception` would also swallow
    # Defects (programming bugs), which should crash instead.
    error "Failed to unwrap message", msg = getCurrentExceptionMsg()
    return err(ReliabilityError.reDeserializationError)
proc markDependenciesMet*(
    rm: ReliabilityManager, messageIds: seq[SdsMessageID]
): Result[void, ReliabilityError] =
  ## Marks the specified message dependencies as met.
  ##
  ## Parameters:
  ##   - messageIds: A sequence of message IDs to mark as met.
  ##
  ## Returns:
  ##   A Result indicating success or an error.
  try:
    for msgId in messageIds:
      # Record the dependency as known. It is deliberately NOT added to the
      # local history — typically it already lives in the application's
      # long-term storage when this proc is called.
      if not rm.bloomFilter.contains(msgId):
        rm.bloomFilter.add(msgId)

      # Remove it from the missing-dependency set of every buffered message.
      for pendingId, entry in rm.incomingBuffer:
        if msgId in entry.missingDeps:
          rm.incomingBuffer[pendingId].missingDeps.excl(msgId)

    # Deliver any buffered messages that became fully satisfied.
    rm.processIncomingBuffer()
    return ok()
  except CatchableError:
    # Catch only recoverable errors; `except Exception` would also swallow
    # Defects (programming bugs), which should crash instead.
    error "Failed to mark dependencies as met", msg = getCurrentExceptionMsg()
    return err(ReliabilityError.reInternalError)
proc setCallbacks*(
    rm: ReliabilityManager,
    onMessageReady: proc(messageId: SdsMessageID) {.gcsafe.},
    onMessageSent: proc(messageId: SdsMessageID) {.gcsafe.},
    onMissingDependencies:
      proc(messageId: SdsMessageID, missingDeps: seq[SdsMessageID]) {.gcsafe.},
    onPeriodicSync: PeriodicSyncCallback = nil,
) =
  ## Installs the event callbacks on the ReliabilityManager, atomically under
  ## the manager's lock.
  ##
  ## Parameters:
  ##   - onMessageReady: invoked when a message is ready to be processed.
  ##   - onMessageSent: invoked when a message is confirmed as sent.
  ##   - onMissingDependencies: invoked when a message has missing dependencies.
  ##   - onPeriodicSync: invoked to request a periodic sync message (optional).
  withLock rm.lock:
    rm.onMessageReady = onMessageReady
    rm.onMessageSent = onMessageSent
    rm.onMissingDependencies = onMissingDependencies
    rm.onPeriodicSync = onPeriodicSync
proc checkUnacknowledgedMessages(rm: ReliabilityManager) {.gcsafe.} =
  ## Scans the outgoing buffer: messages past the resend interval get their
  ## attempt counter bumped and their timer restarted, until
  ## maxResendAttempts is reached — after which they are dropped from the
  ## buffer and reported via onMessageSent.
  # NOTE(review): no actual retransmission happens here; only the bookkeeping
  # is updated — presumably the application resends. Confirm with callers.
  withLock rm.lock:
    let now = getTime()
    var kept: seq[UnacknowledgedMessage] = @[]

    for unackMsg in rm.outgoingBuffer:
      if now - unackMsg.sendTime <= rm.config.resendInterval:
        # Not yet due for another resend attempt: keep as-is.
        kept.add(unackMsg)
      elif unackMsg.resendAttempts < rm.config.maxResendAttempts:
        # Due: record the attempt and restart the clock.
        var retry = unackMsg
        retry.resendAttempts += 1
        retry.sendTime = now
        kept.add(retry)
      else:
        # Out of attempts: give up on this message and notify.
        if not rm.onMessageSent.isNil():
          rm.onMessageSent(unackMsg.message.messageId)

    rm.outgoingBuffer = kept
2025-02-17 14:47:01 +05:30
2025-02-17 16:16:08 +05:30
proc periodicBufferSweep(
rm: ReliabilityManager
) {.async: (raises: [CancelledError]), gcsafe.} =
2025-02-17 14:47:01 +05:30
## Periodically sweeps the buffer to clean up and check unacknowledged messages.
while true:
2025-02-17 16:16:08 +05:30
try:
rm.checkUnacknowledgedMessages()
rm.cleanBloomFilter()
except Exception:
error "Error in periodic buffer sweep", msg = getCurrentExceptionMsg()
2025-02-17 14:47:01 +05:30
await sleepAsync(chronos.milliseconds(rm.config.bufferSweepInterval.inMilliseconds))
proc periodicSyncMessage(
    rm: ReliabilityManager
) {.async: (raises: [CancelledError]), gcsafe.} =
  ## Periodically invokes the onPeriodicSync callback so the application can
  ## send a sync message to maintain connectivity. Runs until cancelled.
  while true:
    try:
      if not rm.onPeriodicSync.isNil():
        rm.onPeriodicSync()
    except CatchableError:
      # Keep the sync loop alive on recoverable errors; `except Exception`
      # would also swallow Defects (programming bugs).
      error "Error in periodic sync", msg = getCurrentExceptionMsg()

    await sleepAsync(chronos.seconds(rm.config.syncMessageInterval.inSeconds))
proc startPeriodicTasks*(rm: ReliabilityManager) =
  ## Launches the background maintenance loops: the buffer sweep (resend
  ## bookkeeping + bloom-filter cleanup) and the periodic sync notifier.
  ##
  ## Call once after creating a ReliabilityManager to enable automatic
  ## maintenance; the spawned loops run until cancelled.
  asyncSpawn rm.periodicSyncMessage()
  asyncSpawn rm.periodicBufferSweep()
proc resetReliabilityManager*(rm: ReliabilityManager): Result[void, ReliabilityError] =
  ## Resets the ReliabilityManager to its initial state.
  ##
  ## Clears the message history and both buffers, resets the Lamport
  ## timestamp, and replaces the bloom filter with a fresh one built from
  ## the current configuration.
  ##
  ## Returns:
  ##   A Result indicating success or `reInternalError` on failure.
  withLock rm.lock:
    try:
      rm.lamportTimestamp = 0
      rm.messageHistory.setLen(0)
      rm.outgoingBuffer.setLen(0)
      rm.incomingBuffer.clear()
      rm.bloomFilter = newRollingBloomFilter(
        rm.config.bloomFilterCapacity, rm.config.bloomFilterErrorRate
      )
      return ok()
    except CatchableError:
      # Catch only recoverable errors; `except Exception` would also swallow
      # Defects (programming bugs), which should crash instead.
      error "Failed to reset ReliabilityManager", msg = getCurrentExceptionMsg()
      return err(ReliabilityError.reInternalError)