From 94328a3eef81ec40785f5eb3cbf7679120752c25 Mon Sep 17 00:00:00 2001
From: Kopper
Date: Wed, 6 Nov 2024 13:37:19 +0300
Subject: [PATCH] [backend] Use a stack instead of a queue when backfilling

This makes each reply chain load completely before loading the next
chain, instead of the current behavior, which loads all replies of one
depth before loading the next "layer".

This won't make much of a difference *now*, but should result in more
intuitive behavior once live updating of newly loaded replies is
implemented.
---
 .../Core/Queues/BackfillQueue.cs | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/Iceshrimp.Backend/Core/Queues/BackfillQueue.cs b/Iceshrimp.Backend/Core/Queues/BackfillQueue.cs
index 9bfb8936..99c6cbcc 100644
--- a/Iceshrimp.Backend/Core/Queues/BackfillQueue.cs
+++ b/Iceshrimp.Backend/Core/Queues/BackfillQueue.cs
@@ -25,7 +25,7 @@ public class BackfillQueue(int parallelism)
 		o.PoolSize        = 100;
 		o.PoolInitialFill = 5;
 	});
-	
+
 	private static async Task BackfillQueueProcessorDelegateAsync(
 		Job job,
 		BackfillJobData jobData,
@@ -35,38 +35,38 @@ public class BackfillQueue(int parallelism)
 	{
 		if (KeyedLocker.IsInUse(jobData.ThreadId)) return;
 		using var _ = await KeyedLocker.LockAsync(jobData.ThreadId, token);
-		
+
 		var logger         = scope.GetRequiredService<ILogger<BackfillQueue>>();
 		var backfillConfig = scope.GetRequiredService<IOptionsSnapshot<Config.BackfillSection>>();
 		var db             = scope.GetRequiredService<DatabaseContext>();
 		var noteSvc        = scope.GetRequiredService<NoteService>();
 		var objectResolver = scope.GetRequiredService<ActivityPub.ObjectResolver>();
-		
+
 		var user = jobData.AuthenticatedUserId == null
 			? null
 			: await db.Users.Where(u => u.Id == jobData.AuthenticatedUserId).FirstOrDefaultAsync(token);
-		
+
 		logger.LogDebug("Backfilling replies for thread {id} as user {userId}", jobData.ThreadId, user?.Username);
 
 		var cfg           = backfillConfig.Value.Replies;
 		var backfillLimit = MaxRepliesPerThread;
 		var history       = new HashSet<string>();
-		
+
 		var toBackfillArray = await db.Notes
 			.Where(n => n.ThreadId == jobData.ThreadId
 			            && n.RepliesCount < MaxRepliesPerNote
 			            && n.UserHost != null
-			            && n.RepliesCollection != null
-			            && n.CreatedAt <= DateTime.UtcNow - cfg.NewNoteDelayTimeSpan
+			            && n.RepliesCollection != null
+			            && n.CreatedAt <= DateTime.UtcNow - cfg.NewNoteDelayTimeSpan
			            && (n.RepliesFetchedAt == null || n.RepliesFetchedAt <= DateTime.UtcNow - cfg.RefreshAfterTimeSpan))
			.Select(n => new BackfillData(n.Id, n.RepliesCollection!))
			.ToArrayAsync(token);
-		
-		var toBackfill = new Queue<BackfillData>(toBackfillArray);
-		while (toBackfill.TryDequeue(out var currentItem))
+
+		var toBackfill = new Stack<BackfillData>(toBackfillArray);
+		while (toBackfill.TryPop(out var currentItem))
 		{
 			var current = currentItem;
-			if (!history.Add(current.RepliesCollection)) 
+			if (!history.Add(current.RepliesCollection))
 			{
 				logger.LogDebug("Skipping {collection} as it was already backfilled in this run", current.RepliesCollection);
 				continue;
@@ -78,7 +78,7 @@ public class BackfillQueue(int parallelism)
 				break;
 			}
 			logger.LogTrace("Backfilling collection {collection} (remaining limit {limit})", current.RepliesCollection, backfillLimit);
-			
+
 			await db.Notes
 				.Where(n => n.Id == current.Id)
 				.ExecuteUpdateAsync(p => p.SetProperty(n => n.RepliesFetchedAt, DateTime.UtcNow), token);
@@ -108,7 +108,7 @@ public class BackfillQueue(int parallelism)
 				    (note.RepliesFetchedAt == null
 				     || note.RepliesFetchedAt <= DateTime.UtcNow - cfg.RefreshAfterTimeSpan))
 				{
-					toBackfill.Enqueue(new BackfillData(note.Id, note.RepliesCollection!));
+					toBackfill.Push(new BackfillData(note.Id, note.RepliesCollection!));
 				}
 			}
 			catch (Exception e)
@@ -128,4 +128,4 @@ public class BackfillJobData
 {
 	[JR] [J("threadId")]            public required string  ThreadId            { get; set; }
 	[JR] [J("authenticatedUserId")] public required string? AuthenticatedUserId { get; set; }
-}
\ No newline at end of file
+}
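
Illustrative note (a minimal sketch, not part of the patch): the change is purely about traversal order. With a Queue<T> the processor dequeues in FIFO order, so every reply at one depth is fetched before any of their children (breadth-first); with a Stack<T> it pops the most recently discovered collection first, so each reply chain is followed all the way down before the next sibling chain starts (depth-first). The sketch below shows the two visit orders on a toy tree; the Node record and the WithQueue/WithStack helpers are hypothetical stand-ins, not the real BackfillData or Note types.

using System;
using System.Collections.Generic;

// Hypothetical stand-in for a remote note with a replies collection.
record Node(string Id, List<Node> Replies);

static class TraversalDemo
{
    // Breadth-first: mirrors the old Queue-based backfill (layer by layer).
    static IEnumerable<string> WithQueue(Node root)
    {
        var queue = new Queue<Node>();
        queue.Enqueue(root);
        while (queue.TryDequeue(out var current))
        {
            yield return current.Id;
            foreach (var reply in current.Replies)
                queue.Enqueue(reply);
        }
    }

    // Depth-first: mirrors the new Stack-based backfill (one chain at a time).
    static IEnumerable<string> WithStack(Node root)
    {
        var stack = new Stack<Node>();
        stack.Push(root);
        while (stack.TryPop(out var current))
        {
            yield return current.Id;
            foreach (var reply in current.Replies)
                stack.Push(reply);
        }
    }

    static void Main()
    {
        // Two reply chains under the root: A -> A1 -> A1a, and B -> B1.
        var a1a  = new Node("A1a", new List<Node>());
        var a1   = new Node("A1", new List<Node> { a1a });
        var a    = new Node("A", new List<Node> { a1 });
        var b1   = new Node("B1", new List<Node>());
        var b    = new Node("B", new List<Node> { b1 });
        var root = new Node("root", new List<Node> { a, b });

        Console.WriteLine("queue (old): " + string.Join(" ", WithQueue(root))); // root A B A1 B1 A1a
        Console.WriteLine("stack (new): " + string.Join(" ", WithStack(root))); // root B B1 A A1 A1a
    }
}

One side effect visible in the sample output: a stack visits sibling chains in reverse discovery order (B before A), which does not matter for backfilling completeness but explains the ordering difference beyond depth-first versus breadth-first.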