[backend/core] Fix sporadic "key not found" background task failures

Laura Hausmann 2024-04-07 21:16:10 +02:00
parent 22a4de63f3
commit 60545e25ae
No known key found for this signature in database
GPG key ID: D044E84C5BE01605
3 changed files with 59 additions and 21 deletions


@@ -48,10 +48,12 @@ public class BackgroundTaskQueue()
         CancellationToken token
     )
     {
-        var db = scope.GetRequiredService<DatabaseContext>();
-        var usedAsAvatarOrBanner =
-            await db.Users.AnyAsync(p => p.AvatarId == jobData.DriveFileId ||
-                                         p.BannerId == jobData.DriveFileId, token);
+        var db     = scope.GetRequiredService<DatabaseContext>();
+        var logger = scope.GetRequiredService<ILogger<BackgroundTaskQueue>>();
+        logger.LogDebug("Expiring file {id}...", jobData.DriveFileId);
+        var usedAsAvatarOrBanner = await db.Users.AnyAsync(p => p.AvatarId == jobData.DriveFileId ||
+                                                                p.BannerId == jobData.DriveFileId, token);
         var usedInNote = await db.Notes.AnyAsync(p => p.FileIds.Contains(jobData.DriveFileId), token);
@@ -195,14 +197,14 @@ public class BackgroundTaskQueue()
         var eventSvc = scope.GetRequiredService<EventService>();
         eventSvc.RaiseUserUnmuted(null, muting.Muter, muting.Mutee);
     }
     private static async Task ProcessFilterExpiry(
         FilterExpiryJobData jobData,
         IServiceProvider scope,
         CancellationToken token
     )
     {
-        var db = scope.GetRequiredService<DatabaseContext>();
+        var db     = scope.GetRequiredService<DatabaseContext>();
         var filter = await db.Filters.FirstOrDefaultAsync(p => p.Id == jobData.FilterId, token);
         if (filter is not { Expiry: not null }) return;


@@ -9,42 +9,74 @@ using Microsoft.Extensions.Options;
 namespace Iceshrimp.Backend.Core.Services;
-public class ObjectStorageService(IOptions<Config.StorageSection> config, HttpClient httpClient)
+public class ObjectStorageService
 {
-    private readonly string? _accessUrl = config.Value.ObjectStorage?.AccessUrl;
+    private readonly string? _accessUrl;
-    private readonly S3Bucket? _bucket = GetBucketSafely(config);
+    private readonly S3Bucket? _bucket;
+    private readonly S3Client? _client;
-    private readonly string? _prefix = config.Value.ObjectStorage?.Prefix?.Trim('/');
+    private readonly string? _prefix;
+    private readonly HttpClient _httpClient;
-    private static S3Bucket? GetBucketSafely(IOptions<Config.StorageSection> config)
+    public ObjectStorageService(IOptions<Config.StorageSection> config, HttpClient httpClient)
     {
-        if (config.Value.Mode != Enums.FileStorage.Local) return GetBucket(config);
+        _httpClient = httpClient;
+        _accessUrl = config.Value.ObjectStorage?.AccessUrl;
+        _client = GetClientSafely(config);
+        _bucket = GetBucketSafely(config, _client);
+        _prefix = config.Value.ObjectStorage?.Prefix?.Trim('/');
+    }
+    private static S3Client? GetClientSafely(IOptions<Config.StorageSection> config)
+    {
         try
         {
-            return GetBucket(config);
+            return GetClient(config);
         }
         catch
         {
             if (config.Value.Mode == Enums.FileStorage.ObjectStorage)
                 throw;
             return null;
         }
     }
-    private static S3Bucket GetBucket(IOptions<Config.StorageSection> config)
+    private static S3Bucket? GetBucketSafely(IOptions<Config.StorageSection> config, S3Client? client)
     {
-        var s3Config = config.Value.ObjectStorage ?? throw new Exception("Invalid object storage configuration");
+        try
+        {
+            return GetBucket(config, client);
+        }
+        catch
+        {
+            if (config.Value.Mode == Enums.FileStorage.ObjectStorage)
+                throw;
+            return null;
+        }
+    }
+    private static S3Client GetClient(IOptions<Config.StorageSection> config)
+    {
+        var s3Config = config.Value.ObjectStorage ?? throw new Exception("Invalid object storage configuration");
         var region = s3Config.Region ?? throw new Exception("Invalid object storage region");
         var endpoint = s3Config.Endpoint ?? throw new Exception("Invalid object storage endpoint");
         var accessKey = s3Config.KeyId ?? throw new Exception("Invalid object storage access key");
         var secretKey = s3Config.SecretKey ?? throw new Exception("Invalid object storage secret key");
-        var bucket = s3Config.Bucket ?? throw new Exception("Invalid object storage bucket");
+        return new S3Client(new AwsRegion(region), endpoint, new AwsCredential(accessKey, secretKey));
+    }
+    private static S3Bucket GetBucket(IOptions<Config.StorageSection> config, S3Client? client)
+    {
+        if (client == null) throw new Exception("S3Client is null");
+        var s3Config = config.Value.ObjectStorage ?? throw new Exception("Invalid object storage configuration");
+        var bucket = s3Config.Bucket ?? throw new Exception("Invalid object storage bucket");
         if (config.Value.ObjectStorage?.AccessUrl == null)
             throw new Exception("Invalid object storage access url");
-        var client = new S3Client(new AwsRegion(region), endpoint, new AwsCredential(accessKey, secretKey));
         return new S3Bucket(bucket, client);
     }
@@ -58,7 +90,7 @@ public class ObjectStorageService(IOptions<Config.StorageSection> config, HttpClient httpClient)
         string result;
         try
         {
-            result = await httpClient.GetStringAsync(GetFilePublicUrl(filename));
+            result = await _httpClient.GetStringAsync(GetFilePublicUrl(filename));
         }
         catch (Exception e)
         {
@@ -113,9 +145,13 @@ public class ObjectStorageService(IOptions<Config.StorageSection> config, HttpClient httpClient)
     public async Task RemoveFilesAsync(params string[] filenames)
     {
-        if (_bucket == null)
+        if (_bucket == null || _client == null)
             throw new Exception("Refusing to remove file from object storage with invalid configuration");
-        await _bucket.DeleteAsync(filenames.Select(GetFilenameWithPrefix).ToImmutableList());
+        // We need to construct this request manually as the library will throw an exception on missing keys otherwise
+        var batch = new DeleteBatch(filenames.Select(GetFilenameWithPrefix).ToImmutableList(), quite: true);
+        var request = new DeleteObjectsRequest(_client.Host, _bucket.Name, batch);
+        await _client.DeleteObjectsAsync(request);
     }
     private string GetFilenameWithPrefix(string filename)

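For context on the RemoveFilesAsync change above: S3's multi-object delete API has a quiet mode in which the response reports only the keys that failed, rather than a per-key result for every request, which appears to be what the manually constructed DeleteBatch relies on to keep already-missing keys from surfacing as errors. Below is a minimal sketch of the request body quiet mode produces, using only standard .NET XML APIs; the helper is illustrative and not part of the commit, which builds the request through the S3 client library shown above.

// Illustrative sketch only: the XML payload shape of an S3 multi-object delete
// with quiet mode enabled (the response then lists only failed keys).
// S3DeleteSketch/BuildQuietDeleteBody are hypothetical names, not part of the codebase.
using System.Collections.Generic;
using System.Linq;
using System.Xml.Linq;

static class S3DeleteSketch
{
    public static string BuildQuietDeleteBody(IEnumerable<string> keys)
    {
        XNamespace ns = "http://s3.amazonaws.com/doc/2006-03-01/";
        var delete = new XElement(ns + "Delete",
            new XElement(ns + "Quiet", "true"),
            keys.Select(k => new XElement(ns + "Object", new XElement(ns + "Key", k))));
        return delete.ToString();
    }
}

// e.g. S3DeleteSketch.BuildQuietDeleteBody(new[] { "prefix/a.webp", "prefix/b.webp" })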

@@ -436,7 +436,7 @@ public class PostgresJobQueue<T>(
             }
             else
             {
-                logger.LogError("Failed to process job in {queue} queue: {error}", name, e.Message);
+                logger.LogError(e, "Failed to process job in {queue} queue:", name);
             }
         }
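The final hunk switches the PostgresJobQueue error handler from interpolating e.Message into the message template to passing the exception object itself, using the standard Microsoft.Extensions.Logging overload that records the exception type and stack trace alongside the structured {queue} parameter. A minimal sketch contrasting the two patterns (the wrapper method below is illustrative only, not part of the codebase):

// Sketch of the logging change above; LogQueueFailure is a hypothetical helper.
using System;
using Microsoft.Extensions.Logging;

static class QueueLoggingSketch
{
    public static void LogQueueFailure(ILogger logger, string name, Exception e)
    {
        // Before: only the exception message is embedded in the template;
        // the exception type and stack trace are lost.
        logger.LogError("Failed to process job in {queue} queue: {error}", name, e.Message);

        // After: the exception is passed as the first argument and logged in full,
        // while {queue} remains a structured template parameter.
        logger.LogError(e, "Failed to process job in {queue} queue:", name);
    }
}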