[backend/core] Fix sporadic "key not found" background task failures
parent 22a4de63f3
commit 60545e25ae
3 changed files with 59 additions and 21 deletions
@@ -48,10 +48,12 @@ public class BackgroundTaskQueue()
         CancellationToken token
     )
     {
         var db = scope.GetRequiredService<DatabaseContext>();
-        var usedAsAvatarOrBanner =
-            await db.Users.AnyAsync(p => p.AvatarId == jobData.DriveFileId ||
-                                         p.BannerId == jobData.DriveFileId, token);
+        var logger = scope.GetRequiredService<ILogger<BackgroundTaskQueue>>();
+        logger.LogDebug("Expiring file {id}...", jobData.DriveFileId);
+
+        var usedAsAvatarOrBanner = await db.Users.AnyAsync(p => p.AvatarId == jobData.DriveFileId ||
+                                                                p.BannerId == jobData.DriveFileId, token);
 
         var usedInNote = await db.Notes.AnyAsync(p => p.FileIds.Contains(jobData.DriveFileId), token);
 
@@ -202,7 +204,7 @@ public class BackgroundTaskQueue()
         CancellationToken token
     )
     {
         var db = scope.GetRequiredService<DatabaseContext>();
         var filter = await db.Filters.FirstOrDefaultAsync(p => p.Id == jobData.FilterId, token);
 
         if (filter is not { Expiry: not null }) return;
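The early-return guard in the filter hunk above uses a negated property pattern, which reads a little cryptically. As a quick standalone illustration (plain C#, not part of the commit; the Filter record here is a stand-in for the project's entity), it is equivalent to checking both the object and its Expiry for null:

    // Minimal sketch: `filter is not { Expiry: not null }` matches when filter is null
    // OR filter.Expiry is null, so the task bails out early in either case.
    static bool ShouldSkip(Filter? filter)
        => filter is not { Expiry: not null };   // same as: filter == null || filter.Expiry == null

    Console.WriteLine(ShouldSkip(null));                                 // True
    Console.WriteLine(ShouldSkip(new Filter(null)));                     // True
    Console.WriteLine(ShouldSkip(new Filter(DateTimeOffset.UtcNow)));    // False

    record Filter(DateTimeOffset? Expiry);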
@@ -9,42 +9,74 @@ using Microsoft.Extensions.Options;
 
 namespace Iceshrimp.Backend.Core.Services;
 
-public class ObjectStorageService(IOptions<Config.StorageSection> config, HttpClient httpClient)
+public class ObjectStorageService
 {
-    private readonly string? _accessUrl = config.Value.ObjectStorage?.AccessUrl;
+    private readonly string? _accessUrl;
 
-    private readonly S3Bucket? _bucket = GetBucketSafely(config);
+    private readonly S3Bucket? _bucket;
+    private readonly S3Client? _client;
 
-    private readonly string? _prefix = config.Value.ObjectStorage?.Prefix?.Trim('/');
+    private readonly string? _prefix;
+    private readonly HttpClient _httpClient;
 
-    private static S3Bucket? GetBucketSafely(IOptions<Config.StorageSection> config)
+    public ObjectStorageService(IOptions<Config.StorageSection> config, HttpClient httpClient)
     {
-        if (config.Value.Mode != Enums.FileStorage.Local) return GetBucket(config);
+        _httpClient = httpClient;
+        _accessUrl = config.Value.ObjectStorage?.AccessUrl;
+        _client = GetClientSafely(config);
+        _bucket = GetBucketSafely(config, _client);
+        _prefix = config.Value.ObjectStorage?.Prefix?.Trim('/');
+    }
+
+    private static S3Client? GetClientSafely(IOptions<Config.StorageSection> config)
+    {
         try
         {
-            return GetBucket(config);
+            return GetClient(config);
         }
         catch
         {
+            if (config.Value.Mode == Enums.FileStorage.ObjectStorage)
+                throw;
+
             return null;
         }
     }
 
-    private static S3Bucket GetBucket(IOptions<Config.StorageSection> config)
+    private static S3Bucket? GetBucketSafely(IOptions<Config.StorageSection> config, S3Client? client)
     {
-        var s3Config = config.Value.ObjectStorage ?? throw new Exception("Invalid object storage configuration");
+        try
+        {
+            return GetBucket(config, client);
+        }
+        catch
+        {
+            if (config.Value.Mode == Enums.FileStorage.ObjectStorage)
+                throw;
+
+            return null;
+        }
+    }
+
+    private static S3Client GetClient(IOptions<Config.StorageSection> config)
+    {
+        var s3Config = config.Value.ObjectStorage ?? throw new Exception("Invalid object storage configuration");
         var region = s3Config.Region ?? throw new Exception("Invalid object storage region");
         var endpoint = s3Config.Endpoint ?? throw new Exception("Invalid object storage endpoint");
         var accessKey = s3Config.KeyId ?? throw new Exception("Invalid object storage access key");
         var secretKey = s3Config.SecretKey ?? throw new Exception("Invalid object storage secret key");
-        var bucket = s3Config.Bucket ?? throw new Exception("Invalid object storage bucket");
+        return new S3Client(new AwsRegion(region), endpoint, new AwsCredential(accessKey, secretKey));
+    }
+
+    private static S3Bucket GetBucket(IOptions<Config.StorageSection> config, S3Client? client)
+    {
+        if (client == null) throw new Exception("S3Client is null");
+        var s3Config = config.Value.ObjectStorage ?? throw new Exception("Invalid object storage configuration");
+        var bucket = s3Config.Bucket ?? throw new Exception("Invalid object storage bucket");
+
         if (config.Value.ObjectStorage?.AccessUrl == null)
             throw new Exception("Invalid object storage access url");
 
-        var client = new S3Client(new AwsRegion(region), endpoint, new AwsCredential(accessKey, secretKey));
         return new S3Bucket(bucket, client);
     }
 
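The refactored initialization above only swallows configuration errors when object storage is not the active mode and rethrows otherwise, so a broken object storage config still fails fast where it matters. A rough standalone sketch of that contract (simplified, not project code; names here are illustrative):

    // Simplified rendering of the GetClientSafely/GetBucketSafely pattern above:
    // when object storage is required, surface the misconfiguration immediately;
    // when it is optional (local storage mode), fall back to null and let only the
    // code paths that actually need object storage fail later.
    static T? TryCreateOrNull<T>(Func<T> factory, bool objectStorageRequired) where T : class
    {
        try
        {
            return factory();
        }
        catch
        {
            if (objectStorageRequired) throw;
            return null;
        }
    }

    // e.g. (hypothetical): var client = TryCreateOrNull(() => GetClient(config),
    //                          objectStorageRequired: mode == Enums.FileStorage.ObjectStorage);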
@@ -58,7 +90,7 @@ public class ObjectStorageService(IOptions<Config.StorageSection> config, HttpCl
         string result;
         try
         {
-            result = await httpClient.GetStringAsync(GetFilePublicUrl(filename));
+            result = await _httpClient.GetStringAsync(GetFilePublicUrl(filename));
         }
         catch (Exception e)
         {
@@ -113,9 +145,13 @@ public class ObjectStorageService(IOptions<Config.StorageSection> config, HttpCl
 
     public async Task RemoveFilesAsync(params string[] filenames)
     {
-        if (_bucket == null)
+        if (_bucket == null || _client == null)
             throw new Exception("Refusing to remove file from object storage with invalid configuration");
-        await _bucket.DeleteAsync(filenames.Select(GetFilenameWithPrefix).ToImmutableList());
+
+        // We need to construct this request manually as the library will throw an exception on missing keys otherwise
+        var batch = new DeleteBatch(filenames.Select(GetFilenameWithPrefix).ToImmutableList(), quite: true);
+        var request = new DeleteObjectsRequest(_client.Host, _bucket.Name, batch);
+        await _client.DeleteObjectsAsync(request);
     }
 
     private string GetFilenameWithPrefix(string filename)
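Per the inline comment above, the manually built quiet batch keeps the library from throwing when a key has already disappeared from the bucket, which is the sporadic "key not found" failure named in the commit title. A hypothetical call site, assuming the project's DI setup and usings (the method name, the scope parameter, and the file names are assumptions, not from the commit):

    // Hypothetical usage: a background task resolves the service from its DI scope and
    // deletes a file plus its derived versions in one quiet batch, so keys that are
    // already gone from the bucket no longer bubble up as task failures.
    async Task ExpireFileAsync(IServiceProvider scope)
    {
        var objectStorage = scope.GetRequiredService<ObjectStorageService>();
        await objectStorage.RemoveFilesAsync("somefile.webp", "somefile-thumbnail.webp");
    }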
@@ -436,7 +436,7 @@ public class PostgresJobQueue<T>(
             }
             else
             {
-                logger.LogError("Failed to process job in {queue} queue: {error}", name, e.Message);
+                logger.LogError(e, "Failed to process job in {queue} queue:", name);
             }
         }
 
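The logging change above switches to the LogError overload that takes the exception as its first argument. A small standalone illustration of the difference (assumed console logger setup, not project code; the queue name is a placeholder):

    using Microsoft.Extensions.Logging;

    using var loggerFactory = LoggerFactory.Create(builder => builder.AddConsole());
    var logger = loggerFactory.CreateLogger("demo");

    try
    {
        throw new InvalidOperationException("boom");
    }
    catch (Exception e)
    {
        // Old call: only the message text is recorded; the stack trace is lost.
        logger.LogError("Failed to process job in {queue} queue: {error}", "background-task", e.Message);

        // New call: the exception object travels with the log entry, so logging providers
        // can emit its type, stack trace and inner exceptions as well.
        logger.LogError(e, "Failed to process job in {queue} queue:", "background-task");
    }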