Remaining todos #4022

Merged
6 commits merged on Mar 22, 2024
@@ -6,7 +6,6 @@ class CachingHttpHandler : IResultFilter
 {
     public void OnResultExecuting(ResultExecutingContext context)
     {
-        // TODO do we even need to do this
         var response = context.HttpContext.Response;
        if (!response.Headers.ContainsKey("Cache-Control"))
         {
@@ -53,9 +53,7 @@ public static class HostApplicationBuilderExtensions
 
         services.AddHttpLogging(options =>
         {
-            // TODO Do we need to expose the host?
-            // we could also include the time it took to process the request
-            options.LoggingFields = HttpLoggingFields.RequestPath | HttpLoggingFields.RequestMethod | HttpLoggingFields.ResponseStatusCode;
+            options.LoggingFields = HttpLoggingFields.RequestPath | HttpLoggingFields.RequestMethod | HttpLoggingFields.ResponseStatusCode | HttpLoggingFields.Duration;
         });
 
         // Core registers the message dispatcher to be resolved from the transport seam. The dispatcher
src/ServiceControl.Monitoring/WebApplicationExtensions.cs (0 additions & 3 deletions)
@@ -10,9 +10,6 @@ public static void UseServiceControlMonitoring(this WebApplication appBuilder)
 
         appBuilder.UseCors(policyBuilder =>
         {
-            // TODO verify that the default is no headers and no methods allowed
-            //builder.AllowAnyHeader();
-            //builder.AllowAnyMethod();
             policyBuilder.AllowAnyOrigin();
             policyBuilder.WithExposedHeaders(["ETag", "Last-Modified", "Link", "Total-Count", "X-Particular-Version"]);
             policyBuilder.WithHeaders(["Origin", "X-Requested-With", "Content-Type", "Accept"]);
src/ServiceControl.Persistence.RavenDB/ErrorMessagesDataStore.cs (13 additions & 21 deletions)
@@ -609,34 +609,26 @@ public async Task RemoveFailedMessageRetryDocument(string uniqueMessageId)
         await session.Advanced.RequestExecutor.ExecuteAsync(new DeleteDocumentCommand(FailedMessageRetry.MakeDocumentId(uniqueMessageId), null), session.Advanced.Context);
     }
 
-    // TODO: Once using .NET, consider using IAsyncEnumerable here as this is an unbounded query
     public async Task<string[]> GetRetryPendingMessages(DateTime from, DateTime to, string queueAddress)
     {
-        var ids = new List<string>();
-
         using var session = documentStore.OpenAsyncSession();
-        var query = session.Advanced
-            .AsyncDocumentQuery<FailedMessageViewIndex.SortAndFilterOptions, FailedMessageViewIndex>()
-            .WhereEquals("Status", (int)FailedMessageStatus.RetryIssued)
-            .AndAlso()
-            .WhereBetween(options => options.LastModified, from.Ticks, to.Ticks)
-            .AndAlso()
-            .WhereEquals(o => o.QueueAddress, queueAddress)
-            .SelectFields<FailedMessage>()
-            .ToQueryable()
-            .TransformToFailedMessageView();
-
-        await using (var ie = await session.Advanced.StreamAsync(query))
+        var query = session

Contributor Author comment:

@mauroservienti you have a bit more knowledge about RavenDB. This whole thing is weird. We were pulling down the entire FailedMessage only to use its UniqueMessageId. The result then travels up to the outer layers, which iterate over it, build FailedMessageRetry IDs, and delete them one by one. Horrible. I tried a DeleteByQuery, but to my knowledge that doesn't work here because the collection I query and the collection I delete from are different. So for now I at least made the query materialize less data from the server.

As a next small improvement, we could change RemoveFailedMessageRetryDocument to accept a number of IDs. I also thought about streaming IDs back and having the outer layers do batch deletes per stream, but I wasn't sure about the implications of that change, since the stream cursor would stay open while the batch deletes run.

This TODO is a bit of a rabbit hole, and it isn't even ours 👯‍♂️
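
For illustration, a rough sketch of the suggested batched variant; the RemoveFailedMessageRetryDocuments name and the use of session.Advanced.Defer with DeleteCommandData are assumptions for this sketch, not something the PR changes:

// Hypothetical sketch, not part of this PR (requires Raven.Client.Documents.Commands.Batches).
public async Task RemoveFailedMessageRetryDocuments(string[] uniqueMessageIds)
{
    using var session = documentStore.OpenAsyncSession();

    foreach (var uniqueMessageId in uniqueMessageIds)
    {
        // Defer only queues the delete locally; nothing is sent to the server yet.
        session.Advanced.Defer(new DeleteCommandData(FailedMessageRetry.MakeDocumentId(uniqueMessageId), null));
    }

    // All deferred deletes are flushed to the server in a single round trip.
    await session.SaveChangesAsync();
}

The caller could then stream IDs in chunks and pass each chunk to this method, so only one batch of IDs is held in memory at a time.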

+            .Query<FailedMessageViewIndex.SortAndFilterOptions, FailedMessageViewIndex>()
+            .Where(o => o.Status == FailedMessageStatus.RetryIssued && o.LastModified >= from.Ticks && o.LastModified <= to.Ticks && o.QueueAddress == queueAddress)
+            .OfType<FailedMessageProjection>();
+
+        int index = 0;
+        await using var streamResults = await session.Advanced.StreamAsync(query, out var streamQueryStatistics);
+        string[] ids = new string[streamQueryStatistics.TotalResults];
+        while (await streamResults.MoveNextAsync())
         {
-            while (await ie.MoveNextAsync())
-            {
-                ids.Add(ie.Current.Document.Id);
-            }
+            ids[index++] = streamResults.Current.Document.UniqueMessageId;
         }
 
-        return ids.ToArray();
+        return ids;
     }
 
+    record struct FailedMessageProjection(string UniqueMessageId);
+
     public async Task<byte[]> FetchFromFailedMessage(string uniqueMessageId)
     {
         byte[] body = null;
src/ServiceControl/Hosting/Commands/MaintenanceModeCommand.cs (0 additions & 2 deletions)
@@ -26,9 +26,7 @@ public override async Task Execute(HostArguments args, Settings settings)
             hostBuilder.Services.AddSingleton<IHostLifetime, PersisterInitializingConsoleLifetime>();
         }
 
-        // TODO: Update to use the same pattern as the main Bootstrapper
         var host = hostBuilder.Build();
-
         await host.RunAsync();
     }
 }
Expand Up @@ -27,7 +27,6 @@ public MessageStreamerHub(IMessageDispatcher sender, IReadOnlySettings settings,
localAddress = receiveAddresses.MainReceiveAddress;
}

// TODO Change service pulse to call this method instead?
public async Task SendMessage(string data)
{
try
Expand Down