When we are at flush_max_threshold and the next bucket is a metadata bucket (i.e. next->length == 0), we still need to re-check flush_max_threshold and the associated optimisation (is_in_memory_bucket()) when we process this metadata bucket in the next iteration of the loop.

Follow-up to r1892450.

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1909966 13f79535-47bb-0310-9956-ffa450edef68
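
A minimal, self-contained sketch of the control flow in question, using simplified stand-in types, an arbitrary threshold and hypothetical helper names rather than the actual core_filters.c / APR API: the flush check at the bottom of the send loop defers flushing when the look-ahead bucket is metadata, so the old early "continue" in the metadata branch meant that a deferred flush was never re-evaluated.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the demo; the real code lives in
 * send_brigade_nonblocking() and uses APR brigades/buckets.  The bucket
 * kinds, threshold value and helper names here are assumptions, not the
 * httpd API. */
enum bucket_kind { BK_HEAP, BK_FILE, BK_META };

struct bucket {
    enum bucket_kind kind;
    size_t length;              /* 0 for metadata buckets */
    struct bucket *next;
};

#define FLUSH_MAX_THRESHOLD 16  /* arbitrary small value for the demo */

static int is_in_memory_bucket(const struct bucket *b)
{
    return b->kind == BK_HEAP;  /* placeholder for the real type check */
}

static void send_loop(struct bucket *head)
{
    size_t nbytes = 0;          /* bytes accumulated but not yet written */

    for (struct bucket *b = head; b != NULL; b = b->next) {
        if (b->length == 0) {
            /* Metadata bucket: nothing to append.
             * Old shape: "continue;" here skipped the flush check below,
             * so a flush deferred on the previous iteration (because the
             * look-ahead bucket was this metadata one) was never
             * re-considered.  New shape: fall through and re-check. */
        }
        else {
            nbytes += b->length;    /* stands in for appending to the iovec */
        }

        /* Flush above the max threshold, unless the next bucket holds
         * in-memory data that we would like to write in the same pass. */
        if (nbytes > FLUSH_MAX_THRESHOLD
            && b->next && b->next->length != 0
            && !is_in_memory_bucket(b->next)) {
            printf("flush %zu bytes\n", nbytes);
            nbytes = 0;
        }
    }
    if (nbytes) {
        printf("final write of %zu bytes\n", nbytes);
    }
}

int main(void)
{
    /* heap(10) -> heap(10) -> metadata -> file(10): the flush is deferred
     * while the look-ahead bucket is the metadata one, then triggered on
     * the metadata bucket's own iteration, before the file data is read. */
    struct bucket b4 = { BK_FILE, 10, NULL };
    struct bucket b3 = { BK_META,  0, &b4 };
    struct bucket b2 = { BK_HEAP, 10, &b3 };
    struct bucket b1 = { BK_HEAP, 10, &b2 };

    send_loop(&b1);
    return 0;
}

Built with any C99 compiler, the demo prints a flush of 20 bytes while processing the metadata bucket and a final write of 10 bytes afterwards; re-running the threshold check on the metadata bucket's iteration is the behaviour that removing the early "continue" achieves in the real filter.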
Christophe Jaillet
2023-05-21 17:46:22 +00:00
parent afad6e2a78
commit bbe60a5b3d

@@ -584,41 +584,41 @@ static apr_status_t send_brigade_nonblocking(apr_socket_t *s,
             if (!nvec) {
                 delete_meta_bucket(bucket);
             }
-            continue;
         }
-
-        /* Make sure that these new data fit in our iovec. */
-        if (nvec == ctx->nvec) {
-            if (nvec == NVEC_MAX) {
-                sock_nopush(s, 1);
-                rv = writev_nonblocking(s, bb, ctx, nbytes, nvec, c);
-                if (rv != APR_SUCCESS) {
-                    goto cleanup;
+        else {
+            /* Make sure that these new data fit in our iovec. */
+            if (nvec == ctx->nvec) {
+                if (nvec == NVEC_MAX) {
+                    sock_nopush(s, 1);
+                    rv = writev_nonblocking(s, bb, ctx, nbytes, nvec, c);
+                    if (rv != APR_SUCCESS) {
+                        goto cleanup;
+                    }
+                    nbytes = 0;
+                    nvec = 0;
+                }
+                else {
+                    struct iovec *newvec;
+                    apr_size_t newn = nvec * 2;
+                    if (newn < NVEC_MIN) {
+                        newn = NVEC_MIN;
+                    }
+                    else if (newn > NVEC_MAX) {
+                        newn = NVEC_MAX;
+                    }
+                    newvec = apr_palloc(c->pool, newn * sizeof(struct iovec));
+                    if (nvec) {
+                        memcpy(newvec, ctx->vec, nvec * sizeof(struct iovec));
+                    }
+                    ctx->vec = newvec;
+                    ctx->nvec = newn;
                 }
-                nbytes = 0;
-                nvec = 0;
             }
-            else {
-                struct iovec *newvec;
-                apr_size_t newn = nvec * 2;
-                if (newn < NVEC_MIN) {
-                    newn = NVEC_MIN;
-                }
-                else if (newn > NVEC_MAX) {
-                    newn = NVEC_MAX;
-                }
-                newvec = apr_palloc(c->pool, newn * sizeof(struct iovec));
-                if (nvec) {
-                    memcpy(newvec, ctx->vec, nvec * sizeof(struct iovec));
-                }
-                ctx->vec = newvec;
-                ctx->nvec = newn;
-            }
+            nbytes += length;
+            ctx->vec[nvec].iov_base = (void *)data;
+            ctx->vec[nvec].iov_len = length;
+            nvec++;
         }
-        nbytes += length;
-        ctx->vec[nvec].iov_base = (void *)data;
-        ctx->vec[nvec].iov_len = length;
-        nvec++;
 
         /* Flush above max threshold, unless the brigade still contains in
          * memory buckets which we want to try writing in the same pass (if