Mirror of https://github.com/apache/httpd.git, synced 2025-08-10 02:56:11 +00:00
On the trunk:

*) mod_http2: rework of stream resource cleanup to avoid a crash in a close
   of a lingering connection. Prohibit special file bucket beaming for
   shared buckets. Files sent in stream output now use the stream pool
   as read buffer, reducing memory footprint of connections.
   [Yann Ylavic, Stefan Eissing]

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1779738 13f79535-47bb-0310-9956-ffa450edef68
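Of the changes listed, "prohibit special file bucket beaming for shared buckets" boils down to a refcount test on the apr_bucket_file before its file descriptor is handed to another thread; it is visible in the append_bucket() hunk below as can_beam = (bf->refcount.refcount == 1). The standalone APR program that follows is a minimal sketch of that test only, not part of this commit; the helper name file_bucket_may_beam and the use of "CHANGES" as a sample file are invented for illustration. Build against libapr-1 and libaprutil-1.

/* Sketch (not httpd source): a file bucket whose apr_bucket_file is shared
 * (refcount > 1) should not have its fd "beamed" to another thread, because
 * the beam would no longer control the file's lifetime. */
#include <apr_general.h>
#include <apr_buckets.h>
#include <apr_file_io.h>
#include <stdio.h>

static int file_bucket_may_beam(apr_bucket *b)
{
    apr_bucket_file *bf;

    if (!APR_BUCKET_IS_FILE(b)) {
        return 0;
    }
    bf = b->data;
    /* same idea as the patched append_bucket(): only beam the fd when this
     * bucket holds the sole reference to the shared apr_bucket_file */
    return (bf->refcount.refcount == 1);
}

int main(void)
{
    apr_pool_t *pool;
    apr_file_t *fd;
    apr_bucket_alloc_t *ba;
    apr_bucket *b, *b2;

    apr_initialize();
    apr_pool_create(&pool, NULL);
    ba = apr_bucket_alloc_create(pool);

    if (apr_file_open(&fd, "CHANGES", APR_FOPEN_READ, APR_OS_DEFAULT,
                      pool) != APR_SUCCESS) {
        return 1;
    }
    b = apr_bucket_file_create(fd, 0, 1024, pool, ba);
    printf("single reference, may beam: %d\n", file_bucket_may_beam(b));

    apr_bucket_copy(b, &b2);    /* sharing the bucket bumps the refcount */
    printf("shared bucket,    may beam: %d\n", file_bucket_may_beam(b));

    apr_terminate();
    return 0;
}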
@@ -1,14 +1,17 @@
                                                          -*- coding: utf-8 -*-
 Changes with Apache 2.5.0

+  *) mod_http2: rework of stream resource cleanup to avoid a crash in a close
+     of a lingering connection. Prohibit special file bucket beaming for
+     shared buckets. Files sent in stream output now use the stream pool
+     as read buffer, reducing memory footprint of connections.
+     [Yann Ylavic, Stefan Eissing]
+
   *) mod_proxy_hcheck: Don't validate timed out responses. [Yann Ylavic]

   *) mod_proxy_hcheck: Ensure thread-safety when concurrent healthchecks are
      in use (ProxyHCTPsize > 0). PR 60071. [Yann Ylavic, Jim Jagielski]

   *) mod_http2: change lifetime of h2_session record to address crashes
      during connection shutdown. [Yann Ylavic, Stefan Eissing]

   *) mod_proxy_fcgi: Return to 2.4.20-and-earlier behavior of leaving
      a "proxy:fcgi://" prefix in the SCRIPT_FILENAME environment variable by
      default. Add ProxyFCGIBackendType to allow the type of backend to be
@@ -428,7 +428,7 @@ SET(mod_http2_extra_defines ssize_t=long)
 SET(mod_http2_extra_includes ${NGHTTP2_INCLUDE_DIR})
 SET(mod_http2_extra_libs ${NGHTTP2_LIBRARIES})
 SET(mod_http2_extra_sources
-  modules/http2/h2_alt_svc.c modules/http2/h2_bucket_eoc.c
+  modules/http2/h2_alt_svc.c
   modules/http2/h2_bucket_eos.c modules/http2/h2_config.c
   modules/http2/h2_conn.c modules/http2/h2_conn_io.c
   modules/http2/h2_ctx.c modules/http2/h2_filter.c
@@ -186,7 +186,6 @@ TARGET_lib = \
 FILES_nlm_objs = \
     $(OBJDIR)/h2_alt_svc.o \
     $(OBJDIR)/h2_bucket_beam.o \
-    $(OBJDIR)/h2_bucket_eoc.o \
     $(OBJDIR)/h2_bucket_eos.o \
     $(OBJDIR)/h2_config.o \
     $(OBJDIR)/h2_conn.o \
@@ -21,7 +21,6 @@ http2_objs="dnl
 mod_http2.lo dnl
 h2_alt_svc.lo dnl
 h2_bucket_beam.lo dnl
-h2_bucket_eoc.lo dnl
 h2_bucket_eos.lo dnl
 h2_config.lo dnl
 h2_conn.lo dnl
@@ -68,7 +68,7 @@ struct h2_beam_proxy {
     apr_bucket_refcount refcount;
     APR_RING_ENTRY(h2_beam_proxy) link;
     h2_bucket_beam *beam;
-    apr_bucket *bred;
+    apr_bucket *bsender;
    apr_size_t n;
 };

@@ -78,9 +78,9 @@ static apr_status_t beam_bucket_read(apr_bucket *b, const char **str,
                                      apr_size_t *len, apr_read_type_e block)
 {
     h2_beam_proxy *d = b->data;
-    if (d->bred) {
+    if (d->bsender) {
         const char *data;
-        apr_status_t status = apr_bucket_read(d->bred, &data, len, block);
+        apr_status_t status = apr_bucket_read(d->bsender, &data, len, block);
         if (status == APR_SUCCESS) {
             *str = data + b->start;
             *len = b->length;
@@ -111,24 +111,24 @@ static void beam_bucket_destroy(void *data)

 static apr_bucket * h2_beam_bucket_make(apr_bucket *b,
                                         h2_bucket_beam *beam,
-                                        apr_bucket *bred, apr_size_t n)
+                                        apr_bucket *bsender, apr_size_t n)
 {
     h2_beam_proxy *d;

     d = apr_bucket_alloc(sizeof(*d), b->list);
     H2_BPROXY_LIST_INSERT_TAIL(&beam->proxies, d);
     d->beam = beam;
-    d->bred = bred;
+    d->bsender = bsender;
     d->n = n;

-    b = apr_bucket_shared_make(b, d, 0, bred? bred->length : 0);
+    b = apr_bucket_shared_make(b, d, 0, bsender? bsender->length : 0);
     b->type = &h2_bucket_type_beam;

     return b;
 }

 static apr_bucket *h2_beam_bucket_create(h2_bucket_beam *beam,
-                                         apr_bucket *bred,
+                                         apr_bucket *bsender,
                                          apr_bucket_alloc_t *list,
                                          apr_size_t n)
 {
@@ -137,7 +137,7 @@ static apr_bucket *h2_beam_bucket_create(h2_bucket_beam *beam,
     APR_BUCKET_INIT(b);
     b->free = apr_bucket_free;
     b->list = list;
-    return h2_beam_bucket_make(b, beam, bred, n);
+    return h2_beam_bucket_make(b, beam, bsender, n);
 }

 const apr_bucket_type_t h2_bucket_type_beam = {
@@ -350,11 +350,11 @@ static void h2_beam_emitted(h2_bucket_beam *beam, h2_beam_proxy *proxy)
         /* invoked from receiver thread, the last beam bucket for the send
          * bucket is about to be destroyed.
          * remove it from the hold, where it should be now */
-        if (proxy->bred) {
+        if (proxy->bsender) {
             for (b = H2_BLIST_FIRST(&beam->hold_list);
                  b != H2_BLIST_SENTINEL(&beam->hold_list);
                  b = APR_BUCKET_NEXT(b)) {
-                if (b == proxy->bred) {
+                if (b == proxy->bsender) {
                     break;
                 }
             }
@@ -367,7 +367,7 @@ static void h2_beam_emitted(h2_bucket_beam *beam, h2_beam_proxy *proxy)
                      b != H2_BLIST_SENTINEL(&beam->hold_list);
                      b = next) {
                     next = APR_BUCKET_NEXT(b);
-                    if (b == proxy->bred) {
+                    if (b == proxy->bsender) {
                         APR_BUCKET_REMOVE(b);
                         H2_BLIST_INSERT_TAIL(&beam->purge_list, b);
                         break;
@@ -383,7 +383,7 @@ static void h2_beam_emitted(h2_bucket_beam *beam, h2_beam_proxy *proxy)
                 }
             }

-            proxy->bred = NULL;
+            proxy->bsender = NULL;
         }
         else {
             /* it should be there unless we screwed up */
@@ -391,7 +391,7 @@ static void h2_beam_emitted(h2_bucket_beam *beam, h2_beam_proxy *proxy)
                           APLOGNO(03384) "h2_beam(%d-%s): emitted bucket not "
                           "in hold, n=%d", beam->id, beam->tag,
                           (int)proxy->n);
-            ap_assert(!proxy->bred);
+            ap_assert(!proxy->bsender);
         }
     }
     /* notify anyone waiting on space to become available */
@@ -463,7 +463,7 @@ static apr_status_t beam_send_cleanup(void *data)
         h2_beam_proxy *proxy = H2_BPROXY_LIST_FIRST(&beam->proxies);
         H2_BPROXY_REMOVE(proxy);
         proxy->beam = NULL;
-        proxy->bred = NULL;
+        proxy->bsender = NULL;
     }
     h2_blist_cleanup(&beam->purge_list);
     h2_blist_cleanup(&beam->hold_list);
@@ -685,11 +685,11 @@ apr_status_t h2_beam_wait_empty(h2_bucket_beam *beam, apr_read_type_e block)
 }

 static void move_to_hold(h2_bucket_beam *beam,
-                         apr_bucket_brigade *red_brigade)
+                         apr_bucket_brigade *sender_bb)
 {
     apr_bucket *b;
-    while (red_brigade && !APR_BRIGADE_EMPTY(red_brigade)) {
-        b = APR_BRIGADE_FIRST(red_brigade);
+    while (sender_bb && !APR_BRIGADE_EMPTY(sender_bb)) {
+        b = APR_BRIGADE_FIRST(sender_bb);
         APR_BUCKET_REMOVE(b);
         H2_BLIST_INSERT_TAIL(&beam->send_list, b);
     }
@@ -726,7 +726,7 @@ static apr_status_t append_bucket(h2_bucket_beam *beam,
         }
     }

-    if (space_left < b->length) {
+    if (space_left <= 0) {
         status = r_wait_space(beam, block, pbl, &space_left);
         if (status != APR_SUCCESS) {
             return status;
@@ -739,7 +739,7 @@ static apr_status_t append_bucket(h2_bucket_beam *beam,
     }


-    /* The fundamental problem is that reading a red bucket from
+    /* The fundamental problem is that reading a sender bucket from
      * a green thread is a total NO GO, because the bucket might use
      * its pool/bucket_alloc from a foreign thread and that will
      * corrupt. */
@@ -772,14 +772,21 @@ static apr_status_t append_bucket(h2_bucket_beam *beam,
          * is used on the first read to allocate buffer/mmap.
          * Since setting aside a file bucket will de-register the
          * file cleanup function from the previous pool, we need to
-         * call that from a red thread.
+         * call that only from the sender thread.
          *
+         * Currently, we do not handle file bucket with refcount > 1 as
+         * the beam is then not in complete control of the file's lifetime.
+         * Which results in the bug that a file get closed by the receiver
+         * while the sender or the beam still have buckets using it.
+         *
          * Additionally, we allow callbacks to prevent beaming file
          * handles across. The use case for this is to limit the number
          * of open file handles and rather use a less efficient beam
          * transport. */
-        apr_file_t *fd = ((apr_bucket_file *)b->data)->fd;
-        int can_beam = 1;
-        if (beam->last_beamed != fd && beam->can_beam_fn) {
+        apr_bucket_file *bf = b->data;
+        apr_file_t *fd = bf->fd;
+        int can_beam = (bf->refcount.refcount == 1);
+        if (can_beam && beam->last_beamed != fd && beam->can_beam_fn) {
             can_beam = beam->can_beam_fn(beam->can_beam_ctx, beam, fd);
         }
         if (can_beam) {
@@ -794,7 +801,7 @@ static apr_status_t append_bucket(h2_bucket_beam *beam,
          * but hope that after read, its data stays immutable for the
          * lifetime of the bucket. (see pool bucket handling above for
          * a counter example).
-         * We do the read while in a red thread, so that the bucket may
+         * We do the read while in the sender thread, so that the bucket may
          * use pools/allocators safely. */
         if (space_left < APR_BUCKET_BUFF_SIZE) {
             space_left = APR_BUCKET_BUFF_SIZE;
@@ -822,7 +829,7 @@ static apr_status_t append_bucket(h2_bucket_beam *beam,
 void h2_beam_send_from(h2_bucket_beam *beam, apr_pool_t *p)
 {
     h2_beam_lock bl;
-    /* Called from the red thread to add buckets to the beam */
+    /* Called from the sender thread to add buckets to the beam */
     if (enter_yellow(beam, &bl) == APR_SUCCESS) {
         r_purge_sent(beam);
         beam_set_send_pool(beam, p);
@@ -831,29 +838,28 @@ void h2_beam_send_from(h2_bucket_beam *beam, apr_pool_t *p)
 }

 apr_status_t h2_beam_send(h2_bucket_beam *beam,
-                          apr_bucket_brigade *red_brigade,
+                          apr_bucket_brigade *sender_bb,
                           apr_read_type_e block)
 {
     apr_bucket *b;
     apr_status_t status = APR_SUCCESS;
     h2_beam_lock bl;

-    /* Called from the red thread to add buckets to the beam */
+    /* Called from the sender thread to add buckets to the beam */
     if (enter_yellow(beam, &bl) == APR_SUCCESS) {
         r_purge_sent(beam);
-        if (red_brigade && !beam->send_pool) {
-            beam_set_send_pool(beam, red_brigade->p);
+        if (sender_bb && !beam->send_pool) {
+            beam_set_send_pool(beam, sender_bb->p);
         }

         if (beam->aborted) {
-            move_to_hold(beam, red_brigade);
+            move_to_hold(beam, sender_bb);
             status = APR_ECONNABORTED;
         }
-        else if (red_brigade) {
-            int force_report = !APR_BRIGADE_EMPTY(red_brigade);
-            while (!APR_BRIGADE_EMPTY(red_brigade)
-                   && status == APR_SUCCESS) {
-                b = APR_BRIGADE_FIRST(red_brigade);
+        else if (sender_bb) {
+            int force_report = !APR_BRIGADE_EMPTY(sender_bb);
+            while (!APR_BRIGADE_EMPTY(sender_bb) && status == APR_SUCCESS) {
+                b = APR_BRIGADE_FIRST(sender_bb);
                 status = append_bucket(beam, b, block, &bl);
             }
             report_prod_io(beam, force_report);
@@ -873,7 +879,7 @@ apr_status_t h2_beam_receive(h2_bucket_beam *beam,
                              apr_off_t readbytes)
 {
     h2_beam_lock bl;
-    apr_bucket *bred, *bgreen, *ng;
+    apr_bucket *bsender, *bgreen, *ng;
     int transferred = 0;
     apr_status_t status = APR_SUCCESS;
     apr_off_t remain = readbytes;
@@ -905,35 +911,35 @@ transfer:
             ++transferred;
         }

-        /* transfer from our red brigade, transforming red buckets to
+        /* transfer from our sender brigade, transforming sender buckets to
          * green ones until we have enough */
         while (!H2_BLIST_EMPTY(&beam->send_list) && (readbytes <= 0 || remain >= 0)) {
-            bred = H2_BLIST_FIRST(&beam->send_list);
+            bsender = H2_BLIST_FIRST(&beam->send_list);
             bgreen = NULL;

-            if (readbytes > 0 && bred->length > 0 && remain <= 0) {
+            if (readbytes > 0 && bsender->length > 0 && remain <= 0) {
                 break;
             }

-            if (APR_BUCKET_IS_METADATA(bred)) {
-                if (APR_BUCKET_IS_EOS(bred)) {
+            if (APR_BUCKET_IS_METADATA(bsender)) {
+                if (APR_BUCKET_IS_EOS(bsender)) {
                     bgreen = apr_bucket_eos_create(bb->bucket_alloc);
                     beam->close_sent = 1;
                 }
-                else if (APR_BUCKET_IS_FLUSH(bred)) {
+                else if (APR_BUCKET_IS_FLUSH(bsender)) {
                     bgreen = apr_bucket_flush_create(bb->bucket_alloc);
                 }
-                else if (AP_BUCKET_IS_ERROR(bred)) {
-                    ap_bucket_error *eb = (ap_bucket_error *)bred;
+                else if (AP_BUCKET_IS_ERROR(bsender)) {
+                    ap_bucket_error *eb = (ap_bucket_error *)bsender;
                     bgreen = ap_bucket_error_create(eb->status, eb->data,
                                                     bb->p, bb->bucket_alloc);
                 }
             }
-            else if (APR_BUCKET_IS_FILE(bred)) {
+            else if (APR_BUCKET_IS_FILE(bsender)) {
                 /* This is set aside into the target brigade pool so that
                  * any read operation messes with that pool and not
-                 * the red one. */
-                apr_bucket_file *f = (apr_bucket_file *)bred->data;
+                 * the sender one. */
+                apr_bucket_file *f = (apr_bucket_file *)bsender->data;
                 apr_file_t *fd = f->fd;
                 int setaside = (f->readpool != bb->p);

@@ -944,7 +950,7 @@ transfer:
                     }
                     ++beam->files_beamed;
                 }
-                ng = apr_brigade_insert_file(bb, fd, bred->start, bred->length,
+                ng = apr_brigade_insert_file(bb, fd, bsender->start, bsender->length,
                                              bb->p);
 #if APR_HAS_MMAP
                 /* disable mmap handling as this leads to segfaults when
@@ -952,28 +958,28 @@ transfer:
                  * been handed out. See also PR 59348 */
                 apr_bucket_file_enable_mmap(ng, 0);
 #endif
-                remain -= bred->length;
+                remain -= bsender->length;
                 ++transferred;
-                APR_BUCKET_REMOVE(bred);
-                H2_BLIST_INSERT_TAIL(&beam->hold_list, bred);
+                APR_BUCKET_REMOVE(bsender);
+                H2_BLIST_INSERT_TAIL(&beam->hold_list, bsender);
                 ++transferred;
                 continue;
             }
             else {
                 /* create a "green" standin bucket. we took care about the
-                 * underlying red bucket and its data when we placed it into
-                 * the red brigade.
-                 * the beam bucket will notify us on destruction that bred is
+                 * underlying sender bucket and its data when we placed it into
+                 * the sender brigade.
+                 * the beam bucket will notify us on destruction that bsender is
                  * no longer needed. */
-                bgreen = h2_beam_bucket_create(beam, bred, bb->bucket_alloc,
+                bgreen = h2_beam_bucket_create(beam, bsender, bb->bucket_alloc,
                                                beam->buckets_sent++);
             }

-            /* Place the red bucket into our hold, to be destroyed when no
+            /* Place the sender bucket into our hold, to be destroyed when no
              * green bucket references it any more. */
-            APR_BUCKET_REMOVE(bred);
-            H2_BLIST_INSERT_TAIL(&beam->hold_list, bred);
-            beam->received_bytes += bred->length;
+            APR_BUCKET_REMOVE(bsender);
+            H2_BLIST_INSERT_TAIL(&beam->hold_list, bsender);
+            beam->received_bytes += bsender->length;
             ++transferred_buckets;

             if (bgreen) {
@@ -982,7 +988,7 @@ transfer:
                 ++transferred;
             }
             else {
-                bgreen = h2_beam_bucket(beam, bb, bred);
+                bgreen = h2_beam_bucket(beam, bb, bsender);
                 while (bgreen && bgreen != APR_BRIGADE_SENTINEL(bb)) {
                     ++transferred;
                     remain -= bgreen->length;
@@ -72,7 +72,7 @@ apr_size_t h2_util_bl_print(char *buffer, apr_size_t bmax,
  * A h2_bucket_beam solves the task of transferring buckets, esp. their data,
  * across threads with zero buffer copies.
  *
- * When a thread, let's call it the red thread, wants to send buckets to
+ * When a thread, let's call it the sender thread, wants to send buckets to
  * another, the green thread, it creates a h2_bucket_beam and adds buckets
  * via the h2_beam_send(). It gives the beam to the green thread which then
  * can receive buckets into its own brigade via h2_beam_receive().
@@ -92,7 +92,7 @@ apr_size_t h2_util_bl_print(char *buffer, apr_size_t bmax,
  *
  * The proper way of shutting down a beam is to first make sure there are no
  * more green buckets out there, then cleanup the beam to purge eventually
- * still existing red buckets and then, possibly, terminate the beam itself
+ * still existing sender buckets and then, possibly, terminate the beam itself
  * (or the pool it was created with).
  *
  * The following restrictions apply to bucket transport:
@@ -105,32 +105,32 @@ apr_size_t h2_util_bl_print(char *buffer, apr_size_t bmax,
  * - file buckets will transfer the file itself into a new bucket, if allowed
  * - all other buckets are read on send to make sure data is present
  *
- * This assures that when the red thread sends its red buckets, the data
- * is made accessible while still on the red side. The red bucket then enters
+ * This assures that when the sender thread sends its sender buckets, the data
+ * is made accessible while still on the sender side. The sender bucket then enters
  * the beams hold storage.
- * When the green thread calls receive, red buckets in the hold are wrapped
+ * When the green thread calls receive, sender buckets in the hold are wrapped
  * into special beam buckets. Beam buckets on read present the data directly
- * from the internal red one, but otherwise live on the green side. When a
+ * from the internal sender one, but otherwise live on the green side. When a
  * beam bucket gets destroyed, it notifies its beam that the corresponding
- * red bucket from the hold may be destroyed.
+ * sender bucket from the hold may be destroyed.
  * Since the destruction of green buckets happens in the green thread, any
- * corresponding red bucket can not immediately be destroyed, as that would
+ * corresponding sender bucket can not immediately be destroyed, as that would
  * result in race conditions.
- * Instead, the beam transfers such red buckets from the hold to the purge
- * storage. Next time there is a call from the red side, the buckets in
+ * Instead, the beam transfers such sender buckets from the hold to the purge
+ * storage. Next time there is a call from the sender side, the buckets in
  * purge will be deleted.
  *
- * There are callbacks that can be registered with a beam:
- * - a "consumed" callback that gets called on the red side with the
+ * There are callbacks that can be registesender with a beam:
+ * - a "consumed" callback that gets called on the sender side with the
  *   amount of data that has been received by the green side. The amount
- *   is a delta from the last callback invocation. The red side can trigger
+ *   is a delta from the last callback invocation. The sender side can trigger
  *   these callbacks by calling h2_beam_send() with a NULL brigade.
  * - a "can_beam_file" callback that can prohibit the transfer of file handles
  *   through the beam. This will cause file buckets to be read on send and
  *   its data buffer will then be transports just like a heap bucket would.
  *   When no callback is registered, no restrictions apply and all files are
  *   passed through.
- *   File handles transferred to the green side will stay there until the
+ *   File handles transfersender to the green side will stay there until the
  *   receiving brigade's pool is destroyed/cleared. If the pool lives very
  *   long or if many different files are beamed, the process might run out
  *   of available file handles.
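The header comment in the hunks above (sender buckets parked in a hold list, released by the receiver only into a purge list, and freed later on the sending side) is the lifetime rule this commit leans on for the crash fix. The toy program below sketches that hand-over pattern in plain C with a pthread mutex; it is an illustration only, not httpd code, and every name in it (slot, beam_t, receiver_done, sender_purge) is invented for the sketch.

/* Toy sketch of the hold/purge hand-over described above; not httpd code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct slot {
    struct slot *next;
    int id;
} slot;

typedef struct {
    pthread_mutex_t lock;
    slot *hold;   /* sender-owned data still referenced by the receiver side */
    slot *purge;  /* released by the receiver, to be freed by the sender */
} beam_t;

/* receiver side: the wrapper for 's' was destroyed; do NOT free it here,
 * only move it from the hold list to the purge list under the lock */
static void receiver_done(beam_t *beam, slot *s)
{
    slot **pp;

    pthread_mutex_lock(&beam->lock);
    for (pp = &beam->hold; *pp && *pp != s; pp = &(*pp)->next)
        ;
    if (*pp) {
        *pp = s->next;          /* unlink from hold */
    }
    s->next = beam->purge;      /* chain into purge */
    beam->purge = s;
    pthread_mutex_unlock(&beam->lock);
}

/* sender side: on its next call, free everything the receiver released */
static void sender_purge(beam_t *beam)
{
    slot *p, *next;

    pthread_mutex_lock(&beam->lock);
    p = beam->purge;
    beam->purge = NULL;
    pthread_mutex_unlock(&beam->lock);

    for (; p; p = next) {
        next = p->next;
        printf("sender frees slot %d\n", p->id);
        free(p);                /* safe: we are back on the sending side */
    }
}

int main(void)
{
    beam_t beam = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL };
    slot *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));

    a->id = 1; a->next = NULL;
    b->id = 2; b->next = a;
    beam.hold = b;              /* sender parked two slots in the hold */

    receiver_done(&beam, a);    /* receiver is done with slot 1 ... */
    sender_purge(&beam);        /* ... sender frees it on its next call */

    sender_purge(&beam);        /* nothing pending: prints nothing */
    free(b);                    /* slot 2 never left the hold in this demo */
    return 0;
}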
@@ -1,88 +0,0 @@
-/* Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <assert.h>
-#include <stddef.h>
-
-#include <httpd.h>
-#include <http_core.h>
-#include <http_connection.h>
-#include <http_log.h>
-
-#include "h2_private.h"
-#include "h2.h"
-#include "h2_mplx.h"
-#include "h2_session.h"
-#include "h2_bucket_eoc.h"
-
-typedef struct {
-    apr_bucket_refcount refcount;
-    h2_session *session;
-} h2_bucket_eoc;
-
-static apr_status_t bucket_read(apr_bucket *b, const char **str,
-                                apr_size_t *len, apr_read_type_e block)
-{
-    (void)b;
-    (void)block;
-    *str = NULL;
-    *len = 0;
-    return APR_SUCCESS;
-}
-
-apr_bucket * h2_bucket_eoc_make(apr_bucket *b, h2_session *session)
-{
-    h2_bucket_eoc *h;
-
-    h = apr_bucket_alloc(sizeof(*h), b->list);
-    h->session = session;
-
-    b = apr_bucket_shared_make(b, h, 0, 0);
-    b->type = &h2_bucket_type_eoc;
-
-    return b;
-}
-
-apr_bucket * h2_bucket_eoc_create(apr_bucket_alloc_t *list, h2_session *session)
-{
-    apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
-
-    APR_BUCKET_INIT(b);
-    b->free = apr_bucket_free;
-    b->list = list;
-    return h2_bucket_eoc_make(b, session);
-}
-
-static void bucket_destroy(void *data)
-{
-    h2_bucket_eoc *h = data;
-
-    if (apr_bucket_shared_destroy(h)) {
-        h2_session *session = h->session;
-        apr_bucket_free(h);
-        h2_session_eoc_callback(session);
-    }
-}
-
-const apr_bucket_type_t h2_bucket_type_eoc = {
-    "H2EOC", 5, APR_BUCKET_METADATA,
-    bucket_destroy,
-    bucket_read,
-    apr_bucket_setaside_noop,
-    apr_bucket_split_notimpl,
-    apr_bucket_shared_copy
-};
@@ -1,32 +0,0 @@
-/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef mod_http2_h2_bucket_eoc_h
-#define mod_http2_h2_bucket_eoc_h
-
-struct h2_session;
-
-/** End Of HTTP/2 SESSION (H2EOC) bucket */
-extern const apr_bucket_type_t h2_bucket_type_eoc;
-
-#define H2_BUCKET_IS_H2EOC(e)     (e->type == &h2_bucket_type_eoc)
-
-apr_bucket * h2_bucket_eoc_make(apr_bucket *b,
-                                struct h2_session *session);
-
-apr_bucket * h2_bucket_eoc_create(apr_bucket_alloc_t *list,
-                                  struct h2_session *session);
-
-#endif /* mod_http2_h2_bucket_eoc_h */
@@ -48,7 +48,7 @@ static h2_config defconf = {
     -1,                     /* min workers */
     -1,                     /* max workers */
     10 * 60,                /* max workers idle secs */
-    64 * 1024,              /* stream max mem size */
+    32 * 1024,              /* stream max mem size */
     NULL,                   /* no alt-svcs */
     -1,                     /* alt-svc max age */
     0,                      /* serialize headers */
@@ -24,7 +24,6 @@
 #include <http_request.h>

 #include "h2_private.h"
-#include "h2_bucket_eoc.h"
 #include "h2_bucket_eos.h"
 #include "h2_config.h"
 #include "h2_conn_io.h"
@@ -72,9 +71,6 @@ static void h2_conn_io_bb_log(conn_rec *c, int stream_id, int level,
             else if (AP_BUCKET_IS_EOR(b)) {
                 off += apr_snprintf(buffer+off, bmax-off, "eor ");
             }
-            else if (H2_BUCKET_IS_H2EOC(b)) {
-                off += apr_snprintf(buffer+off, bmax-off, "h2eoc ");
-            }
             else if (H2_BUCKET_IS_H2EOS(b)) {
                 off += apr_snprintf(buffer+off, bmax-off, "h2eos ");
             }
@@ -269,8 +265,7 @@ static void check_write_size(h2_conn_io *io)
     }
 }

-static apr_status_t pass_output(h2_conn_io *io, int flush,
-                                h2_session *session_eoc)
+static apr_status_t pass_output(h2_conn_io *io, int flush)
 {
     conn_rec *c = io->c;
     apr_bucket_brigade *bb = io->output;
@@ -303,20 +298,6 @@ static apr_status_t pass_output(h2_conn_io *io, int flush,
     }
     apr_brigade_cleanup(bb);

-    if (session_eoc) {
-        apr_status_t tmp;
-        b = h2_bucket_eoc_create(c->bucket_alloc, session_eoc);
-        APR_BRIGADE_INSERT_TAIL(bb, b);
-        h2_conn_io_bb_log(c, 0, APLOG_TRACE2, "master conn pass", bb);
-        tmp = ap_pass_brigade(c->output_filters, bb);
-        if (status == APR_SUCCESS) {
-            status = tmp;
-        }
-        /* careful with access to io after this, we have flushed an EOC bucket
-         * that de-allocated us all. */
-        apr_brigade_cleanup(bb);
-    }
-
     if (status != APR_SUCCESS) {
         ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03044)
                       "h2_conn_io(%ld): pass_out brigade %ld bytes",
@@ -343,16 +324,11 @@ int h2_conn_io_needs_flush(h2_conn_io *io)
 apr_status_t h2_conn_io_flush(h2_conn_io *io)
 {
     apr_status_t status;
-    status = pass_output(io, 1, NULL);
+    status = pass_output(io, 1);
     check_write_size(io);
     return status;
 }

-apr_status_t h2_conn_io_write_eoc(h2_conn_io *io, h2_session *session)
-{
-    return pass_output(io, 1, session);
-}
-
 apr_status_t h2_conn_io_write(h2_conn_io *io, const char *data, size_t length)
 {
     apr_status_t status = APR_SUCCESS;

@@ -61,12 +61,6 @@ apr_status_t h2_conn_io_write(h2_conn_io *io,

 apr_status_t h2_conn_io_pass(h2_conn_io *io, apr_bucket_brigade *bb);

-/**
- * Append an End-Of-Connection bucket to the output that, once destroyed,
- * will tear down the complete http2 session.
- */
-apr_status_t h2_conn_io_write_eoc(h2_conn_io *io, struct h2_session *session);
-
 /**
  * Pass any buffered data on to the connection output filters.
  * @param io the connection io
@@ -199,9 +199,13 @@ static void check_tx_free(h2_mplx *m)
     }
 }

+typedef struct {
+    h2_mplx *m;
+} purge_ctx;
+
 static int purge_stream(void *ctx, void *val)
 {
-    h2_mplx *m = ctx;
+    purge_ctx *pctx = ctx;
     h2_stream *stream = val;
     int stream_id = stream->id;
     h2_task *task;
@@ -211,20 +215,22 @@ static int purge_stream(void *ctx, void *val)
      * before task destruction, otherwise it will complain. */
     h2_stream_cleanup(stream);

-    task = h2_ihash_get(m->tasks, stream_id);
+    task = h2_ihash_get(pctx->m->tasks, stream_id);
     if (task) {
-        task_destroy(m, task, 1);
+        task_destroy(pctx->m, task, 1);
     }

     h2_stream_destroy(stream);
-    h2_ihash_remove(m->spurge, stream_id);
+    h2_ihash_remove(pctx->m->spurge, stream_id);
     return 0;
 }

 static void purge_streams(h2_mplx *m)
 {
     if (!h2_ihash_empty(m->spurge)) {
-        while(!h2_ihash_iter(m->spurge, purge_stream, m)) {
+        purge_ctx ctx;
+        ctx.m = m;
+        while(!h2_ihash_iter(m->spurge, purge_stream, &ctx)) {
             /* repeat until empty */
         }
         h2_ihash_clear(m->spurge);
@@ -233,20 +239,13 @@ static void purge_streams(h2_mplx *m)

 static void h2_mplx_destroy(h2_mplx *m)
 {
-    conn_rec **pslave;
     ap_assert(m);
     ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
                   "h2_mplx(%ld): destroy, tasks=%d",
                   m->id, (int)h2_ihash_count(m->tasks));
     check_tx_free(m);
-
-    while (m->spare_slaves->nelts > 0) {
-        pslave = (conn_rec **)apr_array_pop(m->spare_slaves);
-        h2_slave_destroy(*pslave);
-    }
-    if (m->pool) {
-        apr_pool_destroy(m->pool);
-    }
+    /* pool will be destroyed as child of h2_session->pool,
+       slave connection pools are children of m->pool */
 }

 /**
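The purge_ctx hunk above is a small refactor of a common C pattern: an iterator that only forwards an opaque void *ctx, so the caller bundles whatever it needs in a struct and unwraps it inside the callback. The sketch below shows the pattern in isolation; it is not httpd code, and the names (intmap_iter, purge_ctx, purge_if_small) are invented here, with intmap_iter standing in for h2_ihash_iter.

/* Illustration only: passing a typed context through a void* iterator. */
#include <stdio.h>

typedef int (*iter_fn)(void *ctx, int value);

/* stand-in for h2_ihash_iter(): calls fn for each value, stops on 0 */
static int intmap_iter(const int *values, int n, iter_fn fn, void *ctx)
{
    int i;
    for (i = 0; i < n; ++i) {
        if (!fn(ctx, values[i])) {
            return 0;
        }
    }
    return 1;
}

typedef struct {
    int threshold;   /* instead of a bare pointer, bundle what the callback needs */
    int purged;
} purge_ctx;

static int purge_if_small(void *ctx, int value)
{
    purge_ctx *pctx = ctx;              /* unwrap the typed context */
    if (value < pctx->threshold) {
        ++pctx->purged;
    }
    return 1;                           /* keep iterating */
}

int main(void)
{
    const int ids[] = { 3, 17, 5, 42 };
    purge_ctx ctx = { 10, 0 };

    intmap_iter(ids, 4, purge_if_small, &ctx);
    printf("purged %d of 4 entries\n", ctx.purged);  /* prints: purged 2 of 4 */
    return 0;
}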
@@ -442,6 +441,7 @@ static void stream_done(h2_mplx *m, h2_stream *stream, int rst_error)
      */
     h2_iq_remove(m->q, stream->id);
     h2_ihash_remove(m->streams, stream->id);
+    h2_ihash_remove(m->shold, stream->id);

     h2_stream_cleanup(stream);
     m->tx_handles_reserved += h2_beam_get_files_beamed(stream->input);
@@ -467,7 +467,7 @@ static void stream_done(h2_mplx *m, h2_stream *stream, int rst_error)
             task_destroy(m, task, 1);
         }
     }
-    h2_stream_destroy(stream);
+    h2_ihash_add(m->spurge, stream);
 }

 static int stream_done_iter(void *ctx, void *val)
@@ -568,7 +568,7 @@ apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
         h2_mplx_set_consumed_cb(m, NULL, NULL);
         h2_mplx_abort(m);
         h2_iq_clear(m->q);
-        purge_streams(m);
+        h2_ihash_clear(m->spurge);

         /* 3. wakeup all sleeping tasks. Mark all still active streams as 'done'.
          * m->streams has to be empty afterwards with streams either in
@@ -581,10 +581,7 @@ apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
         }
         ap_assert(h2_ihash_empty(m->streams));

-        /* 4. purge all streams we collected by marking them 'done' */
-        purge_streams(m);
-
-        /* 5. while workers are busy on this connection, meaning they
+        /* 4. while workers are busy on this connection, meaning they
          * are processing tasks from this connection, wait on them finishing
          * to wake us and check again. Eventually, this has to succeed. */
         m->join_wait = wait;
@@ -602,11 +599,10 @@ apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
                 h2_ihash_iter(m->shold, report_stream_iter, m);
                 h2_ihash_iter(m->tasks, task_print, m);
             }
-            purge_streams(m);
         }
         m->join_wait = NULL;

-        /* 6. All workers for this connection are done, we are in
+        /* 5. All workers for this connection are done, we are in
          * single-threaded processing now effectively. */
         leave_mutex(m, acquired);

@@ -625,12 +621,10 @@ apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
             }
         }

-        /* 7. With all tasks done, the stream hold should be empty and all
-         * remaining streams are ready for purging */
+        /* 6. With all tasks done, the stream hold should be empty now. */
         ap_assert(h2_ihash_empty(m->shold));
-        purge_streams(m);

-        /* 8. close the h2_req_enginge shed and self destruct */
+        /* 7. close the h2_req_enginge shed and self destruct */
         h2_ngn_shed_destroy(m->ngn_shed);
         m->ngn_shed = NULL;
         h2_mplx_destroy(m);
@@ -913,7 +907,6 @@ static h2_task *next_stream_task(h2_mplx *m)
                 h2_slave_run_pre_connection(slave, ap_get_conn_socket(slave));
             }
             stream->started = 1;
-            stream->can_be_cleaned = 0;
             task->worker_started = 1;
             task->started_at = apr_time_now();
             if (sid > m->max_stream_started) {
@@ -1392,6 +1385,7 @@ apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m,
     apr_atomic_set32(&m->event_pending, 0);
     /* update input windows for streams */
     h2_ihash_iter(m->streams, report_consumption_iter, m);
+    purge_streams(m);

     if (!h2_iq_empty(m->readyq)) {
         n = h2_iq_mshift(m->readyq, ids, H2_ALEN(ids));
@@ -29,7 +29,6 @@

 #include "h2_private.h"
 #include "h2.h"
-#include "h2_bucket_eoc.h"
 #include "h2_bucket_eos.h"
 #include "h2_config.h"
 #include "h2_ctx.h"
@@ -756,13 +755,6 @@ static void h2_session_cleanup(h2_session *session)
     }
 }

-static void h2_session_destroy(h2_session *session)
-{
-    apr_pool_t *p = session->pool;
-    session->pool = NULL;
-    apr_pool_destroy(p);
-}
-
 static apr_status_t h2_session_shutdown_notice(h2_session *session)
 {
     apr_status_t status;
@@ -969,7 +961,6 @@ static h2_session *h2_session_create_int(conn_rec *c,
     if (status != APR_SUCCESS) {
         ap_log_cerror(APLOG_MARK, APLOG_ERR, status, c, APLOGNO(02927)
                       "nghttp2: error in init_callbacks");
-        h2_session_destroy(session);
         return NULL;
     }

@@ -978,7 +969,6 @@ static h2_session *h2_session_create_int(conn_rec *c,
         ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c,
                       APLOGNO(02928) "nghttp2_option_new: %s",
                       nghttp2_strerror(rv));
-        h2_session_destroy(session);
         return NULL;
     }
     nghttp2_option_set_peer_max_concurrent_streams(
@@ -1009,7 +999,6 @@ static h2_session *h2_session_create_int(conn_rec *c,
         ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c,
                       APLOGNO(02929) "nghttp2_session_server_new: %s",
                       nghttp2_strerror(rv));
-        h2_session_destroy(session);
         return NULL;
     }

@@ -1042,18 +1031,6 @@ h2_session *h2_session_rcreate(request_rec *r, h2_ctx *ctx, h2_workers *workers)
     return h2_session_create_int(r->connection, r, ctx, workers);
 }

-void h2_session_eoc_callback(h2_session *session)
-{
-    /* keep us from destroying the pool, if it's already done (cleanup). */
-    if (session->pool) {
-        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
-                      "session(%ld): cleanup and destroy", session->id);
-        apr_pool_cleanup_kill(session->pool, session, session_pool_cleanup);
-        h2_session_cleanup(session);
-        h2_session_destroy(session);
-    }
-}
-
 static apr_status_t h2_session_start(h2_session *session, int *rv)
 {
     apr_status_t status = APR_SUCCESS;
@@ -2351,10 +2328,6 @@ out:
     status = APR_SUCCESS;
     if (session->state == H2_SESSION_ST_DONE) {
         status = APR_EOF;
-        if (!session->eoc_written) {
-            session->eoc_written = 1;
-            h2_conn_io_write_eoc(&session->io, session);
-        }
     }

     return status;
@@ -94,7 +94,6 @@ typedef struct h2_session {
     h2_session_props remote;       /* properites of remote session */

     unsigned int reprioritize : 1; /* scheduled streams priority changed */
-    unsigned int eoc_written  : 1; /* h2 eoc bucket written */
     unsigned int flush        : 1; /* flushing output necessary */
     unsigned int have_read    : 1; /* session has read client data */
     unsigned int have_written : 1; /* session did write data to client */
@@ -165,13 +164,6 @@ apr_status_t h2_session_process(h2_session *session, int async);
  */
 apr_status_t h2_session_pre_close(h2_session *session, int async);

-/**
- * Cleanup the session and all objects it still contains. This will not
- * destroy h2_task instances that have not finished yet.
- * @param session the session to destroy
- */
-void h2_session_eoc_callback(h2_session *session);
-
 /**
  * Called when a serious error occurred and the session needs to terminate
  * without further connection io.
@@ -168,27 +168,6 @@ static void prepend_response(h2_stream *stream, h2_headers *response)
     APR_BRIGADE_INSERT_HEAD(stream->out_buffer, b);
 }

-static apr_status_t stream_pool_cleanup(void *ctx)
-{
-    h2_stream *stream = ctx;
-    apr_status_t status;
-
-    ap_assert(stream->can_be_cleaned);
-    if (stream->files) {
-        apr_file_t *file;
-        int i;
-        for (i = 0; i < stream->files->nelts; ++i) {
-            file = APR_ARRAY_IDX(stream->files, i, apr_file_t*);
-            status = apr_file_close(file);
-            ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, stream->session->c,
-                          "h2_stream(%ld-%d): destroy, closed file %d",
-                          stream->session->id, stream->id, i);
-        }
-        stream->files = NULL;
-    }
-    return APR_SUCCESS;
-}
-
 h2_stream *h2_stream_open(int id, apr_pool_t *pool, h2_session *session,
                           int initiated_on)
 {
@@ -200,15 +179,12 @@ h2_stream *h2_stream_open(int id, apr_pool_t *pool, h2_session *session,
     stream->state = H2_STREAM_ST_IDLE;
     stream->pool = pool;
     stream->session = session;
-    stream->can_be_cleaned = 1;

     h2_beam_create(&stream->input, pool, id, "input", H2_BEAM_OWNER_SEND, 0);
+    h2_beam_send_from(stream->input, stream->pool);
     h2_beam_create(&stream->output, pool, id, "output", H2_BEAM_OWNER_RECV, 0);

     set_state(stream, H2_STREAM_ST_OPEN);
-    apr_pool_cleanup_register(pool, stream, stream_pool_cleanup,
-                              apr_pool_cleanup_null);
     ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03082)
                   "h2_stream(%ld-%d): opened", session->id, stream->id);
     return stream;
|
||||
void h2_stream_destroy(h2_stream *stream)
|
||||
{
|
||||
ap_assert(stream);
|
||||
ap_assert(!h2_mplx_stream_get(stream->session->mplx, stream->id));
|
||||
ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, stream->session->c,
|
||||
"h2_stream(%ld-%d): destroy",
|
||||
stream->session->id, stream->id);
|
||||
stream->can_be_cleaned = 1;
|
||||
if (stream->pool) {
|
||||
apr_pool_destroy(stream->pool);
|
||||
stream->pool = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
@ -525,8 +500,6 @@ apr_status_t h2_stream_write_data(h2_stream *stream,
|
||||
|
||||
static apr_status_t fill_buffer(h2_stream *stream, apr_size_t amount)
|
||||
{
|
||||
conn_rec *c = stream->session->c;
|
||||
apr_bucket *b;
|
||||
apr_status_t status;
|
||||
|
||||
if (!stream->output) {
|
||||
@ -537,33 +510,6 @@ static apr_status_t fill_buffer(h2_stream *stream, apr_size_t amount)
|
||||
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, stream->session->c,
|
||||
"h2_stream(%ld-%d): beam_received",
|
||||
stream->session->id, stream->id);
|
||||
/* The buckets we reveive are using the stream->out_buffer pool as
|
||||
* lifetime which is exactly what we want since this is stream->pool.
|
||||
*
|
||||
* However: when we send these buckets down the core output filters, the
|
||||
* filter might decide to setaside them into a pool of its own. And it
|
||||
* might decide, after having sent the buckets, to clear its pool.
|
||||
*
|
||||
* This is problematic for file buckets because it then closed the contained
|
||||
* file. Any split off buckets we sent afterwards will result in a
|
||||
* APR_EBADF.
|
||||
*/
|
||||
for (b = APR_BRIGADE_FIRST(stream->out_buffer);
|
||||
b != APR_BRIGADE_SENTINEL(stream->out_buffer);
|
||||
b = APR_BUCKET_NEXT(b)) {
|
||||
if (APR_BUCKET_IS_FILE(b)) {
|
||||
apr_bucket_file *f = (apr_bucket_file *)b->data;
|
||||
apr_pool_t *fpool = apr_file_pool_get(f->fd);
|
||||
if (fpool != c->pool) {
|
||||
apr_bucket_setaside(b, c->pool);
|
||||
if (!stream->files) {
|
||||
stream->files = apr_array_make(stream->pool,
|
||||
5, sizeof(apr_file_t*));
|
||||
}
|
||||
APR_ARRAY_PUSH(stream->files, apr_file_t*) = f->fd;
|
||||
}
|
||||
}
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
|
@@ -57,7 +57,7 @@ struct h2_stream {
     struct h2_bucket_beam *input;
     struct h2_bucket_beam *output;
     apr_bucket_brigade *out_buffer;
-    apr_array_header_t *files;       /* apr_file_t* we collected during I/O */

     int rst_error;                   /* stream error for RST_STREAM */
     unsigned int aborted      : 1;   /* was aborted */
@@ -65,7 +64,6 @@ struct h2_stream {
     unsigned int started      : 1;   /* stream has started processing */
     unsigned int has_response : 1;   /* response headers are known */
     unsigned int push_policy;        /* which push policy to use for this request */
-    unsigned int can_be_cleaned : 1; /* stream pool can be cleaned */

     const h2_priority *pref_priority; /* preferred priority for this stream */
     apr_off_t out_data_frames;       /* # of DATA frames sent */
@@ -109,10 +109,6 @@ SOURCE=./h2_bucket_beam.c
 # End Source File
 # Begin Source File

-SOURCE=./h2_bucket_eoc.c
-# End Source File
-# Begin Source File
-
 SOURCE=./h2_bucket_eos.c
 # End Source File
 # Begin Source File
