Mirror of https://github.com/apache/httpd.git
On the trunk:

*) mod_md: v1.0.1, ServerName/Alias names from pure-http: virtual hosts are no longer
   auto-added to a Managed Domain. Error counts of jobs are persisted. When the server
   restarts (gracefully) any errored staging areas are purged to reset the signup/renewal
   process.

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1812999 13f79535-47bb-0310-9956-ffa450edef68
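For illustration, a minimal httpd.conf sketch of the headline change (hostnames hypothetical; assuming the directive spelling of this era, ManagedDomain, later renamed MDomain). With transitive auto-add, names from every matching vhost used to be pulled into the Managed Domain; as of v1.0.1 a vhost that listens only on the local http: port contributes nothing:

    # Managed Domain: names from matching vhosts are auto-added (transitive)
    ManagedDomain example.org

    <VirtualHost *:443>
        ServerName example.org          # https: vhost, covered as before
    </VirtualHost>

    <VirtualHost *:80>
        ServerName example.org
        ServerAlias www.example.org     # pure-http: vhost: this alias is
    </VirtualHost>                      # no longer auto-added to the MD
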
@@ -1,6 +1,11 @@
 -*- coding: utf-8 -*-
Changes with Apache 2.5.0

  *) mod_md: v1.0.1, ServerName/Alias names from pure-http: virtual hosts are no longer
     auto-added to a Managed Domain. Error counts of jobs are persisted. When the server
     restarts (gracefully) any errored staging areas are purged to reset the signup/renewal
     process. [Stefan Eissing]

  *) htpasswd: Don't fail in -v mode if password file is unwritable.
     PR 61631. [Joe Orton]

@@ -124,6 +124,7 @@ struct md_t {
#define MD_KEY_DOMAIN "domain"
#define MD_KEY_DOMAINS "domains"
#define MD_KEY_DRIVE_MODE "drive-mode"
#define MD_KEY_ERRORS "errors"
#define MD_KEY_EXPIRES "expires"
#define MD_KEY_HTTP "http"
#define MD_KEY_HTTPS "https"

@@ -26,7 +26,7 @@
 * @macro
 * Version number of the md module as c string
 */
#define MOD_MD_VERSION "1.0.0"
#define MOD_MD_VERSION "1.0.1"

/**
 * @macro
@@ -34,7 +34,7 @@
 * release. This is a 24 bit number with 8 bits for major number, 8 bits
 * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
 */
#define MOD_MD_VERSION_NUM 0x010000
#define MOD_MD_VERSION_NUM 0x010001

#define MD_EXPERIMENTAL 0
#define MD_ACME_DEF_URL "https://acme-v01.api.letsencrypt.org/directory"

@@ -163,6 +163,23 @@ static int matches_port_somewhere(server_rec *s, int port)
    return 0;
}

static int uses_port_only(server_rec *s, int port)
{
    server_addr_rec *sa;
    int match = 0;
    for (sa = s->addrs; sa; sa = sa->next) {
        if (sa->host_port == port) {
            /* host_addr might be general (0.0.0.0) or specific, we count this as match */
            match = 1;
        }
        else {
            /* uses other port/wildcard */
            return 0;
        }
    }
    return match;
}

static apr_status_t assign_to_servers(md_t *md, server_rec *base_server,
                                      apr_pool_t *p, apr_pool_t *ptemp)
{
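The new helper answers one question: does this vhost listen exclusively on the given port? Any address bound to another port (or a wildcard) disqualifies it, and an empty address list yields 0. A standalone sketch of that logic with mock types (hypothetical names, runnable outside httpd):

    #include <stdio.h>

    /* mock of the server_addr_rec list that uses_port_only() walks */
    typedef struct addr_rec {
        int host_port;
        struct addr_rec *next;
    } addr_rec;

    static int uses_port_only_sketch(const addr_rec *addrs, int port)
    {
        int match = 0;
        for (const addr_rec *sa = addrs; sa; sa = sa->next) {
            if (sa->host_port == port) {
                match = 1;     /* address binds exactly this port */
            }
            else {
                return 0;      /* any other port/wildcard disqualifies */
            }
        }
        return match;          /* 0 for an empty address list */
    }

    int main(void)
    {
        addr_rec a443 = { 443, NULL };
        addr_rec a80_443 = { 80, &a443 };  /* vhost on :80 and :443 */
        addr_rec a80 = { 80, NULL };       /* vhost on :80 only */

        printf("%d\n", uses_port_only_sketch(&a80, 80));     /* prints 1 */
        printf("%d\n", uses_port_only_sketch(&a80_443, 80)); /* prints 0 */
        return 0;
    }
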
@@ -209,10 +226,15 @@ static apr_status_t assign_to_servers(md_t *md, server_rec *base_server,
        return APR_EINVAL;
    }

    /* If server has name or an alias not covered,
     * a generated certificate will not match.
     */
    if (APR_SUCCESS != (rv = md_covers_server(md, s, ptemp))) {
    /* If this server_rec is only for http: requests, defined
     * alias names do not matter for this MD
     * (see gh issue https://github.com/icing/mod_md/issues/57).
     * Otherwise, if the server has a name or an alias not covered,
     * it is by default auto-added (config transitive).
     * If mode is "manual", a generated certificate will not match
     * all necessary names. */
    if ((!mc->local_80 || !uses_port_only(s, mc->local_80))
        && APR_SUCCESS != (rv = md_covers_server(md, s, ptemp))) {
        return rv;
    }

@@ -596,11 +618,42 @@ static void assess_renewal(md_watchdog *wd, md_job_t *job, apr_pool_t *ptemp)
    }
}

static apr_status_t load_job_props(md_reg_t *reg, md_job_t *job, apr_pool_t *p)
{
    md_store_t *store = md_reg_store_get(reg);
    md_json_t *jprops;
    apr_status_t rv;

    rv = md_store_load_json(store, MD_SG_STAGING, job->md->name,
                            MD_FN_JOB, &jprops, p);
    if (APR_SUCCESS == rv) {
        job->restart_processed = md_json_getb(jprops, MD_KEY_PROCESSED, NULL);
        job->error_runs = (int)md_json_getl(jprops, MD_KEY_ERRORS, NULL);
    }
    return rv;
}

static apr_status_t save_job_props(md_reg_t *reg, md_job_t *job, apr_pool_t *p)
{
    md_store_t *store = md_reg_store_get(reg);
    md_json_t *jprops;
    apr_status_t rv;

    rv = md_store_load_json(store, MD_SG_STAGING, job->md->name, MD_FN_JOB, &jprops, p);
    if (APR_SUCCESS == rv) {
        md_json_setb(job->restart_processed, jprops, MD_KEY_PROCESSED, NULL);
        md_json_setl(job->error_runs, jprops, MD_KEY_ERRORS, NULL);
        rv = md_store_save_json(store, p, MD_SG_STAGING, job->md->name,
                                MD_FN_JOB, jprops, 0);
    }
    return rv;
}

static apr_status_t check_job(md_watchdog *wd, md_job_t *job, apr_pool_t *ptemp)
{
    apr_status_t rv = APR_SUCCESS;
    apr_time_t valid_from, delay;
    int errored, renew;
    int errored, renew, error_runs;
    char ts[APR_RFC822_DATE_LEN];

    if (apr_time_now() < job->next_check) {
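These helpers persist job state as JSON in the staging group of the store, in the file named by MD_FN_JOB ("job.json", as the later hunk shows in its pre-change code). A sketch of the contents, assuming MD_KEY_PROCESSED maps to "processed" (its literal is not shown in this diff; MD_KEY_ERRORS is "errors" per the key added above) and with made-up values:

    {
        "processed": true,
        "errors": 2
    }
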
@@ -609,6 +662,8 @@ static apr_status_t check_job(md_watchdog *wd, md_job_t *job, apr_pool_t *ptemp)
    }

    job->next_check = 0;
    error_runs = job->error_runs;

    if (job->md->state == MD_S_MISSING) {
        job->stalled = 1;
    }

@@ -665,6 +720,10 @@ static apr_status_t check_job(md_watchdog *wd, md_job_t *job, apr_pool_t *ptemp)
                 job->md->name, job->error_runs, md_print_duration(ptemp, delay));
    }

    if (error_runs != job->error_runs) {
        save_job_props(wd->reg, job, ptemp);
    }

    job->last_rv = rv;
    return rv;
}

@@ -673,6 +732,7 @@ static apr_status_t run_watchdog(int state, void *baton, apr_pool_t *ptemp)
{
    md_watchdog *wd = baton;
    apr_status_t rv = APR_SUCCESS;
    md_store_t *store;
    md_job_t *job;
    apr_time_t next_run, now;
    int restart = 0;

@@ -682,10 +742,16 @@ static apr_status_t run_watchdog(int state, void *baton, apr_pool_t *ptemp)
        case AP_WATCHDOG_STATE_STARTING:
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wd->s, APLOGNO(10054)
                         "md watchdog start, auto drive %d mds", wd->jobs->nelts);
            assert(wd->reg);
            store = md_reg_store_get(wd->reg);

            for (i = 0; i < wd->jobs->nelts; ++i) {
                job = APR_ARRAY_IDX(wd->jobs, i, md_job_t *);
                load_job_props(wd->reg, job, ptemp);
            }
            break;
        case AP_WATCHDOG_STATE_RUNNING:
            assert(wd->reg);

            wd->next_change = 0;
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wd->s, APLOGNO(10055)
                         "md watchdog run, auto drive %d mds", wd->jobs->nelts);
@@ -727,22 +793,13 @@ static apr_status_t run_watchdog(int state, void *baton, apr_pool_t *ptemp)

    if (restart) {
        const char *action, *names = "";
        md_json_t *jprops;
        md_store_t *store = md_reg_store_get(wd->reg);
        int n;

        for (i = 0, n = 0; i < wd->jobs->nelts; ++i) {
            job = APR_ARRAY_IDX(wd->jobs, i, md_job_t *);
            if (job->need_restart && !job->restart_processed) {
                rv = md_store_load_json(store, MD_SG_STAGING, job->md->name,
                                        "job.json", &jprops, ptemp);
                if (APR_SUCCESS == rv) {
                    job->restart_processed = md_json_getb(jprops, MD_KEY_PROCESSED, NULL);
                }
                if (!job->restart_processed) {
                    names = apr_psprintf(ptemp, "%s%s%s", names, n? " " : "", job->md->name);
                    ++n;
                }
                names = apr_psprintf(ptemp, "%s%s%s", names, n? " " : "", job->md->name);
                ++n;
            }
        }

@@ -778,14 +835,7 @@ static apr_status_t run_watchdog(int state, void *baton, apr_pool_t *ptemp)
            job = APR_ARRAY_IDX(wd->jobs, i, md_job_t *);
            if (job->need_restart && !job->restart_processed) {
                job->restart_processed = 1;

                rv = md_store_load_json(store, MD_SG_STAGING, job->md->name,
                                        MD_FN_JOB, &jprops, ptemp);
                if (APR_SUCCESS == rv) {
                    md_json_setb(1, jprops, MD_KEY_PROCESSED, NULL);
                    rv = md_store_save_json(store, ptemp, MD_SG_STAGING, job->md->name,
                                            MD_FN_JOB, jprops, 0);
                }
                save_job_props(wd->reg, job, ptemp);
            }
        }
    }

@@ -871,6 +921,18 @@ static apr_status_t start_watchdog(apr_array_header_t *names, apr_pool_t *p,

            ap_log_error( APLOG_MARK, APLOG_DEBUG, 0, wd->s, APLOGNO(10064)
                         "md(%s): state=%d, driving", name, md->state);

            load_job_props(reg, job, wd->p);
            if (job->error_runs) {
                /* We are just restarting. If we encounter jobs that had errors
                 * running the protocol on previous staging runs, we reset
                 * the staging area for it, in case we persisted something that
                 * causes a loop. */
                md_store_t *store = md_reg_store_get(wd->reg);

                md_store_purge(store, p, MD_SG_STAGING, job->md->name);
                md_store_purge(store, p, MD_SG_CHALLENGES, job->md->name);
            }
        }
    }
}
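To see what the purge resets, a sketch of the on-disk layout under the MD state directory, assuming the directory names match the MD_SG_* group names (only the symbols appear in this diff; domain name hypothetical). md_store_purge() drops the named domain's subtree, so the next watchdog run starts signup/renewal from a clean slate while live certificate data stays untouched:

    md/
        staging/example.org/     # renewal workspace incl. job.json -- purged
        challenges/example.org/  # ACME challenge state             -- purged
        domains/example.org/     # live certificate/key material, kept
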
@@ -346,7 +346,7 @@ static const char *md_config_set_names(cmd_parms *cmd, void *dc,
    int i, transitive = -1;

    (void)dc;
    err = ap_check_cmd_context(cmd, NOT_IN_DIR_CONTEXT);
    err = ap_check_cmd_context(cmd, NOT_IN_DIR_LOC_FILE);
    if (err) {
        return err;
    }
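With NOT_IN_DIR_LOC_FILE, ap_check_cmd_context() rejects the Managed Domain names directive inside <Directory>, <Location> and <Files> sections; it remains valid at server and virtual-host scope. A usage sketch (hostnames hypothetical; assuming the era's ManagedDomain spelling):

    # server config or <VirtualHost> scope only:
    ManagedDomain example.org www.example.org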