diff -Nru curl-7.81.0/debian/changelog curl-7.81.0/debian/changelog
--- curl-7.81.0/debian/changelog 2023-01-04 14:53:07.000000000 +0000
+++ curl-7.81.0/debian/changelog 2023-02-15 13:20:05.000000000 +0000
@@ -1,3 +1,31 @@
+curl (7.81.0-1ubuntu1.8) jammy-security; urgency=medium
+
+  * SECURITY UPDATE: multiple HSTS issues
+    - debian/patches/CVE-2023-23914_5-1.patch: add sharing of HSTS cache
+      among handles in docs/libcurl/opts/CURLSHOPT_SHARE.3,
+      docs/libcurl/symbols-in-versions, include/curl/curl.h, lib/hsts.c,
+      lib/hsts.h, lib/setopt.c, lib/share.c, lib/share.h, lib/transfer.c,
+      lib/url.c, lib/urldata.h.
+    - debian/patches/CVE-2023-23914_5-2.patch: share HSTS between handles
+      in src/tool_operate.c.
+    - debian/patches/CVE-2023-23914_5-3.patch: handle adding the same host
+      name again in lib/hsts.c.
+    - debian/patches/CVE-2023-23914_5-4.patch: support crlf="yes" for
+      verify/proxy in tests/FILEFORMAT.md, tests/runtests.pl.
+    - debian/patches/CVE-2023-23914_5-5.patch: verify hsts with two URLs in
+      tests/data/Makefile.inc, tests/data/test446.
+    - CVE-2023-23914
+    - CVE-2023-23915
+  * SECURITY UPDATE: HTTP multi-header compression denial of service
+    - debian/patches/CVE-2023-23916-pre1.patch: do CRLF replacements in
+      tests/FILEFORMAT.md, tests/data/test1, tests/runtests.pl.
+    - debian/patches/CVE-2023-23916.patch: do not reset stage counter for
+      each header in lib/content_encoding.c, lib/urldata.h,
+      tests/data/Makefile.inc, tests/data/test418.
+    - CVE-2023-23916
+
+ -- Marc Deslauriers  Wed, 15 Feb 2023 08:20:05 -0500
+
 curl (7.81.0-1ubuntu1.7) jammy-security; urgency=medium
 
   * SECURITY UPDATE: Another HSTS bypass via IDN
diff -Nru curl-7.81.0/debian/patches/CVE-2023-23914_5-1.patch curl-7.81.0/debian/patches/CVE-2023-23914_5-1.patch
--- curl-7.81.0/debian/patches/CVE-2023-23914_5-1.patch 1970-01-01 00:00:00.000000000 +0000
+++ curl-7.81.0/debian/patches/CVE-2023-23914_5-1.patch 2023-02-15 13:18:44.000000000 +0000
@@ -0,0 +1,301 @@
+Backport of:
+
+From 076a2f629119222aeeb50f5a03bf9f9052fabb9a Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg
+Date: Tue, 27 Dec 2022 11:50:20 +0100
+Subject: [PATCH] share: add sharing of HSTS cache among handles
+
+Closes #10138
+---
+ docs/libcurl/opts/CURLSHOPT_SHARE.3 | 4 +++
+ docs/libcurl/symbols-in-versions | 1 +
+ include/curl/curl.h | 1 +
+ lib/hsts.c | 15 +++++++++
+ lib/hsts.h | 2 ++
+ lib/setopt.c | 48 ++++++++++++++++++++++++-----
+ lib/share.c | 32 +++++++++++++++++--
+ lib/share.h | 6 +++-
+ lib/transfer.c | 3 ++
+ lib/url.c | 6 +++-
+ lib/urldata.h | 2 ++
+ 11 files changed, 109 insertions(+), 11 deletions(-)
+
+--- a/docs/libcurl/opts/CURLSHOPT_SHARE.3
++++ b/docs/libcurl/opts/CURLSHOPT_SHARE.3
+@@ -77,6 +77,10 @@ Added in 7.61.0.
+
+ Note that when you use the multi interface, all easy handles added to the same
+ multi handle will share PSL cache by default without using this option.
++.IP CURL_LOCK_DATA_HSTS
++The in-memory HSTS cache.
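As an illustration of what the new CURL_LOCK_DATA_HSTS value enables (a minimal sketch, not code shipped in this update): an application can hand one in-memory HSTS cache to several easy handles through the share interface, the same way cookies, DNS entries or SSL sessions are shared today. The sketch assumes a libcurl that carries this patch and skips error checking and the lock/unlock callbacks a threaded program would need.

/* Sketch only: share one in-memory HSTS cache between two easy handles.
 * Assumes a libcurl with this change applied (CURL_LOCK_DATA_HSTS). */
#include <curl/curl.h>

int main(void)
{
  curl_global_init(CURL_GLOBAL_DEFAULT);

  CURLSH *share = curl_share_init();
  curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_HSTS);

  CURL *first = curl_easy_init();
  curl_easy_setopt(first, CURLOPT_SHARE, share);
  curl_easy_setopt(first, CURLOPT_HSTS_CTRL, (long)CURLHSTS_ENABLE);
  curl_easy_setopt(first, CURLOPT_URL, "https://example.com/");
  curl_easy_perform(first);   /* may learn a Strict-Transport-Security policy */

  CURL *second = curl_easy_init();
  curl_easy_setopt(second, CURLOPT_SHARE, share);
  curl_easy_setopt(second, CURLOPT_HSTS_CTRL, (long)CURLHSTS_ENABLE);
  curl_easy_setopt(second, CURLOPT_URL, "http://example.com/");
  curl_easy_perform(second);  /* upgraded to https if the shared cache knows the host */

  curl_easy_cleanup(first);
  curl_easy_cleanup(second);
  curl_share_cleanup(share);
  curl_global_cleanup();
  return 0;
}

The curl tool itself does the equivalent in patch 5-2 by adding CURL_LOCK_DATA_HSTS to its existing curl_share_setopt() calls in src/tool_operate.c.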
++ ++Added in 7.88.0 + .SH PROTOCOLS + All + .SH EXAMPLE +--- a/docs/libcurl/symbols-in-versions ++++ b/docs/libcurl/symbols-in-versions +@@ -962,6 +962,7 @@ CURL_LOCK_ACCESS_SINGLE 7.10.3 + CURL_LOCK_DATA_CONNECT 7.10.3 + CURL_LOCK_DATA_COOKIE 7.10.3 + CURL_LOCK_DATA_DNS 7.10.3 ++CURL_LOCK_DATA_HSTS 7.88.0 + CURL_LOCK_DATA_NONE 7.10.3 + CURL_LOCK_DATA_PSL 7.61.0 + CURL_LOCK_DATA_SHARE 7.10.4 +--- a/include/curl/curl.h ++++ b/include/curl/curl.h +@@ -2857,6 +2857,7 @@ typedef enum { + CURL_LOCK_DATA_SSL_SESSION, + CURL_LOCK_DATA_CONNECT, + CURL_LOCK_DATA_PSL, ++ CURL_LOCK_DATA_HSTS, + CURL_LOCK_DATA_LAST + } curl_lock_data; + +--- a/lib/hsts.c ++++ b/lib/hsts.c +@@ -38,6 +38,7 @@ + #include "fopen.h" + #include "rename.h" + #include "strtoofft.h" ++#include "share.h" + + /* The last 3 #include files should be in this order */ + #include "curl_printf.h" +@@ -531,4 +532,18 @@ CURLcode Curl_hsts_loadcb(struct Curl_ea + return CURLE_OK; + } + ++void Curl_hsts_loadfiles(struct Curl_easy *data) ++{ ++ struct curl_slist *l = data->set.hstslist; ++ if(l) { ++ Curl_share_lock(data, CURL_LOCK_DATA_HSTS, CURL_LOCK_ACCESS_SINGLE); ++ ++ while(l) { ++ (void)Curl_hsts_loadfile(data, data->hsts, l->data); ++ l = l->next; ++ } ++ Curl_share_unlock(data, CURL_LOCK_DATA_HSTS); ++ } ++} ++ + #endif /* CURL_DISABLE_HTTP || CURL_DISABLE_HSTS */ +--- a/lib/hsts.h ++++ b/lib/hsts.h +@@ -57,9 +57,11 @@ CURLcode Curl_hsts_loadfile(struct Curl_ + struct hsts *h, const char *file); + CURLcode Curl_hsts_loadcb(struct Curl_easy *data, + struct hsts *h); ++void Curl_hsts_loadfiles(struct Curl_easy *data); + #else + #define Curl_hsts_cleanup(x) + #define Curl_hsts_loadcb(x,y) CURLE_OK + #define Curl_hsts_save(x,y,z) ++#define Curl_hsts_loadfiles(x) + #endif /* CURL_DISABLE_HTTP || CURL_DISABLE_HSTS */ + #endif /* HEADER_CURL_HSTS_H */ +--- a/lib/setopt.c ++++ b/lib/setopt.c +@@ -2236,9 +2236,14 @@ CURLcode Curl_vsetopt(struct Curl_easy * + data->cookies = NULL; + #endif + ++#ifndef CURL_DISABLE_HSTS ++ if(data->share->hsts == data->hsts) ++ data->hsts = NULL; ++#endif ++#ifdef USE_SSL + if(data->share->sslsession == data->state.session) + data->state.session = NULL; +- ++#endif + #ifdef USE_LIBPSL + if(data->psl == &data->share->psl) + data->psl = data->multi? 
&data->multi->psl: NULL; +@@ -2272,10 +2277,19 @@ CURLcode Curl_vsetopt(struct Curl_easy * + data->cookies = data->share->cookies; + } + #endif /* CURL_DISABLE_HTTP */ ++#ifndef CURL_DISABLE_HSTS ++ if(data->share->hsts) { ++ /* first free the private one if any */ ++ Curl_hsts_cleanup(&data->hsts); ++ data->hsts = data->share->hsts; ++ } ++#endif /* CURL_DISABLE_HTTP */ ++#ifdef USE_SSL + if(data->share->sslsession) { + data->set.general_ssl.max_ssl_sessions = data->share->max_ssl_sessions; + data->state.session = data->share->sslsession; + } ++#endif + #ifdef USE_LIBPSL + if(data->share->specifier & (1 << CURL_LOCK_DATA_PSL)) + data->psl = &data->share->psl; +@@ -2990,19 +3004,39 @@ CURLcode Curl_vsetopt(struct Curl_easy * + case CURLOPT_HSTSWRITEDATA: + data->set.hsts_write_userp = va_arg(param, void *); + break; +- case CURLOPT_HSTS: ++ case CURLOPT_HSTS: { ++ struct curl_slist *h; + if(!data->hsts) { + data->hsts = Curl_hsts_init(); + if(!data->hsts) + return CURLE_OUT_OF_MEMORY; + } + argptr = va_arg(param, char *); +- result = Curl_setstropt(&data->set.str[STRING_HSTS], argptr); +- if(result) +- return result; +- if(argptr) +- (void)Curl_hsts_loadfile(data, data->hsts, argptr); ++ if(argptr) { ++ result = Curl_setstropt(&data->set.str[STRING_HSTS], argptr); ++ if(result) ++ return result; ++ /* this needs to build a list of file names to read from, so that it can ++ read them later, as we might get a shared HSTS handle to load them ++ into */ ++ h = curl_slist_append(data->set.hstslist, argptr); ++ if(!h) { ++ curl_slist_free_all(data->set.hstslist); ++ data->set.hstslist = NULL; ++ return CURLE_OUT_OF_MEMORY; ++ } ++ data->set.hstslist = h; /* store the list for later use */ ++ } ++ else { ++ /* clear the list of HSTS files */ ++ curl_slist_free_all(data->set.hstslist); ++ data->set.hstslist = NULL; ++ if(!data->share || !data->share->hsts) ++ /* throw away the HSTS cache unless shared */ ++ Curl_hsts_cleanup(&data->hsts); ++ } + break; ++ } + case CURLOPT_HSTS_CTRL: + arg = va_arg(param, long); + if(arg & CURLHSTS_ENABLE) { +--- a/lib/share.c ++++ b/lib/share.c +@@ -27,9 +27,11 @@ + #include "share.h" + #include "psl.h" + #include "vtls/vtls.h" +-#include "curl_memory.h" ++#include "hsts.h" + +-/* The last #include file should be: */ ++/* The last 3 #include files should be in this order */ ++#include "curl_printf.h" ++#include "curl_memory.h" + #include "memdebug.h" + + struct Curl_share * +@@ -87,6 +89,18 @@ curl_share_setopt(struct Curl_share *sha + #endif + break; + ++ case CURL_LOCK_DATA_HSTS: ++#ifndef CURL_DISABLE_HSTS ++ if(!share->hsts) { ++ share->hsts = Curl_hsts_init(); ++ if(!share->hsts) ++ res = CURLSHE_NOMEM; ++ } ++#else /* CURL_DISABLE_HSTS */ ++ res = CURLSHE_NOT_BUILT_IN; ++#endif ++ break; ++ + case CURL_LOCK_DATA_SSL_SESSION: + #ifdef USE_SSL + if(!share->sslsession) { +@@ -139,6 +153,16 @@ curl_share_setopt(struct Curl_share *sha + #endif + break; + ++ case CURL_LOCK_DATA_HSTS: ++#ifndef CURL_DISABLE_HSTS ++ if(share->hsts) { ++ Curl_hsts_cleanup(&share->hsts); ++ } ++#else /* CURL_DISABLE_HSTS */ ++ res = CURLSHE_NOT_BUILT_IN; ++#endif ++ break; ++ + case CURL_LOCK_DATA_SSL_SESSION: + #ifdef USE_SSL + Curl_safefree(share->sslsession); +@@ -205,6 +229,10 @@ curl_share_cleanup(struct Curl_share *sh + Curl_cookie_cleanup(share->cookies); + #endif + ++#ifndef CURL_DISABLE_HSTS ++ Curl_hsts_cleanup(&share->hsts); ++#endif ++ + #ifdef USE_SSL + if(share->sslsession) { + size_t i; +--- a/lib/share.h ++++ b/lib/share.h +@@ -57,10 +57,14 @@ struct Curl_share { + 
#ifdef USE_LIBPSL + struct PslCache psl; + #endif +- ++#ifndef CURL_DISABLE_HSTS ++ struct hsts *hsts; ++#endif ++#ifdef USE_SSL + struct Curl_ssl_session *sslsession; + size_t max_ssl_sessions; + long sessionage; ++#endif + }; + + CURLSHcode Curl_share_lock(struct Curl_easy *, curl_lock_data, +--- a/lib/transfer.c ++++ b/lib/transfer.c +@@ -1468,6 +1468,9 @@ CURLcode Curl_pretransfer(struct Curl_ea + if(data->state.resolve) + result = Curl_loadhostpairs(data); + ++ /* If there is a list of hsts files to read */ ++ Curl_hsts_loadfiles(data); ++ + if(!result) { + /* Allow data->set.use_port to set which port to use. This needs to be + * disabled for example when we follow Location: headers to URLs using +--- a/lib/url.c ++++ b/lib/url.c +@@ -434,7 +434,11 @@ CURLcode Curl_close(struct Curl_easy **d + Curl_altsvc_save(data, data->asi, data->set.str[STRING_ALTSVC]); + Curl_altsvc_cleanup(&data->asi); + Curl_hsts_save(data, data->hsts, data->set.str[STRING_HSTS]); +- Curl_hsts_cleanup(&data->hsts); ++#ifndef CURL_DISABLE_HSTS ++ if(!data->share || !data->share->hsts) ++ Curl_hsts_cleanup(&data->hsts); ++ curl_slist_free_all(data->set.hstslist); /* clean up list */ ++#endif + #if !defined(CURL_DISABLE_HTTP) && !defined(CURL_DISABLE_CRYPTO_AUTH) + Curl_http_auth_cleanup_digest(data); + #endif +--- a/lib/urldata.h ++++ b/lib/urldata.h +@@ -1676,6 +1676,8 @@ struct UserDefined { + /* function to convert from UTF-8 encoding: */ + curl_conv_callback convfromutf8; + #ifndef CURL_DISABLE_HSTS ++ struct curl_slist *hstslist; /* list of HSTS files set by ++ curl_easy_setopt(HSTS) calls */ + curl_hstsread_callback hsts_read; + void *hsts_read_userp; + curl_hstswrite_callback hsts_write; diff -Nru curl-7.81.0/debian/patches/CVE-2023-23914_5-2.patch curl-7.81.0/debian/patches/CVE-2023-23914_5-2.patch --- curl-7.81.0/debian/patches/CVE-2023-23914_5-2.patch 1970-01-01 00:00:00.000000000 +0000 +++ curl-7.81.0/debian/patches/CVE-2023-23914_5-2.patch 2023-02-15 13:18:49.000000000 +0000 @@ -0,0 +1,19 @@ +From 0bf8b796a0ea98395b390c7807187982215f5c11 Mon Sep 17 00:00:00 2001 +From: Daniel Stenberg +Date: Tue, 27 Dec 2022 11:50:23 +0100 +Subject: [PATCH] tool_operate: share HSTS between handles + +--- + src/tool_operate.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/src/tool_operate.c ++++ b/src/tool_operate.c +@@ -2656,6 +2656,7 @@ CURLcode operate(struct GlobalConfig *gl + curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_SSL_SESSION); + curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_CONNECT); + curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_PSL); ++ curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_HSTS); + + /* Get the required arguments for each operation */ + do { diff -Nru curl-7.81.0/debian/patches/CVE-2023-23914_5-3.patch curl-7.81.0/debian/patches/CVE-2023-23914_5-3.patch --- curl-7.81.0/debian/patches/CVE-2023-23914_5-3.patch 1970-01-01 00:00:00.000000000 +0000 +++ curl-7.81.0/debian/patches/CVE-2023-23914_5-3.patch 2023-02-15 13:18:54.000000000 +0000 @@ -0,0 +1,38 @@ +From ca02a77f05bd5cef20618c8f741aa48b7be0a648 Mon Sep 17 00:00:00 2001 +From: Daniel Stenberg +Date: Tue, 27 Dec 2022 11:50:23 +0100 +Subject: [PATCH] hsts: handle adding the same host name again + +It will then use the largest expire time of the two entries. 
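Put differently, loading the same host name twice, which becomes easy once several HSTS files can feed one shared cache, must not duplicate the entry; only the larger expiry survives. The standalone sketch below restates that merge rule with illustrative types (an array-backed cache and a hypothetical add_or_merge() helper, not libcurl's internal hsts/stsentry structures).

/* Sketch of the merge rule from patch 5-3: adding an already-cached host
 * keeps a single entry and only bumps the expire time upwards. */
#include <stdio.h>
#include <string.h>
#include <time.h>

struct entry { const char *host; time_t expires; };

static void add_or_merge(struct entry *cache, size_t *count,
                         const char *host, time_t expires)
{
  for(size_t i = 0; i < *count; i++) {
    if(strcmp(cache[i].host, host) == 0) {
      if(expires > cache[i].expires)  /* same host: use the largest expiry */
        cache[i].expires = expires;
      return;
    }
  }
  cache[*count].host = host;          /* unknown host: create a new entry */
  cache[*count].expires = expires;
  (*count)++;
}

int main(void)
{
  struct entry cache[8];
  size_t count = 0;
  add_or_merge(cache, &count, "this.hsts.example", 2000604800);
  add_or_merge(cache, &count, "this.hsts.example", 2000000000);
  printf("%zu entry, expires %lld\n", count, (long long)cache[0].expires);
  return 0;   /* prints: 1 entry, expires 2000604800 */
}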
+--- + lib/hsts.c | 13 +++++++++++-- + 1 file changed, 11 insertions(+), 2 deletions(-) + +--- a/lib/hsts.c ++++ b/lib/hsts.c +@@ -405,14 +405,23 @@ static CURLcode hsts_add(struct hsts *h, + if(2 == rc) { + time_t expires = strcmp(date, UNLIMITED) ? Curl_getdate_capped(date) : + TIME_T_MAX; +- CURLcode result; ++ CURLcode result = CURLE_OK; + char *p = host; + bool subdomain = FALSE; ++ struct stsentry *e; + if(p[0] == '.') { + p++; + subdomain = TRUE; + } +- result = hsts_create(h, p, subdomain, expires); ++ /* only add it if not already present */ ++ e = Curl_hsts(h, p, subdomain); ++ if(!e) ++ result = hsts_create(h, p, subdomain, expires); ++ else { ++ /* the same host name, use the largest expire time */ ++ if(expires > e->expires) ++ e->expires = expires; ++ } + if(result) + return result; + } diff -Nru curl-7.81.0/debian/patches/CVE-2023-23914_5-4.patch curl-7.81.0/debian/patches/CVE-2023-23914_5-4.patch --- curl-7.81.0/debian/patches/CVE-2023-23914_5-4.patch 1970-01-01 00:00:00.000000000 +0000 +++ curl-7.81.0/debian/patches/CVE-2023-23914_5-4.patch 2023-02-15 13:19:00.000000000 +0000 @@ -0,0 +1,37 @@ +Backport of: + +From dc0725244a3163f1e2d5f51165db3a1a430f3ba0 Mon Sep 17 00:00:00 2001 +From: Daniel Stenberg +Date: Tue, 27 Dec 2022 11:50:23 +0100 +Subject: [PATCH] runtests: support crlf="yes" for verify/proxy + +--- + tests/FILEFORMAT.md | 4 ++-- + tests/runtests.pl | 5 +++++ + 2 files changed, 7 insertions(+), 2 deletions(-) + +--- a/tests/FILEFORMAT.md ++++ b/tests/FILEFORMAT.md +@@ -541,7 +541,7 @@ the trailing newline of this given data + sent by the client The `` and `` rules are applied before + comparisons are made. + +-### `` ++### `` + + The protocol dump curl should transmit to a HTTP proxy (when the http-proxy + server is used), if 'nonewline' is set, we will cut off the trailing newline +--- a/tests/runtests.pl ++++ b/tests/runtests.pl +@@ -4521,6 +4521,11 @@ sub singletest { + } + } + ++ if($hash{'crlf'} || ++ ($has_hyper && ($keywords{"HTTP"} || $keywords{"HTTPS"}))) { ++ map subNewlines(0, \$_), @protstrip; ++ } ++ + $res = compare($testnum, $testname, "proxy", \@out, \@protstrip); + if($res) { + return $errorreturncode; diff -Nru curl-7.81.0/debian/patches/CVE-2023-23914_5-5.patch curl-7.81.0/debian/patches/CVE-2023-23914_5-5.patch --- curl-7.81.0/debian/patches/CVE-2023-23914_5-5.patch 1970-01-01 00:00:00.000000000 +0000 +++ curl-7.81.0/debian/patches/CVE-2023-23914_5-5.patch 2023-02-15 13:20:05.000000000 +0000 @@ -0,0 +1,111 @@ +Backport of: + +From ea5aaaa5ede53819f8bc7ae767fc2d13d3704d37 Mon Sep 17 00:00:00 2001 +From: Daniel Stenberg +Date: Tue, 27 Dec 2022 11:50:23 +0100 +Subject: [PATCH] test446: verify hsts with two URLs + +--- + tests/data/Makefile.inc | 2 +- + tests/data/test446 | 84 +++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 85 insertions(+), 1 deletion(-) + create mode 100644 tests/data/test446 + +--- /dev/null ++++ b/tests/data/test446 +@@ -0,0 +1,84 @@ ++ ++ ++ ++ ++HTTP ++HTTP proxy ++HSTS ++trailing-dot ++ ++ ++ ++ ++ ++# we use this as response to a CONNECT ++ ++HTTP/1.1 200 OK ++ ++ ++ ++HTTP/1.1 200 OK ++Content-Length: 6 ++Strict-Transport-Security: max-age=604800 ++ ++-foo- ++ ++ ++HTTP/1.1 200 OK ++Content-Length: 6 ++Strict-Transport-Security: max-age=6048000 ++ ++-baa- ++ ++ ++ ++ ++ ++https ++http-proxy ++ ++ ++HSTS ++proxy ++https ++debug ++ ++ ++CURL_HSTS_HTTP=yes ++CURL_TIME=2000000000 ++ ++ ++ ++HSTS with two URLs ++ ++ ++-x http://%HOSTIP:%PROXYPORT --hsts log/hsts%TESTNUMBER http://this.hsts.example./%TESTNUMBER 
http://another.example.com/%TESTNUMBER0002 ++ ++ ++ ++ ++# we let it CONNECT to the server to confirm HSTS but deny from there ++ ++GET http://this.hsts.example./%TESTNUMBER HTTP/1.1 ++Host: this.hsts.example. ++User-Agent: curl/%VERSION ++Accept: */* ++Proxy-Connection: Keep-Alive ++ ++GET http://another.example.com/%TESTNUMBER0002 HTTP/1.1 ++Host: another.example.com ++User-Agent: curl/%VERSION ++Accept: */* ++Proxy-Connection: Keep-Alive ++ ++ ++ ++ ++# Your HSTS cache. https://curl.se/docs/hsts.html ++# This file was generated by libcurl! Edit at your own risk. ++this.hsts.example "20330525 03:33:20" ++another.example.com "20330727 03:33:20" ++ ++ ++ ++ +--- a/tests/data/Makefile.inc ++++ b/tests/data/Makefile.inc +@@ -68,7 +68,7 @@ test392 test393 test394 test395 test396 + test400 test401 test402 test403 test404 test405 test406 test407 test408 \ + test409 test410 \ + \ +-test430 test431 test432 test433 test434 test435 \ ++test430 test431 test432 test433 test434 test435 test446 \ + \ + test490 test491 test492 test493 test494 \ + \ diff -Nru curl-7.81.0/debian/patches/CVE-2023-23916.patch curl-7.81.0/debian/patches/CVE-2023-23916.patch --- curl-7.81.0/debian/patches/CVE-2023-23916.patch 1970-01-01 00:00:00.000000000 +0000 +++ curl-7.81.0/debian/patches/CVE-2023-23916.patch 2023-02-15 13:20:05.000000000 +0000 @@ -0,0 +1,218 @@ +Backport of: + +From 119fb187192a9ea13dc90d9d20c215fc82799ab9 Mon Sep 17 00:00:00 2001 +From: Patrick Monnerat +Date: Mon, 13 Feb 2023 08:33:09 +0100 +Subject: [PATCH] content_encoding: do not reset stage counter for each header + +Test 418 verifies + +Closes #10492 +--- + lib/content_encoding.c | 7 +- + lib/urldata.h | 1 + + tests/data/Makefile.inc | 2 +- + tests/data/test387 | 2 +- + tests/data/test418 | 152 ++++++++++++++++++++++++++++++++++++++++ + 5 files changed, 158 insertions(+), 6 deletions(-) + create mode 100644 tests/data/test418 + +--- a/lib/content_encoding.c ++++ b/lib/content_encoding.c +@@ -1035,7 +1035,6 @@ CURLcode Curl_build_unencoding_stack(str + const char *enclist, int maybechunked) + { + struct SingleRequest *k = &data->req; +- int counter = 0; + + do { + const char *name; +@@ -1070,9 +1069,9 @@ CURLcode Curl_build_unencoding_stack(str + if(!encoding) + encoding = &error_encoding; /* Defer error at stack use. */ + +- if(++counter >= MAX_ENCODE_STACK) { +- failf(data, "Reject response due to %u content encodings", +- counter); ++ if(k->writer_stack_depth++ >= MAX_ENCODE_STACK) { ++ failf(data, "Reject response due to more than %u content encodings", ++ MAX_ENCODE_STACK); + return CURLE_BAD_CONTENT_ENCODING; + } + /* Stack the unencoding stage. */ +--- a/lib/urldata.h ++++ b/lib/urldata.h +@@ -708,6 +708,7 @@ struct SingleRequest { + struct dohdata *doh; /* DoH specific data for this request */ + #endif + unsigned char setcookies; ++ unsigned char writer_stack_depth; /* Unencoding stack depth. 
*/ + BIT(header); /* incoming data has HTTP header */ + BIT(content_range); /* set TRUE if Content-Range: was found */ + BIT(upload_done); /* set to TRUE when doing chunked transfer-encoding +--- a/tests/data/Makefile.inc ++++ b/tests/data/Makefile.inc +@@ -66,7 +66,7 @@ test370 test371 \ + test392 test393 test394 test395 test396 test397 \ + \ + test400 test401 test402 test403 test404 test405 test406 test407 test408 \ +-test409 test410 \ ++test409 test410 test418 \ + \ + test430 test431 test432 test433 test434 test435 test446 \ + \ +--- /dev/null ++++ b/tests/data/test418 +@@ -0,0 +1,152 @@ ++ ++ ++ ++HTTP ++gzip ++ ++ ++ ++# ++# Server-side ++ ++ ++HTTP/1.1 200 OK ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++Transfer-Encoding: gzip ++ ++-foo- ++ ++ ++ ++# ++# Client-side ++ ++ ++http ++ ++ ++Response with multiple Transfer-Encoding headers ++ ++ ++http://%HOSTIP:%HTTPPORT/%TESTNUMBER -sS ++ ++ ++ ++# ++# Verify data after the test has been "shot" ++ ++ ++GET /%TESTNUMBER HTTP/1.1 ++Host: %HOSTIP:%HTTPPORT 
++User-Agent: curl/%VERSION ++Accept: */* ++ ++ ++ ++# CURLE_BAD_CONTENT_ENCODING is 61 ++ ++61 ++ ++ ++curl: (61) Reject response due to more than 5 content encodings ++ ++ ++ diff -Nru curl-7.81.0/debian/patches/CVE-2023-23916-pre1.patch curl-7.81.0/debian/patches/CVE-2023-23916-pre1.patch --- curl-7.81.0/debian/patches/CVE-2023-23916-pre1.patch 1970-01-01 00:00:00.000000000 +0000 +++ curl-7.81.0/debian/patches/CVE-2023-23916-pre1.patch 2023-02-15 13:20:05.000000000 +0000 @@ -0,0 +1,258 @@ +Backport of: + +From 2f34a7347f315513bfda9ef14770d287fb246bcd Mon Sep 17 00:00:00 2001 +From: Daniel Stenberg +Date: Thu, 1 Dec 2022 09:21:04 +0100 +Subject: [PATCH] runtests: do CRLF replacements per section only + +The `crlf="yes"` attribute and "hyper mode" are now only applied on a +subset of dedicated sections: data, datacheck, stdout and protocol. + +Updated test 2500 accordingly. + +Also made test1 use crlf="yes" for , mostly because it is +often used as a template test case. Going forward, using this attribute +we should be able to write test cases using linefeeds only and avoid +mixed line ending encodings. + +Follow-up to ca15b7512e8d11 + +Fixes #10009 +Closes #10010 +--- + tests/FILEFORMAT.md | 22 ++++++++++++----- + tests/data/test1 | 14 +++++------ + tests/data/test2500 | 6 ++--- + tests/runtests.pl | 57 +++++++++++++++++++++++++++++++++++---------- + 4 files changed, 71 insertions(+), 28 deletions(-) + +--- a/tests/FILEFORMAT.md ++++ b/tests/FILEFORMAT.md +@@ -188,7 +188,7 @@ When using curl built with Hyper, the ke + 'hyper mode' to kick in and make line ending checks work for tests. + ## `` + +-### `` ++### `` + + data to be sent to the client on its request and later verified that it + arrived safely. Set `nocheck="yes"` to prevent the test script from verifying +@@ -214,12 +214,16 @@ much sense for other sections than "data + `hex=yes` means that the data is a sequence of hex pairs. It will get decoded + and used as "raw" data. + ++`crlf=yes` forces *header* newlines to become CRLF even if not written so in ++the source file. Note that this makes runtests.pl parse and "guess" what is a ++header and what is not in order to apply the CRLF line endings appropriately. ++ + For FTP file listings, the `` section will be used *only* if you make + sure that there has been a CWD done first to a directory named `test-[num]` + where [num] is the test case number. Otherwise the ftp server can't know from + which test file to load the list content. + +-### `` ++### `` + + Send back this contents instead of the one. The num is set by: + +@@ -246,7 +250,7 @@ a connect prefix. + ### `` + Address type and address details as logged by the SOCKS proxy. + +-### `` ++### `` + if the data is sent but this is what should be checked afterwards. If + `nonewline=yes` is set, runtests will cut off the trailing newline from the + data before comparing with the one actually received by the client. +@@ -254,7 +258,7 @@ data before comparing with the one actua + Use the `mode="text"` attribute if the output is in text mode on platforms + that have a text/binary difference. + +-### `` ++### `` + The contents of numbered datacheck sections are appended to the non-numbered + one. + +@@ -534,13 +538,16 @@ changing protocol data such as port numb + One perl op per line that operates on the protocol dump. This is pretty + advanced. Example: `s/^EPRT .*/EPRT stripped/`. 
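Relating this back to the library-side change in CVE-2023-23914_5-1.patch and to test446's --hsts option above: after the patch, every CURLOPT_HSTS call appends a file name to an internal list that is read right before the transfer starts, so several cache files can feed one, possibly shared, HSTS store. A hedged sketch of that usage follows; the file names are made up for illustration.

/* Sketch only: with this update, repeated CURLOPT_HSTS calls accumulate
 * file names instead of each call standing alone. */
#include <curl/curl.h>

int main(void)
{
  curl_global_init(CURL_GLOBAL_DEFAULT);
  CURL *h = curl_easy_init();

  curl_easy_setopt(h, CURLOPT_HSTS_CTRL, (long)CURLHSTS_ENABLE);
  curl_easy_setopt(h, CURLOPT_HSTS, "hsts-preload.txt");  /* hypothetical file */
  curl_easy_setopt(h, CURLOPT_HSTS, "hsts-session.txt");  /* appended, not replacing */
  curl_easy_setopt(h, CURLOPT_URL, "http://this.hsts.example./");
  curl_easy_perform(h);   /* goes out as https if either file lists the host */

  curl_easy_cleanup(h);   /* learned policies are written back on close */
  curl_global_cleanup();
  return 0;
}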
+ +-### `` ++### `` + + the protocol dump curl should transmit, if 'nonewline' is set, we will cut off + the trailing newline of this given data before comparing with the one actually + sent by the client The `` and `` rules are applied before + comparisons are made. + ++`crlf=yes` forces the newlines to become CRLF even if not written so in the ++test. ++ + ### `` + + The protocol dump curl should transmit to a HTTP proxy (when the http-proxy +@@ -557,7 +564,7 @@ have a text/binary difference. + If 'nonewline' is set, we will cut off the trailing newline of this given data + before comparing with the one actually received by the client + +-### `` ++### `` + This verifies that this data was passed to stdout. + + Use the mode="text" attribute if the output is in text mode on platforms that +@@ -566,6 +573,9 @@ have a text/binary difference. + If 'nonewline' is set, we will cut off the trailing newline of this given data + before comparing with the one actually received by the client + ++`crlf=yes` forces the newlines to become CRLF even if not written so in the ++test. ++ + ### `` + The file's contents must be identical to this after the test is complete. Use + the mode="text" attribute if the output is in text mode on platforms that have +--- a/tests/data/test1 ++++ b/tests/data/test1 +@@ -9,7 +9,7 @@ HTTP GET + # + # Server-side + +- ++ + HTTP/1.1 200 OK + Date: Tue, 09 Nov 2010 14:49:00 GMT + Server: test-server/fake +@@ -42,12 +42,12 @@ http://%HOSTIP:%HTTPPORT/%TESTNUMBER + # + # Verify data after the test has been "shot" + +- +-GET /%TESTNUMBER HTTP/1.1 +-Host: %HOSTIP:%HTTPPORT +-User-Agent: curl/%VERSION +-Accept: */* +- ++ ++GET /%TESTNUMBER HTTP/1.1 ++Host: %HOSTIP:%HTTPPORT ++User-Agent: curl/%VERSION ++Accept: */* ++ + + + +--- a/tests/runtests.pl ++++ b/tests/runtests.pl +@@ -3448,7 +3448,13 @@ sub subBase64 { + + my $prevupdate; + sub subNewlines { +- my ($thing) = @_; ++ my ($force, $thing) = @_; ++ ++ if($force) { ++ # enforce CRLF newline ++ $$thing =~ s/\x0d*\x0a/\x0d\x0a/; ++ return; ++ } + + # When curl is built with Hyper, it gets all response headers delivered as + # name/value pairs and curl "invents" the newlines when it saves the +@@ -3462,7 +3468,7 @@ sub subNewlines { + # skip curl error messages + ($$thing !~ /^curl: \(\d+\) /))) { + # enforce CRLF newline +- $$thing =~ s/\x0a/\x0d\x0a/; ++ $$thing =~ s/\x0d*\x0a/\x0d\x0a/; + $prevupdate = 1; + } + else { +@@ -3534,6 +3540,7 @@ sub prepro { + my (@entiretest) = @_; + my $show = 1; + my @out; ++ my $data_crlf; + for my $s (@entiretest) { + my $f = $s; + if($s =~ /^ *%if (.*)/) { +@@ -3557,10 +3564,19 @@ sub prepro { + next; + } + if($show) { ++ # The processor does CRLF replacements in the sections if ++ # necessary since those parts might be read by separate servers. 
++ if($s =~ /^ */) { ++ if($1 =~ /crlf="yes"/ || $has_hyper) { ++ $data_crlf = 1; ++ } ++ } ++ elsif(($s =~ /^ *<\/data/) && $data_crlf) { ++ $data_crlf = 0; ++ } + subVariables(\$s, $testnum, "%"); + subBase64(\$s); +- subNewlines(\$s) if($has_hyper && ($keywords{"HTTP"} || +- $keywords{"HTTPS"})); ++ subNewlines(0, \$s) if($data_crlf); + push @out, $s; + } + } +@@ -3874,6 +3890,11 @@ sub singletest { + # of the datacheck + chomp($replycheckpart[$#replycheckpart]); + } ++ if($replycheckpartattr{'crlf'} || ++ ($has_hyper && ($keywords{"HTTP"} ++ || $keywords{"HTTPS"}))) { ++ map subNewlines(0, \$_), @replycheckpart; ++ } + push(@reply, @replycheckpart); + } + } +@@ -3888,6 +3909,11 @@ sub singletest { + map s/\r\n/\n/g, @reply; + map s/\n/\r\n/g, @reply; + } ++ if($replyattr{'crlf'} || ++ ($has_hyper && ($keywords{"HTTP"} ++ || $keywords{"HTTPS"}))) { ++ map subNewlines(0, \$_), @reply; ++ } + } + + # this is the valid protocol blurb curl should generate +@@ -4329,6 +4355,12 @@ sub singletest { + chomp($validstdout[$#validstdout]); + } + ++ if($hash{'crlf'} || ++ ($has_hyper && ($keywords{"HTTP"} ++ || $keywords{"HTTPS"}))) { ++ map subNewlines(0, \$_), @validstdout; ++ } ++ + $res = compare($testnum, $testname, "stdout", \@actual, \@validstdout); + if($res) { + return $errorreturncode; +@@ -4429,6 +4461,10 @@ sub singletest { + } + } + ++ if($hash{'crlf'}) { ++ map subNewlines(1, \$_), @protstrip; ++ } ++ + if((!$out[0] || ($out[0] eq "")) && $protstrip[0]) { + logmsg "\n $testnum: protocol FAILED!\n". + " There was no content at all in the file $SERVERIN.\n". +@@ -4565,6 +4601,11 @@ sub singletest { + map s/\r\n/\n/g, @outfile; + map s/\n/\r\n/g, @outfile; + } ++ if($hash{'crlf'} || ++ ($has_hyper && ($keywords{"HTTP"} ++ || $keywords{"HTTPS"}))) { ++ map subNewlines(0, \$_), @outfile; ++ } + + my $strip; + for $strip (@stripfile) { diff -Nru curl-7.81.0/debian/patches/series curl-7.81.0/debian/patches/series --- curl-7.81.0/debian/patches/series 2023-01-04 14:53:02.000000000 +0000 +++ curl-7.81.0/debian/patches/series 2023-02-15 13:20:05.000000000 +0000 @@ -22,6 +22,13 @@ CVE-2022-42916.patch CVE-2022-43551.patch CVE-2022-43552.patch +CVE-2023-23914_5-1.patch +CVE-2023-23914_5-2.patch +CVE-2023-23914_5-3.patch +CVE-2023-23914_5-4.patch +CVE-2023-23914_5-5.patch +CVE-2023-23916-pre1.patch +CVE-2023-23916.patch # do not add patches below 90_gnutls.patch
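One closing note on the CVE-2023-23916 fix above: the decode-stage counter now lives in the request state (writer_stack_depth) instead of restarting at zero for every Transfer-Encoding or Content-Encoding header, which is what allowed responses like test418's to stack an unbounded chain of decompressors. A condensed, standalone sketch of that logic follows; the types and the stack_encodings() helper are illustrative only, not libcurl's actual code.

/* Sketch of the CVE-2023-23916 logic: the stack-depth counter must persist
 * across headers; libcurl keeps it in struct SingleRequest. */
#include <stdio.h>

#define MAX_ENCODE_STACK 5

struct request { unsigned char writer_stack_depth; };

/* returns 0 on success, -1 once too many decoding stages are stacked */
static int stack_encodings(struct request *req, const char *encodings[],
                           int nheaders)
{
  for(int h = 0; h < nheaders; h++) {
    /* the vulnerable code effectively restarted a local counter here, so
       every new header began counting from scratch */
    if(req->writer_stack_depth++ >= MAX_ENCODE_STACK) {
      fprintf(stderr,
              "Reject response due to more than %d content encodings\n",
              MAX_ENCODE_STACK);
      return -1;
    }
    (void)encodings[h];  /* a real implementation would push a decoder here */
  }
  return 0;
}

int main(void)
{
  const char *many[100];
  for(int i = 0; i < 100; i++)
    many[i] = "gzip";
  struct request req = {0};
  return stack_encodings(&req, many, 100) ? 1 : 0;  /* rejected, like test418 */
}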