diff -u -r -N squid-6.3/ChangeLog squid-6.4/ChangeLog
--- squid-6.3/ChangeLog	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/ChangeLog	2023-10-21 11:40:41.000000000 +1300
@@ -1,3 +1,21 @@
+Changes in squid-6.4 (22 Oct 2023):
+
+	- Regression: Restore support for legacy cache_object cache manager requests
+	- Regression: Do not use static initialization to register modules
+	- Bug 5301: cachemgr.cgi not showing new manager interface URLs
+	- Bug 5300: cachemgr.cgi assertion
+	- Fix stack buffer overflow when parsing Digest Authorization
+	- Fix userinfo percent-encoding
+	- Fix store_client caller memory leak on certain errors
+	- Fix validation of certificates with CN=*
+	- Fix handling of large stored response headers
+	- Miss if a HTTP/304 update would exceed reply_header_max_size
+	- RFC 9112: Improve HTTP chunked encoding compliance
+	- HTTP: Improve handling of empty lines received prior to request-line
+	- Y2038: improve printing of time settings
+	- Extend cache_log_message to problematic from-helper annotations
+	- ... and several Continuous Integration improvements
+
 Changes in squid-6.3 (03 Sep 2023):
 
 	- Bug 5294: ERR_CANNOT_FORWARD returned instead of ERR_DNS_FAIL
diff -u -r -N squid-6.3/configure squid-6.4/configure
--- squid-6.3/configure	2023-09-03 20:33:34.000000000 +1200
+++ squid-6.4/configure	2023-10-22 01:43:00.000000000 +1300
@@ -1,7 +1,7 @@
 #! /bin/sh
 # From configure.ac Revision.
 # Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.71 for Squid Web Proxy 6.3.
+# Generated by GNU Autoconf 2.71 for Squid Web Proxy 6.4.
 #
 # Report bugs to <https://bugs.squid-cache.org/>.
 #
@@ -626,8 +626,8 @@
 # Identity of this package.
 PACKAGE_NAME='Squid Web Proxy'
 PACKAGE_TARNAME='squid'
-PACKAGE_VERSION='6.3'
-PACKAGE_STRING='Squid Web Proxy 6.3'
+PACKAGE_VERSION='6.4'
+PACKAGE_STRING='Squid Web Proxy 6.4'
 PACKAGE_BUGREPORT='https://bugs.squid-cache.org/'
 PACKAGE_URL=''
 
@@ -1696,7 +1696,7 @@
   # Omit some internal or obsolete options to make the list less imposing.
   # This message is too long to be a string in the A/UX 3.1 sh.
   cat <<_ACEOF
-\`configure' configures Squid Web Proxy 6.3 to adapt to many kinds of systems.
+\`configure' configures Squid Web Proxy 6.4 to adapt to many kinds of systems.
 
 Usage: $0 [OPTION]... [VAR=VALUE]...
 
@@ -1767,7 +1767,7 @@
 
 if test -n "$ac_init_help"; then
   case $ac_init_help in
-     short | recursive ) echo "Configuration of Squid Web Proxy 6.3:";;
+     short | recursive ) echo "Configuration of Squid Web Proxy 6.4:";;
    esac
   cat <<\_ACEOF
 
@@ -2187,7 +2187,7 @@
 test -n "$ac_init_help" && exit $ac_status
 if $ac_init_version; then
   cat <<\_ACEOF
-Squid Web Proxy configure 6.3
+Squid Web Proxy configure 6.4
 generated by GNU Autoconf 2.71
 
 Copyright (C) 2021 Free Software Foundation, Inc.
@@ -3200,7 +3200,7 @@
 This file contains any messages produced by compilers while
 running configure, to aid debugging if configure makes a mistake.
 
-It was created by Squid Web Proxy $as_me 6.3, which was
+It was created by Squid Web Proxy $as_me 6.4, which was
 generated by GNU Autoconf 2.71.  Invocation command line was
 
   $ $0$ac_configure_args_raw
@@ -4692,7 +4692,7 @@
 
 # Define the identity of the package.
  PACKAGE='squid'
- VERSION='6.3'
+ VERSION='6.4'
 
 
 printf "%s\n" "#define PACKAGE \"$PACKAGE\"" >>confdefs.h
@@ -56854,7 +56854,7 @@
 # report actual input values of CONFIG_FILES etc. instead of their
 # values after options handling.
 ac_log="
-This file was extended by Squid Web Proxy $as_me 6.3, which was
+This file was extended by Squid Web Proxy $as_me 6.4, which was
 generated by GNU Autoconf 2.71.  Invocation command line was
 
   CONFIG_FILES    = $CONFIG_FILES
@@ -56922,7 +56922,7 @@
 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
 ac_cs_config='$ac_cs_config_escaped'
 ac_cs_version="\\
-Squid Web Proxy config.status 6.3
+Squid Web Proxy config.status 6.4
 configured by $0, generated by GNU Autoconf 2.71,
   with options \\"\$ac_cs_config\\"
 
diff -u -r -N squid-6.3/configure.ac squid-6.4/configure.ac
--- squid-6.3/configure.ac	2023-09-03 20:33:34.000000000 +1200
+++ squid-6.4/configure.ac	2023-10-22 01:43:00.000000000 +1300
@@ -5,7 +5,7 @@
 ## Please see the COPYING and CONTRIBUTORS files for details.
 ##
 
-AC_INIT([Squid Web Proxy],[6.3],[https://bugs.squid-cache.org/],[squid])
+AC_INIT([Squid Web Proxy],[6.4],[https://bugs.squid-cache.org/],[squid])
 AC_PREREQ(2.61)
 AC_CONFIG_HEADERS([include/autoconf.h])
 AC_CONFIG_AUX_DIR(cfgaux)
diff -u -r -N squid-6.3/CONTRIBUTORS squid-6.4/CONTRIBUTORS
--- squid-6.3/CONTRIBUTORS	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/CONTRIBUTORS	2023-10-21 11:40:41.000000000 +1300
@@ -10,6 +10,7 @@
     Alan Mizrahi <alan@mizrahi.com.ve>
     Alan Nastac <mrness@gentoo.org>
     Aleksa <susulic@gmail.com>
+    Alex Bason <nonsleepr@gmail.com>
     Alex Dowad <alexinbeijing@gmail.com>
     Alex Rousskov <rousskov@measurement-factory.com>
     Alex Wu <alex_wu2012@hotmail.com>
diff -u -r -N squid-6.3/doc/debug-messages.dox squid-6.4/doc/debug-messages.dox
--- squid-6.3/doc/debug-messages.dox	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/doc/debug-messages.dox	2023-10-21 11:40:41.000000000 +1300
@@ -66,5 +66,6 @@
 66 WARNING: BCP 177 violation. Detected non-functional IPv6 loopback.
 67 WARNING: BCP 177 violation. IPv6 transport forced OFF by build parameters.
 68 Processing Configuration File: ... (depth ...)
+69 WARNING: Unsupported or unexpected from-helper annotation with a name reserved for Squid use: ...advice: If this is a custom annotation, rename it to add a trailing underscore: ...
 \endverbatim
 */
diff -u -r -N squid-6.3/doc/release-notes/release-6.html squid-6.4/doc/release-notes/release-6.html
--- squid-6.3/doc/release-notes/release-6.html	2023-09-03 20:37:47.000000000 +1200
+++ squid-6.4/doc/release-notes/release-6.html	2023-10-22 01:47:10.000000000 +1300
@@ -3,10 +3,10 @@
 <HEAD>
  <META NAME="GENERATOR" CONTENT="LinuxDoc-Tools 0.9.83">
  <meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
- <TITLE>Squid 6.3 release notes</TITLE>
+ <TITLE>Squid 6.4 release notes</TITLE>
 </HEAD>
 <BODY>
-<H1>Squid 6.3 release notes</H1>
+<H1>Squid 6.4 release notes</H1>
 
 <H2>Squid Developers</H2>
 <P>
@@ -59,7 +59,7 @@
 <HR>
 <H2><A NAME="s1">1.</A> <A HREF="#toc1">Notice</A></H2>
 
-<P>The Squid Team are pleased to announce the release of Squid-6.3 for testing.</P>
+<P>The Squid Team are pleased to announce the release of Squid-6.4 for testing.</P>
 <P>This new release is available for download from 
 <A HREF="http://www.squid-cache.org/Versions/v6/">http://www.squid-cache.org/Versions/v6/</A> or the
 <A HREF="http://www.squid-cache.org/Download/http-mirrors.html">mirrors</A>.</P>
diff -u -r -N squid-6.3/include/version.h squid-6.4/include/version.h
--- squid-6.3/include/version.h	2023-09-03 20:33:34.000000000 +1200
+++ squid-6.4/include/version.h	2023-10-22 01:43:00.000000000 +1300
@@ -10,7 +10,7 @@
 #define SQUID_VERSION_H
 
 #ifndef SQUID_RELEASE_TIME
-#define SQUID_RELEASE_TIME 1693730003
+#define SQUID_RELEASE_TIME 1697892169
 #endif
 
 /*
diff -u -r -N squid-6.3/RELEASENOTES.html squid-6.4/RELEASENOTES.html
--- squid-6.3/RELEASENOTES.html	2023-09-03 20:37:47.000000000 +1200
+++ squid-6.4/RELEASENOTES.html	2023-10-22 01:47:10.000000000 +1300
@@ -3,10 +3,10 @@
 <HEAD>
  <META NAME="GENERATOR" CONTENT="LinuxDoc-Tools 0.9.83">
  <meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
- <TITLE>Squid 6.3 release notes</TITLE>
+ <TITLE>Squid 6.4 release notes</TITLE>
 </HEAD>
 <BODY>
-<H1>Squid 6.3 release notes</H1>
+<H1>Squid 6.4 release notes</H1>
 
 <H2>Squid Developers</H2>
 <P>
@@ -59,7 +59,7 @@
 <HR>
 <H2><A NAME="s1">1.</A> <A HREF="#toc1">Notice</A></H2>
 
-<P>The Squid Team are pleased to announce the release of Squid-6.3 for testing.</P>
+<P>The Squid Team are pleased to announce the release of Squid-6.4 for testing.</P>
 <P>This new release is available for download from 
 <A HREF="http://www.squid-cache.org/Versions/v6/">http://www.squid-cache.org/Versions/v6/</A> or the
 <A HREF="http://www.squid-cache.org/Download/http-mirrors.html">mirrors</A>.</P>
diff -u -r -N squid-6.3/src/acl/Asn.cc squid-6.4/src/acl/Asn.cc
--- squid-6.3/src/acl/Asn.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/acl/Asn.cc	2023-10-21 11:40:41.000000000 +1300
@@ -16,22 +16,21 @@
 #include "acl/DestinationIp.h"
 #include "acl/SourceAsn.h"
 #include "acl/Strategised.h"
+#include "base/CharacterSet.h"
 #include "FwdState.h"
 #include "HttpReply.h"
 #include "HttpRequest.h"
 #include "ipcache.h"
 #include "MasterXaction.h"
 #include "mgr/Registration.h"
+#include "parser/Tokenizer.h"
 #include "radix.h"
 #include "RequestFlags.h"
+#include "sbuf/SBuf.h"
 #include "SquidConfig.h"
 #include "Store.h"
 #include "StoreClient.h"
 
-#ifndef AS_REQBUF_SZ
-#define AS_REQBUF_SZ    4096
-#endif
-
 /* BEGIN of definitions for radix tree entries */
 
 /* 32/128 bits address in memory with length */
@@ -71,9 +70,8 @@
     CBDATA_CLASS(ASState);
 
 public:
-    ASState() {
-        memset(reqbuf, 0, sizeof(reqbuf));
-    }
+    ASState() = default;
+
     ~ASState() {
         if (entry) {
             debugs(53, 3, entry->url());
@@ -87,10 +85,9 @@
     store_client *sc = nullptr;
     HttpRequest::Pointer request;
     int as_number = 0;
-    int64_t offset = 0;
-    int reqofs = 0;
-    char reqbuf[AS_REQBUF_SZ];
-    bool dataRead = false;
+
+    /// for receiving a WHOIS reply body from Store and interpreting it
+    Store::ParsingBuffer parsingBuffer;
 };
 
 CBDATA_CLASS_INIT(ASState);
@@ -103,7 +100,7 @@
     m_ADDR e_mask;
 };
 
-static int asnAddNet(char *, int);
+static int asnAddNet(const SBuf &, int);
 
 static void asnCacheStart(int as);
 
@@ -258,8 +255,7 @@
     xfree(asres);
 
     asState->entry = e;
-    StoreIOBuffer readBuffer (AS_REQBUF_SZ, asState->offset, asState->reqbuf);
-    storeClientCopy(asState->sc, e, readBuffer, asHandleReply, asState);
+    storeClientCopy(asState->sc, e, asState->parsingBuffer.makeInitialSpace(), asHandleReply, asState);
 }
 
 static void
@@ -267,13 +263,8 @@
 {
     ASState *asState = (ASState *)data;
     StoreEntry *e = asState->entry;
-    char *s;
-    char *t;
-    char *buf = asState->reqbuf;
-    int leftoversz = -1;
 
-    debugs(53, 3, "asHandleReply: Called with size=" << (unsigned int)result.length);
-    debugs(53, 3, "asHandleReply: buffer='" << buf << "'");
+    debugs(53, 3, result << " for " << asState->as_number << " with " << *e);
 
     /* First figure out whether we should abort the request */
 
@@ -282,11 +273,7 @@
         return;
     }
 
-    if (result.length == 0 && asState->dataRead) {
-        debugs(53, 3, "asHandleReply: Done: " << e->url());
-        delete asState;
-        return;
-    } else if (result.flags.error) {
+    if (result.flags.error) {
         debugs(53, DBG_IMPORTANT, "ERROR: asHandleReply: Called with Error set and size=" << (unsigned int) result.length);
         delete asState;
         return;
@@ -296,117 +283,77 @@
         return;
     }
 
-    /*
-     * Next, attempt to parse our request
-     * Remembering that the actual buffer size is retsize + reqofs!
-     */
-    s = buf;
-
-    while ((size_t)(s - buf) < result.length + asState->reqofs && *s != '\0') {
-        while (*s && xisspace(*s))
-            ++s;
-
-        for (t = s; *t; ++t) {
-            if (xisspace(*t))
-                break;
-        }
+    asState->parsingBuffer.appended(result.data, result.length);
+    Parser::Tokenizer tok(SBuf(asState->parsingBuffer.content().data, asState->parsingBuffer.contentSize()));
+    SBuf address;
+    // Word delimiters in WHOIS ASN replies. RFC 3912 mentions SP, CR, and LF.
+    // Others are added to mimic an earlier isspace()-based implementation.
+    static const auto WhoisSpaces = CharacterSet("ASCII_spaces", " \f\r\n\t\v");
+    while (tok.token(address, WhoisSpaces)) {
+        (void)asnAddNet(address, asState->as_number);
+    }
+    asState->parsingBuffer.consume(tok.parsedSize());
+    const auto leftoverBytes = asState->parsingBuffer.contentSize();
 
-        if (*t == '\0') {
-            /* oof, word should continue on next block */
-            break;
-        }
+    if (asState->sc->atEof()) {
+        if (leftoverBytes)
+            debugs(53, 2, "WHOIS: Discarding the last " << leftoverBytes << " received bytes of a truncated AS response");
+        delete asState;
+        return;
+    }
 
-        *t = '\0';
-        debugs(53, 3, "asHandleReply: AS# " << s << " (" << asState->as_number << ")");
-        asnAddNet(s, asState->as_number);
-        s = t + 1;
-        asState->dataRead = true;
-    }
-
-    /*
-     * Next, grab the end of the 'valid data' in the buffer, and figure
-     * out how much data is left in our buffer, which we need to keep
-     * around for the next request
-     */
-    leftoversz = (asState->reqofs + result.length) - (s - buf);
-
-    assert(leftoversz >= 0);
-
-    /*
-     * Next, copy the left over data, from s to s + leftoversz to the
-     * beginning of the buffer
-     */
-    memmove(buf, s, leftoversz);
-
-    /*
-     * Next, update our offset and reqofs, and kick off a copy if required
-     */
-    asState->offset += result.length;
-
-    asState->reqofs = leftoversz;
-
-    debugs(53, 3, "asState->offset = " << asState->offset);
-
-    if (e->store_status == STORE_PENDING) {
-        debugs(53, 3, "asHandleReply: store_status == STORE_PENDING: " << e->url()  );
-        StoreIOBuffer tempBuffer (AS_REQBUF_SZ - asState->reqofs,
-                                  asState->offset,
-                                  asState->reqbuf + asState->reqofs);
-        storeClientCopy(asState->sc,
-                        e,
-                        tempBuffer,
-                        asHandleReply,
-                        asState);
-    } else {
-        StoreIOBuffer tempBuffer;
-        debugs(53, 3, "asHandleReply: store complete, but data received " << e->url()  );
-        tempBuffer.offset = asState->offset;
-        tempBuffer.length = AS_REQBUF_SZ - asState->reqofs;
-        tempBuffer.data = asState->reqbuf + asState->reqofs;
-        storeClientCopy(asState->sc,
-                        e,
-                        tempBuffer,
-                        asHandleReply,
-                        asState);
+    const auto remainingSpace = asState->parsingBuffer.space().positionAt(result.offset + result.length);
+
+    if (!remainingSpace.length) {
+        Assure(leftoverBytes);
+        debugs(53, DBG_IMPORTANT, "WARNING: Ignoring the tail of a WHOIS AS response" <<
+               " with an unparsable section of " << leftoverBytes <<
+               " bytes ending at offset " << remainingSpace.offset);
+        delete asState;
+        return;
     }
+
+    const decltype(StoreIOBuffer::offset) stillReasonableOffset = 100000; // an arbitrary limit in bytes
+    if (remainingSpace.offset > stillReasonableOffset) {
+        // stop suspicious accumulation of parsed addresses and/or work
+        debugs(53, DBG_IMPORTANT, "WARNING: Ignoring the tail of a suspiciously large WHOIS AS response" <<
+               " exceeding " << stillReasonableOffset << " bytes");
+        delete asState;
+        return;
+    }
+
+    storeClientCopy(asState->sc, e, remainingSpace, asHandleReply, asState);
 }
 
 /**
  * add a network (addr, mask) to the radix tree, with matching AS number
  */
 static int
-asnAddNet(char *as_string, int as_number)
+asnAddNet(const SBuf &addressAndMask, const int as_number)
 {
     struct squid_radix_node *rn;
     CbDataList<int> **Tail = nullptr;
     CbDataList<int> *q = nullptr;
     as_info *asinfo = nullptr;
 
-    Ip::Address mask;
-    Ip::Address addr;
-    char *t;
-    int bitl;
-
-    t = strchr(as_string, '/');
-
-    if (t == nullptr) {
+    static const CharacterSet NonSlashSet = CharacterSet("slash", "/").complement("non-slash");
+    Parser::Tokenizer tok(addressAndMask);
+    SBuf addressToken;
+    if (!(tok.prefix(addressToken, NonSlashSet) && tok.skip('/'))) {
         debugs(53, 3, "asnAddNet: failed, invalid response from whois server.");
         return 0;
     }
-
-    *t = '\0';
-    addr = as_string;
-    bitl = atoi(t + 1);
-
-    if (bitl < 0)
-        bitl = 0;
+    const Ip::Address addr = addressToken.c_str();
 
     // INET6 TODO : find a better way of identifying the base IPA family for mask than this.
-    t = strchr(as_string, '.');
+    const auto addrFamily = (addressToken.find('.') != SBuf::npos) ? AF_INET : AF_INET6;
 
     // generate Netbits Format Mask
+    Ip::Address mask;
     mask.setNoAddr();
-    mask.applyMask(bitl, (t!=nullptr?AF_INET:AF_INET6) );
+    int64_t bitl = 0;
+    if (tok.int64(bitl, 10, false))
+        mask.applyMask(bitl, addrFamily);
 
     debugs(53, 3, "asnAddNet: called for " << addr << "/" << mask );
 
diff -u -r -N squid-6.3/src/acl/external/delayer/ext_delayer_acl.8 squid-6.4/src/acl/external/delayer/ext_delayer_acl.8
--- squid-6.3/src/acl/external/delayer/ext_delayer_acl.8	2023-09-03 20:37:50.000000000 +1200
+++ squid-6.4/src/acl/external/delayer/ext_delayer_acl.8	2023-10-22 01:47:13.000000000 +1300
@@ -133,7 +133,7 @@
 .\" ========================================================================
 .\"
 .IX Title "EXT_DELAYER_ACL 8"
-.TH EXT_DELAYER_ACL 8 "2023-09-03" "perl v5.36.0" "User Contributed Perl Documentation"
+.TH EXT_DELAYER_ACL 8 "2023-10-21" "perl v5.36.0" "User Contributed Perl Documentation"
 .\" For nroff, turn off justification.  Always turn off hyphenation; it makes
 .\" way too many mistakes in technical documents.
 .if n .ad l
diff -u -r -N squid-6.3/src/acl/external/eDirectory_userip/ext_edirectory_userip_acl.cc squid-6.4/src/acl/external/eDirectory_userip/ext_edirectory_userip_acl.cc
--- squid-6.3/src/acl/external/eDirectory_userip/ext_edirectory_userip_acl.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/acl/external/eDirectory_userip/ext_edirectory_userip_acl.cc	2023-10-21 11:40:41.000000000 +1300
@@ -1555,7 +1555,7 @@
         /* BINARY DEBUGGING *
                     local_printfx("while() -> bufa[%" PRIuSIZE "]: %s", k, bufa);
                     for (i = 0; i < k; ++i)
-                      local_printfx("%02X", bufa[i]);
+                      local_printfx("%02X", static_cast<unsigned int>(static_cast<unsigned char>(bufa[i])));
                     local_printfx("\n");
         * BINARY DEBUGGING */
         /* Check for CRLF */
diff -u -r -N squid-6.3/src/acl/external/kerberos_sid_group/ext_kerberos_sid_group_acl.8 squid-6.4/src/acl/external/kerberos_sid_group/ext_kerberos_sid_group_acl.8
--- squid-6.3/src/acl/external/kerberos_sid_group/ext_kerberos_sid_group_acl.8	2023-09-03 20:37:51.000000000 +1200
+++ squid-6.4/src/acl/external/kerberos_sid_group/ext_kerberos_sid_group_acl.8	2023-10-22 01:47:13.000000000 +1300
@@ -133,7 +133,7 @@
 .\" ========================================================================
 .\"
 .IX Title "EXT_KERBEROS_SID_GROUP_ACL 8"
-.TH EXT_KERBEROS_SID_GROUP_ACL 8 "2023-09-03" "perl v5.36.0" "User Contributed Perl Documentation"
+.TH EXT_KERBEROS_SID_GROUP_ACL 8 "2023-10-21" "perl v5.36.0" "User Contributed Perl Documentation"
 .\" For nroff, turn off justification.  Always turn off hyphenation; it makes
 .\" way too many mistakes in technical documents.
 .if n .ad l
diff -u -r -N squid-6.3/src/acl/external/SQL_session/ext_sql_session_acl.8 squid-6.4/src/acl/external/SQL_session/ext_sql_session_acl.8
--- squid-6.3/src/acl/external/SQL_session/ext_sql_session_acl.8	2023-09-03 20:37:51.000000000 +1200
+++ squid-6.4/src/acl/external/SQL_session/ext_sql_session_acl.8	2023-10-22 01:47:14.000000000 +1300
@@ -133,7 +133,7 @@
 .\" ========================================================================
 .\"
 .IX Title "EXT_SQL_SESSION_ACL 8"
-.TH EXT_SQL_SESSION_ACL 8 "2023-09-03" "perl v5.36.0" "User Contributed Perl Documentation"
+.TH EXT_SQL_SESSION_ACL 8 "2023-10-21" "perl v5.36.0" "User Contributed Perl Documentation"
 .\" For nroff, turn off justification.  Always turn off hyphenation; it makes
 .\" way too many mistakes in technical documents.
 .if n .ad l
diff -u -r -N squid-6.3/src/acl/external/wbinfo_group/ext_wbinfo_group_acl.8 squid-6.4/src/acl/external/wbinfo_group/ext_wbinfo_group_acl.8
--- squid-6.3/src/acl/external/wbinfo_group/ext_wbinfo_group_acl.8	2023-09-03 20:37:51.000000000 +1200
+++ squid-6.4/src/acl/external/wbinfo_group/ext_wbinfo_group_acl.8	2023-10-22 01:47:14.000000000 +1300
@@ -133,7 +133,7 @@
 .\" ========================================================================
 .\"
 .IX Title "EXT_WBINFO_GROUP_ACL 8"
-.TH EXT_WBINFO_GROUP_ACL 8 "2023-09-03" "perl v5.36.0" "User Contributed Perl Documentation"
+.TH EXT_WBINFO_GROUP_ACL 8 "2023-10-21" "perl v5.36.0" "User Contributed Perl Documentation"
 .\" For nroff, turn off justification.  Always turn off hyphenation; it makes
 .\" way too many mistakes in technical documents.
 .if n .ad l
diff -u -r -N squid-6.3/src/anyp/Uri.cc squid-6.4/src/anyp/Uri.cc
--- squid-6.3/src/anyp/Uri.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/anyp/Uri.cc	2023-10-21 11:40:41.000000000 +1300
@@ -71,7 +71,7 @@
     while (!tk.atEnd()) {
         // TODO: Add Tokenizer::parseOne(void).
         const auto ch = tk.remaining()[0];
-        output.appendf("%%%02X", static_cast<unsigned int>(ch)); // TODO: Optimize using a table
+        output.appendf("%%%02X", static_cast<unsigned int>(static_cast<unsigned char>(ch))); // TODO: Optimize using a table
         (void)tk.skip(ch);
 
         if (tk.prefix(goodSection, ignore))
@@ -174,6 +174,10 @@
     assert(0 == matchDomainName("*.foo.com", ".foo.com", mdnHonorWildcards));
     assert(0 != matchDomainName("*.foo.com", "foo.com", mdnHonorWildcards));
 
+    assert(0 != matchDomainName("foo.com", ""));
+    assert(0 != matchDomainName("foo.com", "", mdnHonorWildcards));
+    assert(0 != matchDomainName("foo.com", "", mdnRejectSubsubDomains));
+
     /* more cases? */
 }
 
@@ -748,6 +752,8 @@
         return -1;
 
     dl = strlen(d);
+    if (dl == 0)
+        return 1;
 
     /*
      * Start at the ends of the two strings and work towards the
diff -u -r -N squid-6.3/src/auth/basic/DB/basic_db_auth.8 squid-6.4/src/auth/basic/DB/basic_db_auth.8
--- squid-6.3/src/auth/basic/DB/basic_db_auth.8	2023-09-03 20:37:52.000000000 +1200
+++ squid-6.4/src/auth/basic/DB/basic_db_auth.8	2023-10-22 01:47:15.000000000 +1300
@@ -133,7 +133,7 @@
 .\" ========================================================================
 .\"
 .IX Title "BASIC_DB_AUTH 8"
-.TH BASIC_DB_AUTH 8 "2023-09-03" "perl v5.36.0" "User Contributed Perl Documentation"
+.TH BASIC_DB_AUTH 8 "2023-10-21" "perl v5.36.0" "User Contributed Perl Documentation"
 .\" For nroff, turn off justification.  Always turn off hyphenation; it makes
 .\" way too many mistakes in technical documents.
 .if n .ad l
diff -u -r -N squid-6.3/src/auth/basic/POP3/basic_pop3_auth.8 squid-6.4/src/auth/basic/POP3/basic_pop3_auth.8
--- squid-6.3/src/auth/basic/POP3/basic_pop3_auth.8	2023-09-03 20:37:52.000000000 +1200
+++ squid-6.4/src/auth/basic/POP3/basic_pop3_auth.8	2023-10-22 01:47:15.000000000 +1300
@@ -133,7 +133,7 @@
 .\" ========================================================================
 .\"
 .IX Title "BASIC_POP3_AUTH 8"
-.TH BASIC_POP3_AUTH 8 "2023-09-03" "perl v5.36.0" "User Contributed Perl Documentation"
+.TH BASIC_POP3_AUTH 8 "2023-10-21" "perl v5.36.0" "User Contributed Perl Documentation"
 .\" For nroff, turn off justification.  Always turn off hyphenation; it makes
 .\" way too many mistakes in technical documents.
 .if n .ad l
diff -u -r -N squid-6.3/src/auth/digest/Config.cc squid-6.4/src/auth/digest/Config.cc
--- squid-6.3/src/auth/digest/Config.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/auth/digest/Config.cc	2023-10-21 11:40:41.000000000 +1300
@@ -827,11 +827,15 @@
             break;
 
         case DIGEST_NC:
-            if (value.size() != 8) {
+            if (value.size() == 8) {
+                // for historical reasons, the nc value MUST be exactly 8 bytes
+                static_assert(sizeof(digest_request->nc) == 8 + 1);
+                xstrncpy(digest_request->nc, value.rawBuf(), value.size() + 1);
+                debugs(29, 9, "Found noncecount '" << digest_request->nc << "'");
+            } else {
                 debugs(29, 9, "Invalid nc '" << value << "' in '" << temp << "'");
+                digest_request->nc[0] = 0;
             }
-            xstrncpy(digest_request->nc, value.rawBuf(), value.size() + 1);
-            debugs(29, 9, "Found noncecount '" << digest_request->nc << "'");
             break;
 
         case DIGEST_CNONCE:
diff -u -r -N squid-6.3/src/auth/ntlm/Scheme.cc squid-6.4/src/auth/ntlm/Scheme.cc
--- squid-6.3/src/auth/ntlm/Scheme.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/auth/ntlm/Scheme.cc	2023-10-21 11:40:41.000000000 +1300
@@ -23,7 +23,8 @@
         debugs(29, 2, "Initialized Authentication Scheme '" << type << "'");
     }
 };
-RunnerRegistrationEntry(NtlmAuthRr);
+
+DefineRunnerRegistrator(NtlmAuthRr);
 
 Auth::Scheme::Pointer
 Auth::Ntlm::Scheme::GetInstance()
diff -u -r -N squid-6.3/src/base/RunnersRegistry.cc squid-6.4/src/base/RunnersRegistry.cc
--- squid-6.3/src/base/RunnersRegistry.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/base/RunnersRegistry.cc	2023-10-21 11:40:41.000000000 +1300
@@ -108,9 +108,3 @@
     // else do nothing past finishShutdown
 }
 
-bool
-UseThisStatic(const void *)
-{
-    return true;
-}
-
diff -u -r -N squid-6.3/src/base/RunnersRegistry.h squid-6.4/src/base/RunnersRegistry.h
--- squid-6.3/src/base/RunnersRegistry.h	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/base/RunnersRegistry.h	2023-10-21 11:40:41.000000000 +1300
@@ -118,14 +118,46 @@
     debugs(1, 2, "running " # m); \
     RunRegistered(&m)
 
-/// convenience function to "use" an otherwise unreferenced static variable
-bool UseThisStatic(const void *);
+/// helps DefineRunnerRegistrator() and CallRunnerRegistrator() (and their
+/// namespace-sensitive variants) to use the same registration function name
+#define NameRunnerRegistrator_(Namespace, ClassName) \
+    Register ## ClassName ## In ## Namespace()
 
-/// convenience macro: register one RegisteredRunner kid as early as possible
-#define RunnerRegistrationEntry(Who) \
-    static const bool Who ## _Registered_ = \
-        RegisterRunner(new Who) > 0 && \
-        UseThisStatic(& Who ## _Registered_);
+/// helper macro that implements DefineRunnerRegistrator() and
+/// DefineRunnerRegistratorIn() using supplied constructor expression
+#define DefineRunnerRegistrator_(Namespace, ClassName, Constructor) \
+    void NameRunnerRegistrator_(Namespace, ClassName); \
+    void NameRunnerRegistrator_(Namespace, ClassName) { \
+        const auto registered = RegisterRunner(new Constructor); \
+        assert(registered); \
+    }
+
+/// Define registration code for the given RegisteredRunner-derived class known
+/// as ClassName in Namespace. A matching CallRunnerRegistratorIn() call should
+/// run this code before any possible use of the being-registered module.
+/// \sa DefineRunnerRegistrator() for classes declared in the global namespace.
+#define DefineRunnerRegistratorIn(Namespace, ClassName) \
+    DefineRunnerRegistrator_(Namespace, ClassName, Namespace::ClassName)
+
+/// Define registration code for the given RegisteredRunner-derived class known
+/// as ClassName (in global namespace). A matching CallRunnerRegistrator() call
+/// should run this code before any possible use of the being-registered module.
+/// \sa DefineRunnerRegistratorIn() for classes declared in named namespaces.
+#define DefineRunnerRegistrator(ClassName) \
+    DefineRunnerRegistrator_(Global, ClassName, ClassName)
+
+/// Register one RegisteredRunner kid, known as ClassName in Namespace.
+/// \sa CallRunnerRegistrator() for classes declared in the global namespace.
+#define CallRunnerRegistratorIn(Namespace, ClassName) \
+    do { \
+        void NameRunnerRegistrator_(Namespace, ClassName); \
+        NameRunnerRegistrator_(Namespace, ClassName); \
+    } while (false)
+
+/// Register one RegisteredRunner kid, known as ClassName (in the global namespace).
+/// \sa CallRunnerRegistratorIn() for classes declared in named namespaces.
+#define CallRunnerRegistrator(ClassName) \
+    CallRunnerRegistratorIn(Global, ClassName)
 
 #endif /* SQUID_BASE_RUNNERSREGISTRY_H */
 
diff -u -r -N squid-6.3/src/cache_cf.cc squid-6.4/src/cache_cf.cc
--- squid-6.3/src/cache_cf.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/cache_cf.cc	2023-10-21 11:40:41.000000000 +1300
@@ -2948,7 +2948,8 @@
 static void
 dump_time_t(StoreEntry * entry, const char *name, time_t var)
 {
-    storeAppendPrintf(entry, "%s %d seconds\n", name, (int) var);
+    PackableStream os(*entry);
+    os << name << ' ' << var << " seconds\n";
 }
 
 void
@@ -2970,10 +2971,11 @@
 static void
 dump_time_msec(StoreEntry * entry, const char *name, time_msec_t var)
 {
+    PackableStream os(*entry);
     if (var % 1000)
-        storeAppendPrintf(entry, "%s %" PRId64 " milliseconds\n", name, var);
+        os << name << ' ' << var << " milliseconds\n";
     else
-        storeAppendPrintf(entry, "%s %d seconds\n", name, (int)(var/1000) );
+        os << name << ' ' << (var/1000) << " seconds\n";
 }
 
 static void
@@ -4374,7 +4376,7 @@
 
 Ssl::BumpMode sslBumpCfgRr::lastDeprecatedRule = Ssl::bumpEnd;
 
-RunnerRegistrationEntry(sslBumpCfgRr);
+DefineRunnerRegistrator(sslBumpCfgRr);
 
 void
 sslBumpCfgRr::finalizeConfig()
diff -u -r -N squid-6.3/src/cache_manager.cc squid-6.4/src/cache_manager.cc
--- squid-6.3/src/cache_manager.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/cache_manager.cc	2023-10-21 11:40:41.000000000 +1300
@@ -190,7 +190,7 @@
 {
     Parser::Tokenizer tok(uri.path());
 
-    Assure(tok.skip(WellKnownUrlPathPrefix()));
+    Assure(tok.skip(WellKnownUrlPathPrefix()) || tok.skip('/'));
 
     Mgr::Command::Pointer cmd = new Mgr::Command();
     cmd->params.httpUri = SBufToString(uri.absolute());
diff -u -r -N squid-6.3/src/client_db.cc squid-6.4/src/client_db.cc
--- squid-6.3/src/client_db.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/client_db.cc	2023-10-21 11:40:41.000000000 +1300
@@ -102,7 +102,7 @@
     /* RegisteredRunner API */
     void useConfig() override;
 };
-RunnerRegistrationEntry(ClientDbRr);
+DefineRunnerRegistrator(ClientDbRr);
 
 void
 ClientDbRr::useConfig()
diff -u -r -N squid-6.3/src/client_side_reply.cc squid-6.4/src/client_side_reply.cc
--- squid-6.3/src/client_side_reply.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/client_side_reply.cc	2023-10-21 11:40:41.000000000 +1300
@@ -33,6 +33,7 @@
 #include "refresh.h"
 #include "RequestFlags.h"
 #include "SquidConfig.h"
+#include "SquidMath.h"
 #include "Store.h"
 #include "StrList.h"
 #include "tools.h"
@@ -66,7 +67,6 @@
     /* old_entry might still be set if we didn't yet get the reply
      * code in HandleIMSReply() */
     removeStoreReference(&old_sc, &old_entry);
-    safe_free(tempBuffer.data);
     cbdataReferenceDone(http);
     HTTPMSGUNLOCK(reply);
 }
@@ -74,11 +74,7 @@
 clientReplyContext::clientReplyContext(ClientHttpRequest *clientContext) :
     purgeStatus(Http::scNone),
     http(cbdataReference(clientContext)),
-    headers_sz(0),
     sc(nullptr),
-    old_reqsize(0),
-    reqsize(0),
-    reqofs(0),
     ourNode(nullptr),
     reply(nullptr),
     old_entry(nullptr),
@@ -161,8 +157,6 @@
 #if USE_DELAY_POOLS
     sc->setDelayId(DelayId::DelayClient(http));
 #endif
-    reqofs = 0;
-    reqsize = 0;
     if (http->request)
         http->request->ignoreRange(reason);
     flags.storelogiccomplete = 1;
@@ -201,13 +195,9 @@
     old_sc = sc;
     old_lastmod = http->request->lastmod;
     old_etag = http->request->etag;
-    old_reqsize = reqsize;
-    tempBuffer.offset = reqofs;
     /* Prevent accessing the now saved entries */
     http->storeEntry(nullptr);
     sc = nullptr;
-    reqsize = 0;
-    reqofs = 0;
 }
 
 void
@@ -218,8 +208,6 @@
     removeClientStoreReference(&sc, http);
     http->storeEntry(old_entry);
     sc = old_sc;
-    reqsize = old_reqsize;
-    reqofs = tempBuffer.offset;
     http->request->lastmod = old_lastmod;
     http->request->etag = old_etag;
     /* Prevent accessed the old saved entries */
@@ -227,8 +215,6 @@
     old_sc = nullptr;
     old_lastmod = -1;
     old_etag.clean();
-    old_reqsize = 0;
-    tempBuffer.offset = 0;
 }
 
 void
@@ -245,18 +231,27 @@
     return (clientStreamNode *)ourNode->node.next->data;
 }
 
-/* This function is wrong - the client parameters don't include the
- * header offset
- */
+/// Request HTTP response headers from Store, to be sent to the given recipient.
+/// That recipient also gets zero, some, or all HTTP response body bytes (into
+/// next()->readBuffer).
 void
-clientReplyContext::triggerInitialStoreRead()
+clientReplyContext::triggerInitialStoreRead(STCB recipient)
 {
-    /* when confident, 0 becomes reqofs, and then this factors into
-     * startSendProcess
-     */
-    assert(reqofs == 0);
+    Assure(recipient != HandleIMSReply);
+    lastStreamBufferedBytes = StoreIOBuffer(); // storeClientCopy(next()->readBuffer) invalidates
     StoreIOBuffer localTempBuffer (next()->readBuffer.length, 0, next()->readBuffer.data);
-    storeClientCopy(sc, http->storeEntry(), localTempBuffer, SendMoreData, this);
+    ::storeClientCopy(sc, http->storeEntry(), localTempBuffer, recipient, this);
+}
+
+/// Request HTTP response body bytes from Store into next()->readBuffer. This
+/// method requests body bytes at readBuffer.offset and, hence, it should only
+/// be called after we triggerInitialStoreRead() and get the requested HTTP
+/// response headers (using zero offset).
+void
+clientReplyContext::requestMoreBodyFromStore()
+{
+    lastStreamBufferedBytes = StoreIOBuffer(); // storeClientCopy(next()->readBuffer) invalidates
+    ::storeClientCopy(sc, http->storeEntry(), next()->readBuffer, SendMoreData, this);
 }
 
 /* there is an expired entry in the store.
@@ -363,30 +358,22 @@
     {
         /* start counting the length from 0 */
         StoreIOBuffer localTempBuffer(HTTP_REQBUF_SZ, 0, tempbuf);
-        storeClientCopy(sc, entry, localTempBuffer, HandleIMSReply, this);
+        // keep lastStreamBufferedBytes: tempbuf is not a Client Stream buffer
+        ::storeClientCopy(sc, entry, localTempBuffer, HandleIMSReply, this);
     }
 }
 
 void
-clientReplyContext::sendClientUpstreamResponse()
+clientReplyContext::sendClientUpstreamResponse(const StoreIOBuffer &upstreamResponse)
 {
-    StoreIOBuffer tempresult;
     removeStoreReference(&old_sc, &old_entry);
 
     if (collapsedRevalidation)
         http->storeEntry()->clearPublicKeyScope();
 
     /* here the data to send is the data we just received */
-    tempBuffer.offset = 0;
-    old_reqsize = 0;
-    /* sendMoreData tracks the offset as well.
-     * Force it back to zero */
-    reqofs = 0;
     assert(!EBIT_TEST(http->storeEntry()->flags, ENTRY_ABORTED));
-    /* TODO: provide sendMoreData with the ready parsed reply */
-    tempresult.length = reqsize;
-    tempresult.data = tempbuf;
-    sendMoreData(tempresult);
+    sendMoreData(upstreamResponse);
 }
 
 void
@@ -403,11 +390,9 @@
     restoreState();
     /* here the data to send is in the next nodes buffers already */
     assert(!EBIT_TEST(http->storeEntry()->flags, ENTRY_ABORTED));
-    /* sendMoreData tracks the offset as well.
-     * Force it back to zero */
-    reqofs = 0;
-    StoreIOBuffer tempresult (reqsize, reqofs, next()->readBuffer.data);
-    sendMoreData(tempresult);
+    Assure(matchesStreamBodyBuffer(lastStreamBufferedBytes));
+    Assure(!lastStreamBufferedBytes.offset);
+    sendMoreData(lastStreamBufferedBytes);
 }
 
 /* This is the workhorse of the HandleIMSReply callback.
@@ -416,16 +401,16 @@
  * IMS request to revalidate a stale entry.
  */
 void
-clientReplyContext::handleIMSReply(StoreIOBuffer result)
+clientReplyContext::handleIMSReply(const StoreIOBuffer result)
 {
     if (deleting)
         return;
 
-    debugs(88, 3, http->storeEntry()->url() << ", " << (long unsigned) result.length << " bytes");
-
     if (http->storeEntry() == nullptr)
         return;
 
+    debugs(88, 3, http->storeEntry()->url() << " got " << result);
+
     if (result.flags.error && !EBIT_TEST(http->storeEntry()->flags, ENTRY_ABORTED))
         return;
 
@@ -438,9 +423,6 @@
         return;
     }
 
-    /* update size of the request */
-    reqsize = result.length + reqofs;
-
     // request to origin was aborted
     if (EBIT_TEST(http->storeEntry()->flags, ENTRY_ABORTED)) {
         debugs(88, 3, "request to origin aborted '" << http->storeEntry()->url() << "', sending old entry to client");
@@ -458,18 +440,24 @@
 
     // origin replied 304
     if (status == Http::scNotModified) {
-        http->updateLoggingTags(LOG_TCP_REFRESH_UNMODIFIED);
-        http->request->flags.staleIfHit = false; // old_entry is no longer stale
-
         // TODO: The update may not be instantaneous. Should we wait for its
         // completion to avoid spawning too much client-disassociated work?
-        Store::Root().updateOnNotModified(old_entry, *http->storeEntry());
+        if (!Store::Root().updateOnNotModified(old_entry, *http->storeEntry())) {
+            old_entry->release(true);
+            restoreState();
+            http->updateLoggingTags(LOG_TCP_MISS);
+            processMiss();
+            return;
+        }
+
+        http->updateLoggingTags(LOG_TCP_REFRESH_UNMODIFIED);
+        http->request->flags.staleIfHit = false; // old_entry is no longer stale
 
         // if client sent IMS
         if (http->request->flags.ims && !old_entry->modifiedSince(http->request->ims, http->request->imslen)) {
             // forward the 304 from origin
             debugs(88, 3, "origin replied 304, revalidated existing entry and forwarding 304 to client");
-            sendClientUpstreamResponse();
+            sendClientUpstreamResponse(result);
             return;
         }
 
@@ -494,7 +482,7 @@
 
         http->updateLoggingTags(LOG_TCP_REFRESH_MODIFIED);
         debugs(88, 3, "origin replied " << status << ", forwarding to client");
-        sendClientUpstreamResponse();
+        sendClientUpstreamResponse(result);
         return;
     }
 
@@ -502,7 +490,7 @@
     if (http->request->flags.failOnValidationError) {
         http->updateLoggingTags(LOG_TCP_REFRESH_FAIL_ERR);
         debugs(88, 3, "origin replied with error " << status << ", forwarding to client due to fail_on_validation_err");
-        sendClientUpstreamResponse();
+        sendClientUpstreamResponse(result);
         return;
     }
 
@@ -515,13 +503,7 @@
 SQUIDCEXTERN CSR clientGetMoreData;
 SQUIDCEXTERN CSD clientReplyDetach;
 
-/**
- * clientReplyContext::cacheHit Should only be called until the HTTP reply headers
- * have been parsed.  Normally this should be a single call, but
- * it might take more than one.  As soon as we have the headers,
- * we hand off to clientSendMoreData, processExpired, or
- * processMiss.
- */
+/// \copydoc clientReplyContext::cacheHit()
 void
 clientReplyContext::CacheHit(void *data, StoreIOBuffer result)
 {
@@ -529,11 +511,11 @@
     context->cacheHit(result);
 }
 
-/**
- * Process a possible cache HIT.
- */
+/// Processes HTTP response headers received from Store on a suspected cache hit
+/// path. May be called several times (e.g., a Vary marker object hit followed
+/// by the corresponding variant hit).
 void
-clientReplyContext::cacheHit(StoreIOBuffer result)
+clientReplyContext::cacheHit(const StoreIOBuffer result)
 {
     /** Ignore if the HIT object is being deleted. */
     if (deleting) {
@@ -545,7 +527,7 @@
 
     HttpRequest *r = http->request;
 
-    debugs(88, 3, "clientCacheHit: " << http->uri << ", " << result.length << " bytes");
+    debugs(88, 3, http->uri << " got " << result);
 
     if (http->storeEntry() == nullptr) {
         debugs(88, 3, "clientCacheHit: request aborted");
@@ -569,20 +551,7 @@
         return;
     }
 
-    if (result.length == 0) {
-        debugs(88, 5, "store IO buffer has no content. MISS");
-        /* the store couldn't get enough data from the file for us to id the
-         * object
-         */
-        /* treat as a miss */
-        http->updateLoggingTags(LOG_TCP_MISS);
-        processMiss();
-        return;
-    }
-
     assert(!EBIT_TEST(e->flags, ENTRY_ABORTED));
-    /* update size of the request */
-    reqsize = result.length + reqofs;
 
     /*
      * Got the headers, now grok them
@@ -596,6 +565,8 @@
         return;
     }
 
+    noteStreamBufferredBytes(result);
+
     switch (varyEvaluateMatch(e, r)) {
 
     case VARY_NONE:
@@ -1018,16 +989,10 @@
 }
 
 void
-clientReplyContext::traceReply(clientStreamNode * node)
+clientReplyContext::traceReply()
 {
-    clientStreamNode *nextNode = (clientStreamNode *)node->node.next->data;
-    StoreIOBuffer localTempBuffer;
     createStoreEntry(http->request->method, RequestFlags());
-    localTempBuffer.offset = nextNode->readBuffer.offset + headers_sz;
-    localTempBuffer.length = nextNode->readBuffer.length;
-    localTempBuffer.data = nextNode->readBuffer.data;
-    storeClientCopy(sc, http->storeEntry(),
-                    localTempBuffer, SendMoreData, this);
+    triggerInitialStoreRead();
     http->storeEntry()->releaseRequest();
     http->storeEntry()->buffer();
     const HttpReplyPointer rep(new HttpReply);
@@ -1076,16 +1041,15 @@
 clientReplyContext::storeOKTransferDone() const
 {
     assert(http->storeEntry()->objectLen() >= 0);
+    const auto headers_sz = http->storeEntry()->mem().baseReply().hdr_sz;
     assert(http->storeEntry()->objectLen() >= headers_sz);
-    if (http->out.offset >= http->storeEntry()->objectLen() - headers_sz) {
-        debugs(88,3, "storeOKTransferDone " <<
-               " out.offset=" << http->out.offset <<
-               " objectLen()=" << http->storeEntry()->objectLen() <<
-               " headers_sz=" << headers_sz);
-        return 1;
-    }
-
-    return 0;
+    const auto done = http->out.offset >= http->storeEntry()->objectLen() - headers_sz;
+    const auto debugLevel = done ? 3 : 5;
+    debugs(88, debugLevel, done <<
+           " out.offset=" << http->out.offset <<
+           " objectLen()=" << http->storeEntry()->objectLen() <<
+           " headers_sz=" << headers_sz);
+    return done ? 1 : 0;
 }
 
 int
@@ -1098,8 +1062,7 @@
     assert(mem != nullptr);
     assert(http->request != nullptr);
 
-    /* mem->reply was wrong because it uses the UPSTREAM header length!!! */
-    if (headers_sz == 0)
+    if (mem->baseReply().pstate != Http::Message::psParsed)
         /* haven't found end of headers yet */
         return 0;
 
@@ -1120,16 +1083,12 @@
     if (expectedBodySize < 0)
         return 0;
 
-    const uint64_t expectedLength = expectedBodySize + http->out.headers_sz;
-
-    if (http->out.size < expectedLength)
-        return 0;
-    else {
-        debugs(88,3, "storeNotOKTransferDone " <<
-               " out.size=" << http->out.size <<
-               " expectedLength=" << expectedLength);
-        return 1;
-    }
+    const auto done = http->out.offset >= expectedBodySize;
+    const auto debugLevel = done ? 3 : 5;
+    debugs(88, debugLevel, done <<
+           " out.offset=" << http->out.offset <<
+           " expectedBodySize=" << expectedBodySize);
+    return done ? 1 : 0;
 }
 
 /* Preconditions:
@@ -1654,20 +1613,12 @@
     assert (context);
     assert(context->http == http);
 
-    clientStreamNode *next = ( clientStreamNode *)aNode->node.next->data;
-
     if (!context->ourNode)
         context->ourNode = aNode;
 
     /* no cbdatareference, this is only used once, and safely */
     if (context->flags.storelogiccomplete) {
-        StoreIOBuffer tempBuffer;
-        tempBuffer.offset = next->readBuffer.offset + context->headers_sz;
-        tempBuffer.length = next->readBuffer.length;
-        tempBuffer.data = next->readBuffer.data;
-
-        storeClientCopy(context->sc, http->storeEntry(),
-                        tempBuffer, clientReplyContext::SendMoreData, context);
+        context->requestMoreBodyFromStore();
         return;
     }
 
@@ -1680,7 +1631,7 @@
 
     if (context->http->request->method == Http::METHOD_TRACE) {
         if (context->http->request->header.getInt64(Http::HdrType::MAX_FORWARDS) == 0) {
-            context->traceReply(aNode);
+            context->traceReply();
             return;
         }
 
@@ -1710,7 +1661,6 @@
 #endif
 
         assert(http->loggingTags().oldType == LOG_TCP_HIT);
-        reqofs = 0;
         /* guarantee nothing has been sent yet! */
         assert(http->out.size == 0);
         assert(http->out.offset == 0);
@@ -1725,10 +1675,7 @@
             }
         }
 
-        localTempBuffer.offset = reqofs;
-        localTempBuffer.length = getNextNode()->readBuffer.length;
-        localTempBuffer.data = getNextNode()->readBuffer.data;
-        storeClientCopy(sc, http->storeEntry(), localTempBuffer, CacheHit, this);
+        triggerInitialStoreRead(CacheHit);
     } else {
         /* MISS CASE, http->loggingTags() are already set! */
         processMiss();
@@ -1763,12 +1710,11 @@
 }
 
 bool
-clientReplyContext::errorInStream(StoreIOBuffer const &result, size_t const &sizeToProcess)const
+clientReplyContext::errorInStream(const StoreIOBuffer &result) const
 {
     return /* aborted request */
         (http->storeEntry() && EBIT_TEST(http->storeEntry()->flags, ENTRY_ABORTED)) ||
-        /* Upstream read error */ (result.flags.error) ||
-        /* Upstream EOF */ (sizeToProcess == 0);
+        /* Upstream read error */ (result.flags.error);
 }
 
 void
@@ -1789,24 +1735,16 @@
 }
 
 void
-clientReplyContext::pushStreamData(StoreIOBuffer const &result, char *source)
+clientReplyContext::pushStreamData(const StoreIOBuffer &result)
 {
-    StoreIOBuffer localTempBuffer;
-
     if (result.length == 0) {
         debugs(88, 5, "clientReplyContext::pushStreamData: marking request as complete due to 0 length store result");
         flags.complete = 1;
     }
 
-    assert(result.offset - headers_sz == next()->readBuffer.offset);
-    localTempBuffer.offset = result.offset - headers_sz;
-    localTempBuffer.length = result.length;
-
-    if (localTempBuffer.length)
-        localTempBuffer.data = source;
-
+    assert(!result.length || result.offset == next()->readBuffer.offset);
     clientStreamCallback((clientStreamNode*)http->client_stream.head->data, http, nullptr,
-                         localTempBuffer);
+                         result);
 }
 
 clientStreamNode *
@@ -1893,7 +1831,6 @@
     if (http->loggingTags().oldType == LOG_TCP_DENIED ||
             http->loggingTags().oldType == LOG_TCP_DENIED_REPLY ||
             alwaysAllowResponse(reply->sline.status())) {
-        headers_sz = reply->hdr_sz;
         processReplyAccessResult(ACCESS_ALLOWED);
         return;
     }
@@ -1904,8 +1841,6 @@
         return;
     }
 
-    headers_sz = reply->hdr_sz;
-
     /** check for absent access controls (permit by default) */
     if (!Config.accessList.reply) {
         processReplyAccessResult(ACCESS_ALLOWED);
@@ -1959,11 +1894,9 @@
     /* Ok, the reply is allowed, */
     http->loggingEntry(http->storeEntry());
 
-    ssize_t body_size = reqofs - reply->hdr_sz;
-    if (body_size < 0) {
-        reqofs = reply->hdr_sz;
-        body_size = 0;
-    }
+    Assure(matchesStreamBodyBuffer(lastStreamBufferedBytes));
+    Assure(!lastStreamBufferedBytes.offset);
+    auto body_size = lastStreamBufferedBytes.length; // may be zero
 
     debugs(88, 3, "clientReplyContext::sendMoreData: Appending " <<
            (int) body_size << " bytes after " << reply->hdr_sz <<
@@ -1991,19 +1924,27 @@
     assert (!flags.headersSent);
     flags.headersSent = true;
 
+    // next()->readBuffer.offset may be positive for Range requests, but our
+    // localTempBuffer initialization code assumes that next()->readBuffer.data
+    // points to the response body at offset 0 because the first
+    // storeClientCopy() request always has offset 0 (i.e. our first Store
+    // request ignores next()->readBuffer.offset).
+    //
+    // XXX: We cannot fully check that assumption: readBuffer.offset field is
+    // often out of sync with the buffer content, and if some buggy code updates
+    // the buffer while we were waiting for the processReplyAccessResult()
+    // callback, we may not notice.
+
     StoreIOBuffer localTempBuffer;
-    char *buf = next()->readBuffer.data;
-    char *body_buf = buf + reply->hdr_sz;
+    const auto body_buf = next()->readBuffer.data;
 
     //Server side may disable ranges under some circumstances.
 
     if ((!http->request->range))
         next()->readBuffer.offset = 0;
 
-    body_buf -= next()->readBuffer.offset;
-
-    if (next()->readBuffer.offset != 0) {
-        if (next()->readBuffer.offset > body_size) {
+    if (next()->readBuffer.offset > 0) {
+        if (Less(body_size, next()->readBuffer.offset)) {
             /* Can't use any of the body we received. send nothing */
             localTempBuffer.length = 0;
             localTempBuffer.data = nullptr;
@@ -2016,7 +1957,6 @@
         localTempBuffer.data = body_buf;
     }
 
-    /* TODO??: move the data in the buffer back by the request header size */
     clientStreamCallback((clientStreamNode *)http->client_stream.head->data,
                          http, reply, localTempBuffer);
 
@@ -2029,6 +1969,8 @@
     if (deleting)
         return;
 
+    debugs(88, 5, http->uri << " got " << result);
+
     StoreEntry *entry = http->storeEntry();
 
     if (ConnStateData * conn = http->getConn()) {
@@ -2041,7 +1983,9 @@
             return;
         }
 
-        if (reqofs==0 && !http->loggingTags().isTcpHit()) {
+        if (!flags.headersSent && !http->loggingTags().isTcpHit()) {
+            // We get here twice if processReplyAccessResult() calls startError().
+            // TODO: Revise when we check/change QoS markings to reduce syscalls.
             if (Ip::Qos::TheConfig.isHitTosActive()) {
                 Ip::Qos::doTosLocalMiss(conn->clientConnection, http->request->hier.code);
             }
@@ -2055,21 +1999,9 @@
                " out.offset=" << http->out.offset);
     }
 
-    char *buf = next()->readBuffer.data;
-
-    if (buf != result.data) {
-        /* we've got to copy some data */
-        assert(result.length <= next()->readBuffer.length);
-        memcpy(buf, result.data, result.length);
-    }
-
     /* We've got the final data to start pushing... */
     flags.storelogiccomplete = 1;
 
-    reqofs += result.length;
-
-    assert(reqofs <= HTTP_REQBUF_SZ || flags.headersSent);
-
     assert(http->request != nullptr);
 
     /* ESI TODO: remove this assert once everything is stable */
@@ -2078,20 +2010,25 @@
 
     makeThisHead();
 
-    debugs(88, 5, "clientReplyContext::sendMoreData: " << http->uri << ", " <<
-           reqofs << " bytes (" << result.length <<
-           " new bytes)");
-
-    /* update size of the request */
-    reqsize = reqofs;
-
-    if (errorInStream(result, reqofs)) {
+    if (errorInStream(result)) {
         sendStreamError(result);
         return;
     }
 
+    if (!matchesStreamBodyBuffer(result)) {
+        // Subsequent processing expects response body bytes to be at the start
+        // of our Client Stream buffer. When given something else (e.g., bytes
+        // in our tempbuf), we copy and adjust to meet those expectations.
+        const auto &ourClientStreamsBuffer = next()->readBuffer;
+        assert(result.length <= ourClientStreamsBuffer.length);
+        memcpy(ourClientStreamsBuffer.data, result.data, result.length);
+        result.data = ourClientStreamsBuffer.data;
+    }
+
+    noteStreamBufferredBytes(result);
+
     if (flags.headersSent) {
-        pushStreamData (result, buf);
+        pushStreamData(result);
         return;
     }
 
@@ -2106,6 +2043,32 @@
     return;
 }
 
+/// Whether the given body area describes the start of our Client Stream buffer.
+/// An empty area does.
+bool
+clientReplyContext::matchesStreamBodyBuffer(const StoreIOBuffer &their) const
+{
+    // the answer is undefined for errors; they are not really "body buffers"
+    Assure(!their.flags.error);
+
+    if (!their.length)
+        return true; // an empty body area always matches our body area
+
+    if (their.data != next()->readBuffer.data) {
+        debugs(88, 7, "no: " << their << " vs. " << next()->readBuffer);
+        return false;
+    }
+
+    return true;
+}
+
+void
+clientReplyContext::noteStreamBufferredBytes(const StoreIOBuffer &result)
+{
+    Assure(matchesStreamBodyBuffer(result));
+    lastStreamBufferedBytes = result; // may be unchanged and/or zero-length
+}
+
 void
 clientReplyContext::fillChecklist(ACLFilledChecklist &checklist) const
 {
@@ -2152,13 +2115,6 @@
     sc->setDelayId(DelayId::DelayClient(http));
 #endif
 
-    reqofs = 0;
-
-    reqsize = 0;
-
-    /* I don't think this is actually needed! -- adrian */
-    /* http->reqbuf = http->norm_reqbuf; */
-    //    assert(http->reqbuf == http->norm_reqbuf);
     /* The next line is illegal because we don't know if the client stream
      * buffers have been set up
      */
diff -u -r -N squid-6.3/src/client_side_reply.h squid-6.4/src/client_side_reply.h
--- squid-6.3/src/client_side_reply.h	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/client_side_reply.h	2023-10-21 11:40:41.000000000 +1300
@@ -34,7 +34,6 @@
     void saveState();
     void restoreState();
     void purgeRequest ();
-    void sendClientUpstreamResponse();
     void doGetMoreData();
     void identifyStoreObject();
     void identifyFoundObject(StoreEntry *entry, const char *detail);
@@ -60,7 +59,7 @@
     void processExpired();
     clientStream_status_t replyStatus();
     void processMiss();
-    void traceReply(clientStreamNode * node);
+    void traceReply();
     const char *storeId() const { return (http->store_id.size() > 0 ? http->store_id.termedBuf() : http->uri); }
 
     Http::StatusCode purgeStatus;
@@ -69,16 +68,13 @@
     LogTags *loggingTags() const override;
 
     ClientHttpRequest *http;
-    /// Base reply header bytes received from Store.
-    /// Compatible with ClientHttpRequest::Out::offset.
-    /// Not to be confused with ClientHttpRequest::Out::headers_sz.
-    int headers_sz;
     store_client *sc;       /* The store_client we're using */
-    StoreIOBuffer tempBuffer;   /* For use in validating requests via IMS */
-    int old_reqsize;        /* ... again, for the buffer */
-    size_t reqsize;
-    size_t reqofs;
-    char tempbuf[HTTP_REQBUF_SZ];   ///< a temporary buffer if we need working storage
+
+    /// Buffer dedicated to receiving storeClientCopy() responses to generated
+    /// revalidation requests. These requests cannot use next()->readBuffer
+    /// because the latter keeps the contents of the stale HTTP response during
+    /// revalidation. sendClientOldEntry() uses that content.
+    char tempbuf[HTTP_REQBUF_SZ];
 
     struct Flags {
         Flags() : storelogiccomplete(0), complete(0), headersSent(false) {}
@@ -95,9 +91,10 @@
 
     clientStreamNode *getNextNode() const;
     void makeThisHead();
-    bool errorInStream(StoreIOBuffer const &result, size_t const &sizeToProcess)const ;
+    bool errorInStream(const StoreIOBuffer &result) const;
+    bool matchesStreamBodyBuffer(const StoreIOBuffer &) const;
     void sendStreamError(StoreIOBuffer const &result);
-    void pushStreamData(StoreIOBuffer const &result, char *source);
+    void pushStreamData(const StoreIOBuffer &);
     clientStreamNode * next() const;
     HttpReply *reply;
     void processReplyAccess();
@@ -109,10 +106,12 @@
     int checkTransferDone();
     void processOnlyIfCachedMiss();
     bool processConditional();
+    void noteStreamBufferredBytes(const StoreIOBuffer &);
     void cacheHit(StoreIOBuffer result);
     void handleIMSReply(StoreIOBuffer result);
     void sendMoreData(StoreIOBuffer result);
-    void triggerInitialStoreRead();
+    void triggerInitialStoreRead(STCB = SendMoreData);
+    void requestMoreBodyFromStore();
     void sendClientOldEntry();
     void purgeAllCached();
     /// attempts to release the cached entry
@@ -129,14 +128,21 @@
     void sendPreconditionFailedError();
     void sendNotModified();
     void sendNotModifiedOrPreconditionFailedError();
+    void sendClientUpstreamResponse(const StoreIOBuffer &upstreamResponse);
+
+    /// Reduces a chance of an accidental direct storeClientCopy() call that
+    /// (should but) forgets to invalidate our lastStreamBufferedBytes. This
+    /// function is not defined; decltype() syntax prohibits "= delete", but
+    /// function usage will trigger deprecation warnings and linking errors.
+    static decltype(::storeClientCopy) storeClientCopy [[deprecated]];
 
     /// Classification of the initial Store lookup.
     /// This very first lookup happens without the Vary-driven key augmentation.
     /// TODO: Exclude internal Store match bans from the "mismatch" category.
     const char *firstStoreLookup_ = nullptr;
 
+    /* (stale) cache hit information preserved during IMS revalidation */
     StoreEntry *old_entry;
-    /* ... for entry to be validated */
     store_client *old_sc;
     time_t old_lastmod;
     String old_etag;
@@ -150,6 +156,12 @@
     } CollapsedRevalidation;
 
     CollapsedRevalidation collapsedRevalidation;
+
+    /// HTTP response body bytes stored in our Client Stream buffer (if any)
+    StoreIOBuffer lastStreamBufferedBytes;
+
+    // TODO: Remove after moving the meat of this function into a method.
+    friend CSR clientGetMoreData;
 };
 
 // TODO: move to SideAgent parent, when we have one
diff -u -r -N squid-6.3/src/client_side_request.h squid-6.4/src/client_side_request.h
--- squid-6.3/src/client_side_request.h	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/client_side_request.h	2023-10-21 11:40:41.000000000 +1300
@@ -147,7 +147,6 @@
         /// Response header and body bytes written to the client connection.
         uint64_t size = 0;
         /// Response header bytes written to the client connection.
-        /// Not to be confused with clientReplyContext::headers_sz.
         size_t headers_sz = 0;
     } out;
 
diff -u -r -N squid-6.3/src/clientStream.cc squid-6.4/src/clientStream.cc
--- squid-6.3/src/clientStream.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/clientStream.cc	2023-10-21 11:40:41.000000000 +1300
@@ -154,8 +154,7 @@
     assert(thisObject && http && thisObject->node.next);
     next = thisObject->next();
 
-    debugs(87, 3, "clientStreamCallback: Calling " << next->callback << " with cbdata " <<
-           next->data.getRaw() << " from node " << thisObject);
+    debugs(87, 3, thisObject << " gives " << next->data << ' ' << replyBuffer);
     next->callback(next, http, rep, replyBuffer);
 }
 
diff -u -r -N squid-6.3/src/CollapsedForwarding.cc squid-6.4/src/CollapsedForwarding.cc
--- squid-6.3/src/CollapsedForwarding.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/CollapsedForwarding.cc	2023-10-21 11:40:41.000000000 +1300
@@ -186,7 +186,7 @@
     Ipc::MultiQueue::Owner *owner;
 };
 
-RunnerRegistrationEntry(CollapsedForwardingRr);
+DefineRunnerRegistrator(CollapsedForwardingRr);
 
 void CollapsedForwardingRr::create()
 {
diff -u -r -N squid-6.3/src/debug/Messages.h squid-6.4/src/debug/Messages.h
--- squid-6.3/src/debug/Messages.h	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/debug/Messages.h	2023-10-21 11:40:41.000000000 +1300
@@ -61,7 +61,7 @@
 };
 
 /// The maximum used DebugMessage::id plus 1. Increase as you add new IDs.
-constexpr DebugMessageId DebugMessageIdUpperBound = 69;
+constexpr DebugMessageId DebugMessageIdUpperBound = 70;
 
 /// a collection of DebugMessage objects (with fast access by message IDs)
 class DebugMessages
diff -u -r -N squid-6.3/src/DiskIO/IpcIo/IpcIoFile.cc squid-6.4/src/DiskIO/IpcIo/IpcIoFile.cc
--- squid-6.3/src/DiskIO/IpcIo/IpcIoFile.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/DiskIO/IpcIo/IpcIoFile.cc	2023-10-21 11:40:41.000000000 +1300
@@ -1026,7 +1026,7 @@
     Ipc::FewToFewBiQueue::Owner *owner;
 };
 
-RunnerRegistrationEntry(IpcIoRr);
+DefineRunnerRegistrator(IpcIoRr);
 
 void
 IpcIoRr::claimMemoryNeeds()
diff -u -r -N squid-6.3/src/dns_internal.cc squid-6.4/src/dns_internal.cc
--- squid-6.3/src/dns_internal.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/dns_internal.cc	2023-10-21 11:40:41.000000000 +1300
@@ -210,10 +210,10 @@
     void endingShutdown() override;
 };
 
-RunnerRegistrationEntry(ConfigRr);
-
 } // namespace Dns
 
+DefineRunnerRegistratorIn(Dns, ConfigRr);
+
 struct _sp {
     char domain[NS_MAXDNAME];
     int queries;
diff -u -r -N squid-6.3/src/enums.h squid-6.4/src/enums.h
--- squid-6.3/src/enums.h	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/enums.h	2023-10-21 11:40:41.000000000 +1300
@@ -202,7 +202,6 @@
 typedef enum {
     DIGEST_READ_NONE,
     DIGEST_READ_REPLY,
-    DIGEST_READ_HEADERS,
     DIGEST_READ_CBLOCK,
     DIGEST_READ_MASK,
     DIGEST_READ_DONE
diff -u -r -N squid-6.3/src/esi/ExpatParser.cc squid-6.4/src/esi/ExpatParser.cc
--- squid-6.3/src/esi/ExpatParser.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/esi/ExpatParser.cc	2023-10-21 11:40:41.000000000 +1300
@@ -32,10 +32,10 @@
     std::unique_ptr<ESIParser::Register> registration;
 };
 
-RunnerRegistrationEntry(ExpatRr);
-
 }
 
+DefineRunnerRegistratorIn(Esi, ExpatRr);
+
 EsiParserDefinition(ESIExpatParser);
 
 ESIExpatParser::ESIExpatParser(ESIParserClient *aClient) : theClient (aClient)
diff -u -r -N squid-6.3/src/esi/Libxml2Parser.cc squid-6.4/src/esi/Libxml2Parser.cc
--- squid-6.3/src/esi/Libxml2Parser.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/esi/Libxml2Parser.cc	2023-10-21 11:40:41.000000000 +1300
@@ -36,10 +36,10 @@
     std::unique_ptr<ESIParser::Register> registration;
 };
 
-RunnerRegistrationEntry(Libxml2Rr);
-
 }
 
+DefineRunnerRegistratorIn(Esi, Libxml2Rr);
+
 // the global document that will store the resolved entity
 // definitions
 static htmlDocPtr entity_doc = nullptr;
diff -u -r -N squid-6.3/src/fs/rock/RockSwapDir.cc squid-6.4/src/fs/rock/RockSwapDir.cc
--- squid-6.3/src/fs/rock/RockSwapDir.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/fs/rock/RockSwapDir.cc	2023-10-21 11:40:41.000000000 +1300
@@ -1107,10 +1107,7 @@
     return map->hasReadableEntry(reinterpret_cast<const cache_key*>(e.key));
 }
 
-namespace Rock
-{
-RunnerRegistrationEntry(SwapDirRr);
-}
+DefineRunnerRegistratorIn(Rock, SwapDirRr);
 
 void Rock::SwapDirRr::create()
 {
diff -u -r -N squid-6.3/src/helper/Reply.cc squid-6.4/src/helper/Reply.cc
--- squid-6.3/src/helper/Reply.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/helper/Reply.cc	2023-10-21 11:40:41.000000000 +1300
@@ -10,6 +10,7 @@
 
 #include "squid.h"
 #include "ConfigParser.h"
+#include "debug/Messages.h"
 #include "debug/Stream.h"
 #include "helper.h"
 #include "helper/Reply.h"
@@ -197,7 +198,7 @@
     if (std::find(recognized.begin(), recognized.end(), key) != recognized.end())
         return; // a Squid-recognized key
 
-    debugs(84, DBG_IMPORTANT, "WARNING: Unsupported or unexpected from-helper annotation with a name reserved for Squid use: " <<
+    debugs(84, Important(69), "WARNING: Unsupported or unexpected from-helper annotation with a name reserved for Squid use: " <<
            key << '=' << value <<
            Debug::Extra << "advice: If this is a custom annotation, rename it to add a trailing underscore: " <<
            key << '_');
diff -u -r -N squid-6.3/src/http/one/Parser.cc squid-6.4/src/http/one/Parser.cc
--- squid-6.3/src/http/one/Parser.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/http/one/Parser.cc	2023-10-21 11:40:41.000000000 +1300
@@ -65,16 +65,10 @@
 void
 Http::One::Parser::skipLineTerminator(Tokenizer &tok) const
 {
-    if (tok.skip(Http1::CrLf()))
-        return;
-
     if (Config.onoff.relaxed_header_parser && tok.skipOne(CharacterSet::LF))
         return;
 
-    if (tok.atEnd() || (tok.remaining().length() == 1 && tok.remaining().at(0) == '\r'))
-        throw InsufficientInput();
-
-    throw TexcHere("garbage instead of CRLF line terminator");
+    tok.skipRequired("line-terminating CRLF", Http1::CrLf());
 }
 
 /// all characters except the LF line terminator
diff -u -r -N squid-6.3/src/http/one/Parser.h squid-6.4/src/http/one/Parser.h
--- squid-6.3/src/http/one/Parser.h	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/http/one/Parser.h	2023-10-21 11:40:41.000000000 +1300
@@ -124,9 +124,7 @@
      * detect and skip the CRLF or (if tolerant) LF line terminator
      * consume from the tokenizer.
      *
-     * \throws exception on bad or InsuffientInput.
-     * \retval true only if line terminator found.
-     * \retval false incomplete or missing line terminator, need more data.
+     * \throws exception on bad or InsufficientInput
      */
     void skipLineTerminator(Tokenizer &) const;
 
diff -u -r -N squid-6.3/src/http/one/RequestParser.cc squid-6.4/src/http/one/RequestParser.cc
--- squid-6.3/src/http/one/RequestParser.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/http/one/RequestParser.cc	2023-10-21 11:40:41.000000000 +1300
@@ -44,7 +44,8 @@
                    "Ignored due to relaxed_header_parser.");
         // Be tolerant of prefix empty lines
         // ie any series of either \n or \r\n with no other characters and no repeated \r
-        while (!buf_.isEmpty() && (buf_[0] == '\n' || (buf_[0] == '\r' && buf_[1] == '\n'))) {
+        while (!buf_.isEmpty() && (buf_[0] == '\n' ||
+                                   (buf_[0] == '\r' && buf_.length() > 1 && buf_[1] == '\n'))) {
             buf_.consume(1);
         }
     }
diff -u -r -N squid-6.3/src/http/one/TeChunkedParser.cc squid-6.4/src/http/one/TeChunkedParser.cc
--- squid-6.3/src/http/one/TeChunkedParser.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/http/one/TeChunkedParser.cc	2023-10-21 11:40:41.000000000 +1300
@@ -91,6 +91,11 @@
 {
     Must(theChunkSize <= 0); // Should(), really
 
+    static const SBuf bannedHexPrefixLower("0x");
+    static const SBuf bannedHexPrefixUpper("0X");
+    if (tok.skip(bannedHexPrefixLower) || tok.skip(bannedHexPrefixUpper))
+        throw TextException("chunk starts with 0x", Here());
+
     int64_t size = -1;
     if (tok.int64(size, 16, false) && !tok.atEnd()) {
         if (size < 0)
@@ -121,7 +126,7 @@
     // bad or insufficient input, like in the code below. TODO: Expand up.
     try {
         parseChunkExtensions(tok); // a possibly empty chunk-ext list
-        skipLineTerminator(tok);
+        tok.skipRequired("CRLF after [chunk-ext]", Http1::CrLf());
         buf_ = tok.remaining();
         parsingStage_ = theChunkSize ? Http1::HTTP_PARSE_CHUNK : Http1::HTTP_PARSE_MIME;
         return true;
@@ -132,12 +137,14 @@
     // other exceptions bubble up to kill message parsing
 }
 
-/// Parses the chunk-ext list (RFC 7230 section 4.1.1 and its Errata #4667):
+/// Parses the chunk-ext list (RFC 9112 section 7.1.1):
 /// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
 void
-Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &tok)
+Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &callerTok)
 {
     do {
+        auto tok = callerTok;
+
         ParseBws(tok); // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
 
         if (!tok.skip(';'))
@@ -145,6 +152,7 @@
 
         parseOneChunkExtension(tok);
         buf_ = tok.remaining(); // got one extension
+        callerTok = tok;
     } while (true);
 }
 
@@ -158,11 +166,14 @@
 /// Parses a single chunk-ext list element:
 /// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
 void
-Http::One::TeChunkedParser::parseOneChunkExtension(Tokenizer &tok)
+Http::One::TeChunkedParser::parseOneChunkExtension(Tokenizer &callerTok)
 {
+    auto tok = callerTok;
+
     ParseBws(tok); // Bug 4492: ICAP servers send SP before chunk-ext-name
 
     const auto extName = tok.prefix("chunk-ext-name", CharacterSet::TCHAR);
+    callerTok = tok; // in case we determine that this is a valueless chunk-ext
 
     ParseBws(tok);
 
@@ -176,6 +187,8 @@
         customExtensionValueParser->parse(tok, extName);
     else
         ChunkExtensionValueParser::Ignore(tok, extName);
+
+    callerTok = tok;
 }
 
 bool
@@ -209,7 +222,7 @@
     Must(theLeftBodySize == 0); // Should(), really
 
     try {
-        skipLineTerminator(tok);
+        tok.skipRequired("chunk CRLF", Http1::CrLf());
         buf_ = tok.remaining(); // parse checkpoint
         theChunkSize = 0; // done with the current chunk
         parsingStage_ = Http1::HTTP_PARSE_CHUNK_SZ;
diff -u -r -N squid-6.3/src/http/StatusLine.cc squid-6.4/src/http/StatusLine.cc
--- squid-6.3/src/http/StatusLine.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/http/StatusLine.cc	2023-10-21 11:40:41.000000000 +1300
@@ -46,9 +46,47 @@
     return reason_ ? reason_ : Http::StatusCodeString(status());
 }
 
+size_t
+Http::StatusLine::packedLength() const
+{
+    // Keep in sync with packInto(). TODO: Refactor to avoid code duplication.
+
+    auto packedStatus = status();
+    auto packedReason = reason();
+
+    if (packedStatus == scNone) {
+        packedStatus = scInternalServerError;
+        packedReason = StatusCodeString(packedStatus);
+    }
+
+    // "ICY %3d %s\r\n"
+    if (version.protocol == AnyP::PROTO_ICY) {
+        return
+            + 3 // ICY
+            + 1 // SP
+            + 3 // %3d (packedStatus)
+            + 1 // SP
+            + strlen(packedReason) // %s
+            + 2; // CRLF
+    }
+
+    // "HTTP/%d.%d %3d %s\r\n"
+    return
+        + 4 // HTTP
+        + 1 // "/"
+        + 3 // %d.%d (version.major and version.minor)
+        + 1 // SP
+        + 3 // %3d (packedStatus)
+        + 1 // SP
+        + strlen(packedReason) // %s
+        + 2; // CRLF
+}
+
 void
 Http::StatusLine::packInto(Packable * p) const
 {
+    // Keep in sync with packedLength().
+
     assert(p);
 
     auto packedStatus = status();
diff -u -r -N squid-6.3/src/http/StatusLine.h squid-6.4/src/http/StatusLine.h
--- squid-6.3/src/http/StatusLine.h	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/http/StatusLine.h	2023-10-21 11:40:41.000000000 +1300
@@ -47,6 +47,9 @@
     /// retrieve the reason string for this status line
     const char *reason() const;
 
+    /// expected size of packInto() output
+    size_t packedLength() const;
+
     /// pack fields into a Packable object
     void packInto(Packable *) const;
 
diff -u -r -N squid-6.3/src/http/url_rewriters/LFS/url_lfs_rewrite.8 squid-6.4/src/http/url_rewriters/LFS/url_lfs_rewrite.8
--- squid-6.3/src/http/url_rewriters/LFS/url_lfs_rewrite.8	2023-09-03 20:37:53.000000000 +1200
+++ squid-6.4/src/http/url_rewriters/LFS/url_lfs_rewrite.8	2023-10-22 01:47:16.000000000 +1300
@@ -133,7 +133,7 @@
 .\" ========================================================================
 .\"
 .IX Title "URL_LFS_REWRITE 8"
-.TH URL_LFS_REWRITE 8 "2023-09-03" "perl v5.36.0" "User Contributed Perl Documentation"
+.TH URL_LFS_REWRITE 8 "2023-10-21" "perl v5.36.0" "User Contributed Perl Documentation"
 .\" For nroff, turn off justification.  Always turn off hyphenation; it makes
 .\" way too many mistakes in technical documents.
 .if n .ad l
diff -u -r -N squid-6.3/src/HttpReply.cc squid-6.4/src/HttpReply.cc
--- squid-6.3/src/HttpReply.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/HttpReply.cc	2023-10-21 11:40:41.000000000 +1300
@@ -21,7 +21,9 @@
 #include "HttpReply.h"
 #include "HttpRequest.h"
 #include "MemBuf.h"
+#include "sbuf/Stream.h"
 #include "SquidConfig.h"
+#include "SquidMath.h"
 #include "Store.h"
 #include "StrList.h"
 
@@ -456,6 +458,44 @@
     return sline.parse(protoPrefix, blk_start, blk_end);
 }
 
+size_t
+HttpReply::parseTerminatedPrefix(const char * const terminatedBuf, const size_t bufSize)
+{
+    auto error = Http::scNone;
+    const bool eof = false; // TODO: Remove after removing atEnd from HttpHeader::parse()
+    if (parse(terminatedBuf, bufSize, eof, &error)) {
+        debugs(58, 7, "success after accumulating " << bufSize << " bytes and parsing " << hdr_sz);
+        Assure(pstate == Http::Message::psParsed);
+        Assure(hdr_sz > 0);
+        Assure(!Less(bufSize, hdr_sz)); // cannot parse more bytes than we have
+        return hdr_sz; // success
+    }
+
+    Assure(pstate != Http::Message::psParsed);
+    hdr_sz = 0;
+
+    if (error) {
+        throw TextException(ToSBuf("failed to parse HTTP headers",
+                                   Debug::Extra, "parser error code: ", error,
+                                   Debug::Extra, "accumulated unparsed bytes: ", bufSize,
+                                   Debug::Extra, "reply_header_max_size: ", Config.maxReplyHeaderSize),
+                            Here());
+    }
+
+    debugs(58, 3, "need more bytes after accumulating " << bufSize << " out of " << Config.maxReplyHeaderSize);
+
+    // the parse() call above enforces Config.maxReplyHeaderSize limit
+    // XXX: Make this a strict comparison after fixing Http::Message::parse() enforcement
+    Assure(bufSize <= Config.maxReplyHeaderSize);
+    return 0; // parsed nothing, need more data
+}
+
+size_t
+HttpReply::prefixLen() const
+{
+    return sline.packedLength() + header.len + 2;
+}
+
 void
 HttpReply::configureContentLengthInterpreter(Http::ContentLengthInterpreter &interpreter)
 {
diff -u -r -N squid-6.3/src/HttpReply.h squid-6.4/src/HttpReply.h
--- squid-6.3/src/HttpReply.h	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/HttpReply.h	2023-10-21 11:40:41.000000000 +1300
@@ -125,6 +125,17 @@
     /// parses reply header using Parser
     bool parseHeader(Http1::Parser &hp);
 
+    /// Parses response status line and headers at the start of the given
+    /// NUL-terminated buffer of the given size. Respects reply_header_max_size.
+    /// Assures pstate becomes Http::Message::psParsed on (and only on) success.
+    /// \returns the number of bytes in a successfully parsed prefix (or zero)
+    /// \retval 0 implies that more data is needed to parse the response prefix
+    size_t parseTerminatedPrefix(const char *, size_t);
+
+    /// approximate size of a "status-line CRLF headers CRLF" sequence
+    /// \sa HttpRequest::prefixLen()
+    size_t prefixLen() const;
+
 private:
     /** initialize */
     void init();
diff -u -r -N squid-6.3/src/icmp/net_db.cc squid-6.4/src/icmp/net_db.cc
--- squid-6.3/src/icmp/net_db.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/icmp/net_db.cc	2023-10-21 11:40:41.000000000 +1300
@@ -33,6 +33,7 @@
 #include "mime_header.h"
 #include "neighbors.h"
 #include "PeerSelectState.h"
+#include "sbuf/SBuf.h"
 #include "SquidConfig.h"
 #include "Store.h"
 #include "StoreClient.h"
@@ -48,8 +49,6 @@
 #include "ipcache.h"
 #include "StoreClient.h"
 
-#define NETDB_REQBUF_SZ 4096
-
 typedef enum {
     STATE_NONE,
     STATE_HEADER,
@@ -65,7 +64,6 @@
         p(aPeer),
         r(theReq)
     {
-        *buf = 0;
         assert(r);
         // TODO: check if we actually need to do this. should be implicit
         r->http_ver = Http::ProtocolVersion();
@@ -81,10 +79,10 @@
     StoreEntry *e = nullptr;
     store_client *sc = nullptr;
     HttpRequestPointer r;
-    int64_t used = 0;
-    size_t buf_sz = NETDB_REQBUF_SZ;
-    char buf[NETDB_REQBUF_SZ];
-    int buf_ofs = 0;
+
+    /// for receiving a NetDB reply body from Store and interpreting it
+    Store::ParsingBuffer parsingBuffer;
+
     netdb_conn_state_t connstate = STATE_HEADER;
 };
 
@@ -667,23 +665,19 @@
     Ip::Address addr;
 
     netdbExchangeState *ex = (netdbExchangeState *)data;
-    int rec_sz = 0;
-    int o;
 
     struct in_addr line_addr;
     double rtt;
     double hops;
-    char *p;
     int j;
-    size_t hdr_sz;
     int nused = 0;
-    int size;
-    int oldbufofs = ex->buf_ofs;
 
-    rec_sz = 0;
+    size_t rec_sz = 0; // received record size (TODO: make const)
     rec_sz += 1 + sizeof(struct in_addr);
     rec_sz += 1 + sizeof(int);
     rec_sz += 1 + sizeof(int);
+    // to make progress without growing buffer space, we must parse at least one record per call
+    Assure(rec_sz <= ex->parsingBuffer.capacity());
     debugs(38, 3, "netdbExchangeHandleReply: " << receivedData.length << " read bytes");
 
     if (!ex->p.valid()) {
@@ -694,64 +688,28 @@
 
     debugs(38, 3, "for " << *ex->p);
 
-    if (receivedData.length == 0 && !receivedData.flags.error) {
-        debugs(38, 3, "netdbExchangeHandleReply: Done");
+    if (receivedData.flags.error) {
         delete ex;
         return;
     }
 
-    p = ex->buf;
-
-    /* Get the size of the buffer now */
-    size = ex->buf_ofs + receivedData.length;
-    debugs(38, 3, "netdbExchangeHandleReply: " << size << " bytes buf");
-
-    /* Check if we're still doing headers */
-
     if (ex->connstate == STATE_HEADER) {
-
-        ex->buf_ofs += receivedData.length;
-
-        /* skip reply headers */
-
-        if ((hdr_sz = headersEnd(p, ex->buf_ofs))) {
-            debugs(38, 5, "netdbExchangeHandleReply: hdr_sz = " << hdr_sz);
-            const auto scode = ex->e->mem().baseReply().sline.status();
-            assert(scode != Http::scNone);
-            debugs(38, 3, "netdbExchangeHandleReply: reply status " << scode);
-
-            if (scode != Http::scOkay) {
-                delete ex;
-                return;
-            }
-
-            assert((size_t)ex->buf_ofs >= hdr_sz);
-
-            /*
-             * Now, point p to the part of the buffer where the data
-             * starts, and update the size accordingly
-             */
-            assert(ex->used == 0);
-            ex->used = hdr_sz;
-            size = ex->buf_ofs - hdr_sz;
-            p += hdr_sz;
-
-            /* Finally, set the conn state mode to STATE_BODY */
-            ex->connstate = STATE_BODY;
-        } else {
-            StoreIOBuffer tempBuffer;
-            tempBuffer.offset = ex->buf_ofs;
-            tempBuffer.length = ex->buf_sz - ex->buf_ofs;
-            tempBuffer.data = ex->buf + ex->buf_ofs;
-            /* Have more headers .. */
-            storeClientCopy(ex->sc, ex->e, tempBuffer,
-                            netdbExchangeHandleReply, ex);
+        const auto scode = ex->e->mem().baseReply().sline.status();
+        assert(scode != Http::scNone);
+        debugs(38, 3, "reply status " << scode);
+        if (scode != Http::scOkay) {
+            delete ex;
             return;
         }
+        ex->connstate = STATE_BODY;
     }
 
     assert(ex->connstate == STATE_BODY);
 
+    ex->parsingBuffer.appended(receivedData.data, receivedData.length);
+    auto p = ex->parsingBuffer.c_str(); // current parsing position
+    auto size = ex->parsingBuffer.contentSize(); // bytes we still need to parse
+
     /* If we get here, we have some body to parse .. */
     debugs(38, 5, "netdbExchangeHandleReply: start parsing loop, size = " << size);
 
@@ -760,6 +718,7 @@
         addr.setAnyAddr();
         hops = rtt = 0.0;
 
+        size_t o; // current record parsing offset
         for (o = 0; o < rec_sz;) {
             switch ((int) *(p + o)) {
 
@@ -797,8 +756,6 @@
 
         assert(o == rec_sz);
 
-        ex->used += rec_sz;
-
         size -= rec_sz;
 
         p += rec_sz;
@@ -806,32 +763,8 @@
         ++nused;
     }
 
-    /*
-     * Copy anything that is left over to the beginning of the buffer,
-     * and adjust buf_ofs accordingly
-     */
-
-    /*
-     * Evilly, size refers to the buf size left now,
-     * ex->buf_ofs is the original buffer size, so just copy that
-     * much data over
-     */
-    memmove(ex->buf, ex->buf + (ex->buf_ofs - size), size);
-
-    ex->buf_ofs = size;
-
-    /*
-     * And don't re-copy the remaining data ..
-     */
-    ex->used += size;
-
-    /*
-     * Now the tricky bit - size _included_ the leftover bit from the _last_
-     * storeClientCopy. We don't want to include that, or our offset will be wrong.
-     * So, don't count the size of the leftover buffer we began with.
-     * This can _disappear_ when we're not tracking offsets ..
-     */
-    ex->used -= oldbufofs;
+    const auto parsedSize = ex->parsingBuffer.contentSize() - size;
+    ex->parsingBuffer.consume(parsedSize);
 
     debugs(38, 3, "netdbExchangeHandleReply: size left over in this buffer: " << size << " bytes");
 
@@ -839,20 +772,26 @@
            " entries, (x " << rec_sz << " bytes) == " << nused * rec_sz <<
            " bytes total");
 
-    debugs(38, 3, "netdbExchangeHandleReply: used " << ex->used);
-
     if (EBIT_TEST(ex->e->flags, ENTRY_ABORTED)) {
         debugs(38, 3, "netdbExchangeHandleReply: ENTRY_ABORTED");
         delete ex;
-    } else if (ex->e->store_status == STORE_PENDING) {
-        StoreIOBuffer tempBuffer;
-        tempBuffer.offset = ex->used;
-        tempBuffer.length = ex->buf_sz - ex->buf_ofs;
-        tempBuffer.data = ex->buf + ex->buf_ofs;
-        debugs(38, 3, "netdbExchangeHandleReply: EOF not received");
-        storeClientCopy(ex->sc, ex->e, tempBuffer,
-                        netdbExchangeHandleReply, ex);
+        return;
     }
+
+    if (ex->sc->atEof()) {
+        if (const auto leftoverBytes = ex->parsingBuffer.contentSize())
+            debugs(38, 2, "discarding a partially received record due to Store EOF: " << leftoverBytes);
+        delete ex;
+        return;
+    }
+
+    // TODO: To protect us from a broken peer sending an "infinite" stream of
+    // new addresses, limit the cumulative number of received bytes or records?
+
+    const auto remainingSpace = ex->parsingBuffer.space().positionAt(receivedData.offset + receivedData.length);
+    // rec_sz is at most buffer capacity, and we consume all fully loaded records
+    Assure(remainingSpace.length);
+    storeClientCopy(ex->sc, ex->e, remainingSpace, netdbExchangeHandleReply, ex);
 }
 
 #endif /* USE_ICMP */
@@ -1273,14 +1212,9 @@
     ex->e = storeCreateEntry(uri, uri, RequestFlags(), Http::METHOD_GET);
     assert(nullptr != ex->e);
 
-    StoreIOBuffer tempBuffer;
-    tempBuffer.length = ex->buf_sz;
-    tempBuffer.data = ex->buf;
-
     ex->sc = storeClientListAdd(ex->e, ex);
+    storeClientCopy(ex->sc, ex->e, ex->parsingBuffer.makeInitialSpace(), netdbExchangeHandleReply, ex);
 
-    storeClientCopy(ex->sc, ex->e, tempBuffer,
-                    netdbExchangeHandleReply, ex);
     ex->r->flags.loopDetected = true;   /* cheat! -- force direct */
 
     // XXX: send as Proxy-Authenticate instead
diff -u -r -N squid-6.3/src/ipc/mem/Pages.cc squid-6.4/src/ipc/mem/Pages.cc
--- squid-6.3/src/ipc/mem/Pages.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/ipc/mem/Pages.cc	2023-10-21 11:40:41.000000000 +1300
@@ -103,7 +103,7 @@
     Ipc::Mem::PagePool::Owner *owner;
 };
 
-RunnerRegistrationEntry(SharedMemPagesRr);
+DefineRunnerRegistrator(SharedMemPagesRr);
 
 void
 SharedMemPagesRr::useConfig()
diff -u -r -N squid-6.3/src/log/DB/log_db_daemon.8 squid-6.4/src/log/DB/log_db_daemon.8
--- squid-6.3/src/log/DB/log_db_daemon.8	2023-09-03 20:37:53.000000000 +1200
+++ squid-6.4/src/log/DB/log_db_daemon.8	2023-10-22 01:47:16.000000000 +1300
@@ -133,7 +133,7 @@
 .\" ========================================================================
 .\"
 .IX Title "LOG_DB_DAEMON 8"
-.TH LOG_DB_DAEMON 8 "2023-09-03" "perl v5.36.0" "User Contributed Perl Documentation"
+.TH LOG_DB_DAEMON 8 "2023-10-21" "perl v5.36.0" "User Contributed Perl Documentation"
 .\" For nroff, turn off justification.  Always turn off hyphenation; it makes
 .\" way too many mistakes in technical documents.
 .if n .ad l
diff -u -r -N squid-6.3/src/main.cc squid-6.4/src/main.cc
--- squid-6.3/src/main.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/main.cc	2023-10-21 11:40:41.000000000 +1300
@@ -1437,9 +1437,58 @@
     }
 }
 
+/// register all known modules for handling future RegisteredRunner events
+static void
+RegisterModules()
+{
+    // These registration calls do not represent a RegisteredRunner "event". The
+    // modules registered here should be initialized later, during those events.
+
+    // RegisteredRunner event handlers should not depend on handler call order
+    // and, hence, should not depend on the registration call order below.
+
+    CallRunnerRegistrator(ClientDbRr);
+    CallRunnerRegistrator(CollapsedForwardingRr);
+    CallRunnerRegistrator(MemStoreRr);
+    CallRunnerRegistrator(PeerPoolMgrsRr);
+    CallRunnerRegistrator(SharedMemPagesRr);
+    CallRunnerRegistrator(SharedSessionCacheRr);
+    CallRunnerRegistrator(TransientsRr);
+    CallRunnerRegistratorIn(Dns, ConfigRr);
+
+#if HAVE_DISKIO_MODULE_IPCIO
+    CallRunnerRegistrator(IpcIoRr);
+#endif
+
+#if HAVE_AUTH_MODULE_NTLM
+    CallRunnerRegistrator(NtlmAuthRr);
+#endif
+
+#if USE_OPENSSL
+    CallRunnerRegistrator(sslBumpCfgRr);
+#endif
+
+#if USE_SQUID_ESI && HAVE_LIBEXPAT
+    CallRunnerRegistratorIn(Esi, ExpatRr);
+#endif
+
+#if USE_SQUID_ESI && HAVE_LIBXML2
+    CallRunnerRegistratorIn(Esi, Libxml2Rr);
+#endif
+
+#if HAVE_FS_ROCK
+    CallRunnerRegistratorIn(Rock, SwapDirRr);
+#endif
+}
+
 int
 SquidMain(int argc, char **argv)
 {
+    // We must register all modules before the first RunRegisteredHere() call.
+    // We do it ASAP/here so that we do not need to move this code when we add
+    // earlier hooks to the RegisteredRunner API.
+    RegisterModules();
+
     const CommandLine cmdLine(argc, argv, shortOpStr, squidOptions);
 
     ConfigureCurrentKid(cmdLine);
diff -u -r -N squid-6.3/src/MemObject.cc squid-6.4/src/MemObject.cc
--- squid-6.3/src/MemObject.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/MemObject.cc	2023-10-21 11:40:41.000000000 +1300
@@ -353,6 +353,12 @@
      */
     int64_t lowest_offset = lowestMemReaderOffset();
 
+    // XXX: Remove the last (Config.onoff.memory_cache_first-based) condition
+    // and update keepForLocalMemoryCache() accordingly. The caller wants to
+    // remove all local memory that is safe to remove. Honoring caching
+    // preferences is its responsibility. Our responsibility is safety. The
+    // situation was different when ff4b33f added that condition -- there was no
+    // keepInLocalMemory/keepForLocalMemoryCache() call guard back then.
     if (endOffset() < lowest_offset ||
             endOffset() - inmem_lo > (int64_t)Config.Store.maxInMemObjSize ||
             (swap && !Config.onoff.memory_cache_first))
diff -u -r -N squid-6.3/src/MemObject.h squid-6.4/src/MemObject.h
--- squid-6.3/src/MemObject.h	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/MemObject.h	2023-10-21 11:40:41.000000000 +1300
@@ -89,6 +89,15 @@
     bool appliedUpdates = false;
 
     void stat (MemBuf * mb) const;
+
+    /// The offset of the last memory-stored HTTP response byte plus one.
+    /// * HTTP response headers (if any) are stored at offset zero.
+    /// * HTTP response body byte[n] usually has offset (hdr_sz + n), where
+    ///   hdr_sz is the size of stored HTTP response headers (zero if none); and
+    ///   n is the corresponding byte offset in the whole resource body.
+    ///   However, some 206 (Partial Content) response bodies are stored (and
+    ///   retrieved) as regular 200 response bodies, disregarding offsets of
+    ///   their body parts. \sa HttpStateData::decideIfWeDoRanges().
     int64_t endOffset () const;
 
     /// sets baseReply().hdr_sz (i.e. written reply headers size) to endOffset()
diff -u -r -N squid-6.3/src/MemStore.cc squid-6.4/src/MemStore.cc
--- squid-6.3/src/MemStore.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/MemStore.cc	2023-10-21 11:40:41.000000000 +1300
@@ -17,6 +17,8 @@
 #include "MemObject.h"
 #include "MemStore.h"
 #include "mime_header.h"
+#include "sbuf/SBuf.h"
+#include "sbuf/Stream.h"
 #include "SquidConfig.h"
 #include "SquidMath.h"
 #include "StoreStats.h"
@@ -311,19 +313,25 @@
     // create a brand new store entry and initialize it with stored info
     StoreEntry *e = new StoreEntry();
 
-    // XXX: We do not know the URLs yet, only the key, but we need to parse and
-    // store the response for the Root().find() callers to be happy because they
-    // expect IN_MEMORY entries to already have the response headers and body.
-    e->createMemObject();
-
-    anchorEntry(*e, index, *slot);
-
-    const bool copied = copyFromShm(*e, index, *slot);
-
-    if (copied)
-        return e;
+    try {
+        // XXX: We do not know the URLs yet, only the key, but we need to parse and
+        // store the response for the Root().find() callers to be happy because they
+        // expect IN_MEMORY entries to already have the response headers and body.
+        e->createMemObject();
+
+        anchorEntry(*e, index, *slot);
+
+        // TODO: make copyFromShm() throw on all failures, simplifying this code
+        if (copyFromShm(*e, index, *slot))
+            return e;
+        debugs(20, 3, "failed for " << *e);
+    } catch (...) {
+        // see store_client::parseHttpHeadersFromDisk() for problems this may log
+        debugs(20, DBG_IMPORTANT, "ERROR: Cannot load a cache hit from shared memory" <<
+               Debug::Extra << "exception: " << CurrentException <<
+               Debug::Extra << "cache_mem entry: " << *e);
+    }
 
-    debugs(20, 3, "failed for " << *e);
     map->freeEntry(index); // do not let others into the same trap
     destroyStoreEntry(static_cast<hash_link *>(e));
     return nullptr;
@@ -469,6 +477,8 @@
     Ipc::StoreMapSliceId sid = anchor.start; // optimize: remember the last sid
     bool wasEof = anchor.complete() && sid < 0;
     int64_t sliceOffset = 0;
+
+    SBuf httpHeaderParsingBuffer;
     while (sid >= 0) {
         const Ipc::StoreMapSlice &slice = map->readableSlice(index, sid);
         // slice state may change during copying; take snapshots now
@@ -491,10 +501,18 @@
             const StoreIOBuffer sliceBuf(wasSize - prefixSize,
                                          e.mem_obj->endOffset(),
                                          page + prefixSize);
-            if (!copyFromShmSlice(e, sliceBuf, wasEof))
-                return false;
+
+            copyFromShmSlice(e, sliceBuf);
             debugs(20, 8, "entry " << index << " copied slice " << sid <<
                    " from " << extra.page << '+' << prefixSize);
+
+            // parse headers if needed; they might span multiple slices!
+            auto &reply = e.mem().adjustableBaseReply();
+            if (reply.pstate != Http::Message::psParsed) {
+                httpHeaderParsingBuffer.append(sliceBuf.data, sliceBuf.length);
+                if (reply.parseTerminatedPrefix(httpHeaderParsingBuffer.c_str(), httpHeaderParsingBuffer.length()))
+                    httpHeaderParsingBuffer = SBuf(); // we do not need these bytes anymore
+            }
         }
         // else skip a [possibly incomplete] slice that we copied earlier
 
@@ -526,6 +544,9 @@
     debugs(20, 5, "mem-loaded all " << e.mem_obj->endOffset() << '/' <<
            anchor.basics.swap_file_sz << " bytes of " << e);
 
+    if (e.mem().adjustableBaseReply().pstate != Http::Message::psParsed)
+        throw TextException(ToSBuf("truncated mem-cached headers; accumulated: ", httpHeaderParsingBuffer.length()), Here());
+
     // from StoreEntry::complete()
     e.mem_obj->object_sz = e.mem_obj->endOffset();
     e.store_status = STORE_OK;
@@ -541,32 +562,11 @@
 }
 
 /// imports one shared memory slice into local memory
-bool
-MemStore::copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf, bool eof)
+void
+MemStore::copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf)
 {
     debugs(20, 7, "buf: " << buf.offset << " + " << buf.length);
 
-    // from store_client::readBody()
-    // parse headers if needed; they might span multiple slices!
-    const auto rep = &e.mem().adjustableBaseReply();
-    if (rep->pstate < Http::Message::psParsed) {
-        // XXX: have to copy because httpMsgParseStep() requires 0-termination
-        MemBuf mb;
-        mb.init(buf.length+1, buf.length+1);
-        mb.append(buf.data, buf.length);
-        mb.terminate();
-        const int result = rep->httpMsgParseStep(mb.buf, buf.length, eof);
-        if (result > 0) {
-            assert(rep->pstate == Http::Message::psParsed);
-        } else if (result < 0) {
-            debugs(20, DBG_IMPORTANT, "Corrupted mem-cached headers: " << e);
-            return false;
-        } else { // more slices are needed
-            assert(!eof);
-        }
-    }
-    debugs(20, 7, "rep pstate: " << rep->pstate);
-
     // local memory stores both headers and body so copy regardless of pstate
     const int64_t offBefore = e.mem_obj->endOffset();
     assert(e.mem_obj->data_hdr.write(buf)); // from MemObject::write()
@@ -574,7 +574,6 @@
     // expect to write the entire buf because StoreEntry::write() never fails
     assert(offAfter >= 0 && offBefore <= offAfter &&
            static_cast<size_t>(offAfter - offBefore) == buf.length);
-    return true;
 }
 
 /// whether we should cache the entry
@@ -988,7 +987,7 @@
     Ipc::Mem::Owner<MemStoreMapExtras> *extrasOwner; ///< PageIds Owner
 };
 
-RunnerRegistrationEntry(MemStoreRr);
+DefineRunnerRegistrator(MemStoreRr);
 
 void
 MemStoreRr::claimMemoryNeeds()
diff -u -r -N squid-6.3/src/MemStore.h squid-6.4/src/MemStore.h
--- squid-6.3/src/MemStore.h	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/MemStore.h	2023-10-21 11:40:41.000000000 +1300
@@ -80,7 +80,7 @@
     void copyToShm(StoreEntry &e);
     void copyToShmSlice(StoreEntry &e, Ipc::StoreMapAnchor &anchor, Ipc::StoreMap::Slice &slice);
     bool copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor);
-    bool copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf, bool eof);
+    void copyFromShmSlice(StoreEntry &, const StoreIOBuffer &);
 
     void updateHeadersOrThrow(Ipc::StoreMapUpdate &update);
 
diff -u -r -N squid-6.3/src/parser/Tokenizer.cc squid-6.4/src/parser/Tokenizer.cc
--- squid-6.3/src/parser/Tokenizer.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/parser/Tokenizer.cc	2023-10-21 11:40:41.000000000 +1300
@@ -147,6 +147,18 @@
     return success(prefixLen);
 }
 
+void
+Parser::Tokenizer::skipRequired(const char *description, const SBuf &tokenToSkip)
+{
+    if (skip(tokenToSkip) || tokenToSkip.isEmpty())
+        return;
+
+    if (tokenToSkip.startsWith(buf_))
+        throw InsufficientInput();
+
+    throw TextException(ToSBuf("cannot skip ", description), Here());
+}
+
 bool
 Parser::Tokenizer::skipOne(const CharacterSet &chars)
 {
diff -u -r -N squid-6.3/src/parser/Tokenizer.h squid-6.4/src/parser/Tokenizer.h
--- squid-6.3/src/parser/Tokenizer.h	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/parser/Tokenizer.h	2023-10-21 11:40:41.000000000 +1300
@@ -115,6 +115,13 @@
      */
     SBuf::size_type skipAll(const CharacterSet &discardables);
 
+    /** skips a given character sequence (string);
+     * does nothing if the sequence is empty
+     *
+     * \throws exception on mismatching prefix or InsufficientInput
+     */
+    void skipRequired(const char *description, const SBuf &tokenToSkip);
+
     /** Removes a single trailing character from the set.
      *
      * \return whether a character was removed
diff -u -r -N squid-6.3/src/peer_digest.cc squid-6.4/src/peer_digest.cc
--- squid-6.3/src/peer_digest.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/peer_digest.cc	2023-10-21 11:40:41.000000000 +1300
@@ -38,7 +38,6 @@
 static void peerDigestRequest(PeerDigest * pd);
 static STCB peerDigestHandleReply;
 static int peerDigestFetchReply(void *, char *, ssize_t);
-int peerDigestSwapInHeaders(void *, char *, ssize_t);
 int peerDigestSwapInCBlock(void *, char *, ssize_t);
 int peerDigestSwapInMask(void *, char *, ssize_t);
 static int peerDigestFetchedEnough(DigestFetchState * fetch, char *buf, ssize_t size, const char *step_name);
@@ -355,6 +354,9 @@
     fetch->sc = storeClientListAdd(e, fetch);
     /* set lastmod to trigger IMS request if possible */
 
+    // TODO: Also check for fetch->pd->cd presence as a precondition for sending
+    // IMS requests because peerDigestFetchReply() does not accept 304 responses
+    // without an in-memory cache digest.
     if (old_e)
         e->lastModified(old_e->lastModified());
 
@@ -389,6 +391,11 @@
     digest_read_state_t prevstate;
     int newsize;
 
+    if (receivedData.flags.error) {
+        peerDigestFetchAbort(fetch, fetch->buf, "failure loading digest reply from Store");
+        return;
+    }
+
     assert(fetch->pd && receivedData.data);
     /* The existing code assumes that the received pointer is
      * where we asked the data to be put
@@ -425,10 +432,6 @@
             retsize = peerDigestFetchReply(fetch, fetch->buf, fetch->bufofs);
             break;
 
-        case DIGEST_READ_HEADERS:
-            retsize = peerDigestSwapInHeaders(fetch, fetch->buf, fetch->bufofs);
-            break;
-
         case DIGEST_READ_CBLOCK:
             retsize = peerDigestSwapInCBlock(fetch, fetch->buf, fetch->bufofs);
             break;
@@ -468,7 +471,7 @@
     // checking at the beginning of this function. However, in this case, we would have to require
     // that the parser does not regard EOF as a special condition (it is true now but may change
     // in the future).
-    if (!receivedData.length) { // EOF
+    if (fetch->sc->atEof()) {
         peerDigestFetchAbort(fetch, fetch->buf, "premature end of digest reply");
         return;
     }
@@ -487,19 +490,12 @@
     }
 }
 
-/* wait for full http headers to be received then parse them */
-/*
- * This routine handles parsing the reply line.
- * If the reply line indicates an OK, the same data is thrown
- * to SwapInHeaders(). If the reply line is a NOT_MODIFIED,
- * we simply stop parsing.
- */
+/// handle HTTP response headers in the initial storeClientCopy() response
 static int
 peerDigestFetchReply(void *data, char *buf, ssize_t size)
 {
     DigestFetchState *fetch = (DigestFetchState *)data;
     PeerDigest *pd = fetch->pd;
-    size_t hdr_size;
     assert(pd && buf);
     assert(!fetch->offset);
 
@@ -508,7 +504,7 @@
     if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestFetchReply"))
         return -1;
 
-    if ((hdr_size = headersEnd(buf, size))) {
+    {
         const auto &reply = fetch->entry->mem().freshestReply();
         const auto status = reply.sline.status();
         assert(status != Http::scNone);
@@ -527,7 +523,10 @@
 
             assert(fetch->old_entry->mem_obj->request);
 
-            Store::Root().updateOnNotModified(fetch->old_entry, *fetch->entry);
+            if (!Store::Root().updateOnNotModified(fetch->old_entry, *fetch->entry)) {
+                peerDigestFetchAbort(fetch, buf, "header update failure after a 304 response");
+                return -1;
+            }
 
             /* get rid of 304 reply */
             storeUnregister(fetch->sc, fetch->entry, fetch);
@@ -541,6 +540,15 @@
             /* preserve request -- we need its size to update counters */
             /* requestUnlink(r); */
             /* fetch->entry->mem_obj->request = nullptr; */
+
+            if (!fetch->pd->cd) {
+                peerDigestFetchAbort(fetch, buf, "304 without the old in-memory digest");
+                return -1;
+            }
+
+            // stay with the old in-memory digest
+            peerDigestFetchStop(fetch, buf, "Not modified");
+            fetch->state = DIGEST_READ_DONE;
         } else if (status == Http::scOkay) {
             /* get rid of old entry if any */
 
@@ -551,70 +559,16 @@
                 fetch->old_entry->unlock("peerDigestFetchReply 200");
                 fetch->old_entry = nullptr;
             }
+
+            fetch->state = DIGEST_READ_CBLOCK;
         } else {
             /* some kind of a bug */
             peerDigestFetchAbort(fetch, buf, reply.sline.reason());
             return -1;      /* XXX -1 will abort stuff in ReadReply! */
         }
-
-        /* must have a ready-to-use store entry if we got here */
-        /* can we stay with the old in-memory digest? */
-        if (status == Http::scNotModified && fetch->pd->cd) {
-            peerDigestFetchStop(fetch, buf, "Not modified");
-            fetch->state = DIGEST_READ_DONE;
-        } else {
-            fetch->state = DIGEST_READ_HEADERS;
-        }
-    } else {
-        /* need more data, do we have space? */
-
-        if (size >= SM_PAGE_SIZE)
-            peerDigestFetchAbort(fetch, buf, "reply header too big");
-    }
-
-    /* We don't want to actually ack that we've handled anything,
-     * otherwise SwapInHeaders() won't get the reply line .. */
-    return 0;
-}
-
-/* fetch headers from disk, pass on to SwapInCBlock */
-int
-peerDigestSwapInHeaders(void *data, char *buf, ssize_t size)
-{
-    DigestFetchState *fetch = (DigestFetchState *)data;
-    size_t hdr_size;
-
-    assert(fetch->state == DIGEST_READ_HEADERS);
-
-    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestSwapInHeaders"))
-        return -1;
-
-    assert(!fetch->offset);
-
-    if ((hdr_size = headersEnd(buf, size))) {
-        const auto &reply = fetch->entry->mem().freshestReply();
-        const auto status = reply.sline.status();
-        assert(status != Http::scNone);
-
-        if (status != Http::scOkay) {
-            debugs(72, DBG_IMPORTANT, "peerDigestSwapInHeaders: " << fetch->pd->host <<
-                   " status " << status << " got cached!");
-
-            peerDigestFetchAbort(fetch, buf, "internal status error");
-            return -1;
-        }
-
-        fetch->state = DIGEST_READ_CBLOCK;
-        return hdr_size;    /* Say how much data we read */
-    }
-
-    /* need more data, do we have space? */
-    if (size >= SM_PAGE_SIZE) {
-        peerDigestFetchAbort(fetch, buf, "stored header too big");
-        return -1;
     }
 
-    return 0;       /* We need to read more to parse .. */
+    return 0; // we consumed/used no buffered bytes
 }
 
 int
diff -u -r -N squid-6.3/src/PeerPoolMgr.cc squid-6.4/src/PeerPoolMgr.cc
--- squid-6.3/src/PeerPoolMgr.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/PeerPoolMgr.cc	2023-10-21 11:40:41.000000000 +1300
@@ -239,7 +239,7 @@
     void syncConfig() override;
 };
 
-RunnerRegistrationEntry(PeerPoolMgrsRr);
+DefineRunnerRegistrator(PeerPoolMgrsRr);
 
 void
 PeerPoolMgrsRr::syncConfig()
diff -u -r -N squid-6.3/src/security/cert_validators/fake/security_fake_certverify.8 squid-6.4/src/security/cert_validators/fake/security_fake_certverify.8
--- squid-6.3/src/security/cert_validators/fake/security_fake_certverify.8	2023-09-03 20:37:54.000000000 +1200
+++ squid-6.4/src/security/cert_validators/fake/security_fake_certverify.8	2023-10-22 01:47:16.000000000 +1300
@@ -133,7 +133,7 @@
 .\" ========================================================================
 .\"
 .IX Title "SECURITY_FAKE_CERTVERIFY 8"
-.TH SECURITY_FAKE_CERTVERIFY 8 "2023-09-03" "perl v5.36.0" "User Contributed Perl Documentation"
+.TH SECURITY_FAKE_CERTVERIFY 8 "2023-10-21" "perl v5.36.0" "User Contributed Perl Documentation"
 .\" For nroff, turn off justification.  Always turn off hyphenation; it makes
 .\" way too many mistakes in technical documents.
 .if n .ad l
diff -u -r -N squid-6.3/src/security/Session.cc squid-6.4/src/security/Session.cc
--- squid-6.3/src/security/Session.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/security/Session.cc	2023-10-21 11:40:41.000000000 +1300
@@ -423,7 +423,7 @@
     Ipc::MemMap::Owner *owner;
 };
 
-RunnerRegistrationEntry(SharedSessionCacheRr);
+DefineRunnerRegistrator(SharedSessionCacheRr);
 
 void
 SharedSessionCacheRr::useConfig()
diff -u -r -N squid-6.3/src/store/Controller.cc squid-6.4/src/store/Controller.cc
--- squid-6.3/src/store/Controller.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/store/Controller.cc	2023-10-21 11:40:41.000000000 +1300
@@ -691,7 +691,7 @@
     // and keep the entry in store_table for its on-disk data
 }
 
-void
+bool
 Store::Controller::updateOnNotModified(StoreEntry *old, StoreEntry &e304)
 {
     Must(old);
@@ -708,13 +708,18 @@
     //         sake, it is best to detect and skip such repeated update calls.
     if (e304.mem_obj->appliedUpdates) {
         debugs(20, 5, "ignored repeated update of " << *old << " with " << e304);
-        return;
+        return true;
     }
     e304.mem_obj->appliedUpdates = true;
 
-    if (!old->updateOnNotModified(e304)) {
-        debugs(20, 5, "updated nothing in " << *old << " with " << e304);
-        return;
+    try {
+        if (!old->updateOnNotModified(e304)) {
+            debugs(20, 5, "updated nothing in " << *old << " with " << e304);
+            return true;
+        }
+    } catch (...) {
+        debugs(20, DBG_IMPORTANT, "ERROR: Failed to update a cached response: " << CurrentException);
+        return false;
     }
 
     if (sharedMemStore && old->mem_status == IN_MEMORY && !EBIT_TEST(old->flags, ENTRY_SPECIAL))
@@ -722,6 +727,8 @@
 
     if (old->swap_dirn > -1)
         swapDir->updateHeaders(old);
+
+    return true;
 }
 
 bool
diff -u -r -N squid-6.3/src/store/Controller.h squid-6.4/src/store/Controller.h
--- squid-6.3/src/store/Controller.h	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/store/Controller.h	2023-10-21 11:40:41.000000000 +1300
@@ -89,7 +89,8 @@
     void memoryOut(StoreEntry &, const bool preserveSwappable);
 
     /// using a 304 response, update the old entry (metadata and reply headers)
-    void updateOnNotModified(StoreEntry *old, StoreEntry &e304);
+    /// \returns whether the old entry can be used (and is considered fresh)
+    bool updateOnNotModified(StoreEntry *old, StoreEntry &e304);
 
     /// tries to make the entry available for collapsing future requests
     bool allowCollapsing(StoreEntry *, const RequestFlags &, const HttpRequestMethod &);
diff -u -r -N squid-6.3/src/store/forward.h squid-6.4/src/store/forward.h
--- squid-6.3/src/store/forward.h	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/store/forward.h	2023-10-21 11:40:41.000000000 +1300
@@ -46,6 +46,7 @@
 class Disk;
 class DiskConfig;
 class EntryGuard;
+class ParsingBuffer;
 
 typedef ::StoreEntry Entry;
 typedef ::MemStore Memory;
diff -u -r -N squid-6.3/src/store/id_rewriters/file/storeid_file_rewrite.8 squid-6.4/src/store/id_rewriters/file/storeid_file_rewrite.8
--- squid-6.3/src/store/id_rewriters/file/storeid_file_rewrite.8	2023-09-03 20:37:51.000000000 +1200
+++ squid-6.4/src/store/id_rewriters/file/storeid_file_rewrite.8	2023-10-22 01:47:14.000000000 +1300
@@ -133,7 +133,7 @@
 .\" ========================================================================
 .\"
 .IX Title "STOREID_FILE_REWRITE 8"
-.TH STOREID_FILE_REWRITE 8 "2023-09-03" "perl v5.36.0" "User Contributed Perl Documentation"
+.TH STOREID_FILE_REWRITE 8 "2023-10-21" "perl v5.36.0" "User Contributed Perl Documentation"
 .\" For nroff, turn off justification.  Always turn off hyphenation; it makes
 .\" way too many mistakes in technical documents.
 .if n .ad l
diff -u -r -N squid-6.3/src/store/Makefile.am squid-6.4/src/store/Makefile.am
--- squid-6.3/src/store/Makefile.am	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/store/Makefile.am	2023-10-21 11:40:41.000000000 +1300
@@ -22,6 +22,8 @@
 	Disks.h \
 	LocalSearch.cc \
 	LocalSearch.h \
+	ParsingBuffer.cc \
+	ParsingBuffer.h \
 	Storage.h \
 	SwapMeta.cc \
 	SwapMeta.h \
diff -u -r -N squid-6.3/src/store/Makefile.in squid-6.4/src/store/Makefile.in
--- squid-6.3/src/store/Makefile.in	2023-09-03 20:33:33.000000000 +1200
+++ squid-6.4/src/store/Makefile.in	2023-10-22 01:42:59.000000000 +1300
@@ -167,7 +167,8 @@
 LTLIBRARIES = $(noinst_LTLIBRARIES)
 libstore_la_LIBADD =
 am_libstore_la_OBJECTS = Controller.lo Disk.lo Disks.lo LocalSearch.lo \
-	SwapMeta.lo SwapMetaIn.lo SwapMetaOut.lo SwapMetaView.lo
+	ParsingBuffer.lo SwapMeta.lo SwapMetaIn.lo SwapMetaOut.lo \
+	SwapMetaView.lo
 libstore_la_OBJECTS = $(am_libstore_la_OBJECTS)
 AM_V_lt = $(am__v_lt_@AM_V@)
 am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
@@ -190,8 +191,9 @@
 am__maybe_remake_depfiles = depfiles
 am__depfiles_remade = ./$(DEPDIR)/Controller.Plo ./$(DEPDIR)/Disk.Plo \
 	./$(DEPDIR)/Disks.Plo ./$(DEPDIR)/LocalSearch.Plo \
-	./$(DEPDIR)/SwapMeta.Plo ./$(DEPDIR)/SwapMetaIn.Plo \
-	./$(DEPDIR)/SwapMetaOut.Plo ./$(DEPDIR)/SwapMetaView.Plo
+	./$(DEPDIR)/ParsingBuffer.Plo ./$(DEPDIR)/SwapMeta.Plo \
+	./$(DEPDIR)/SwapMetaIn.Plo ./$(DEPDIR)/SwapMetaOut.Plo \
+	./$(DEPDIR)/SwapMetaView.Plo
 am__mv = mv -f
 CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
 	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
@@ -789,6 +791,8 @@
 	Disks.h \
 	LocalSearch.cc \
 	LocalSearch.h \
+	ParsingBuffer.cc \
+	ParsingBuffer.h \
 	Storage.h \
 	SwapMeta.cc \
 	SwapMeta.h \
@@ -868,6 +872,7 @@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Disk.Plo@am__quote@ # am--include-marker
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Disks.Plo@am__quote@ # am--include-marker
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/LocalSearch.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ParsingBuffer.Plo@am__quote@ # am--include-marker
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SwapMeta.Plo@am__quote@ # am--include-marker
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SwapMetaIn.Plo@am__quote@ # am--include-marker
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SwapMetaOut.Plo@am__quote@ # am--include-marker
@@ -1279,6 +1284,7 @@
 	-rm -f ./$(DEPDIR)/Disk.Plo
 	-rm -f ./$(DEPDIR)/Disks.Plo
 	-rm -f ./$(DEPDIR)/LocalSearch.Plo
+	-rm -f ./$(DEPDIR)/ParsingBuffer.Plo
 	-rm -f ./$(DEPDIR)/SwapMeta.Plo
 	-rm -f ./$(DEPDIR)/SwapMetaIn.Plo
 	-rm -f ./$(DEPDIR)/SwapMetaOut.Plo
@@ -1332,6 +1338,7 @@
 	-rm -f ./$(DEPDIR)/Disk.Plo
 	-rm -f ./$(DEPDIR)/Disks.Plo
 	-rm -f ./$(DEPDIR)/LocalSearch.Plo
+	-rm -f ./$(DEPDIR)/ParsingBuffer.Plo
 	-rm -f ./$(DEPDIR)/SwapMeta.Plo
 	-rm -f ./$(DEPDIR)/SwapMetaIn.Plo
 	-rm -f ./$(DEPDIR)/SwapMetaOut.Plo
diff -u -r -N squid-6.3/src/store/ParsingBuffer.cc squid-6.4/src/store/ParsingBuffer.cc
--- squid-6.3/src/store/ParsingBuffer.cc	1970-01-01 12:00:00.000000000 +1200
+++ squid-6.4/src/store/ParsingBuffer.cc	2023-10-21 11:40:41.000000000 +1300
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 1996-2023 The Squid Software Foundation and contributors
+ *
+ * Squid software is distributed under GPLv2+ license and includes
+ * contributions from numerous individuals and organizations.
+ * Please see the COPYING and CONTRIBUTORS files for details.
+ */
+
+#include "squid.h"
+#include "sbuf/Stream.h"
+#include "SquidMath.h"
+#include "store/ParsingBuffer.h"
+
+#include <iostream>
+
+// Several Store::ParsingBuffer() methods use assert() because the corresponding
+// failure means there is a good chance that somebody have already read from (or
+// written to) the wrong memory location. Since this buffer is used for storing
+// HTTP response bytes, such failures may corrupt traffic. No Assure() handling
+// code can safely recover from such failures.
+
+Store::ParsingBuffer::ParsingBuffer(StoreIOBuffer &initialSpace):
+    readerSuppliedMemory_(initialSpace)
+{
+}
+
+/// a read-only content start (or nil for some zero-size buffers)
+const char *
+Store::ParsingBuffer::memory() const
+{
+    return extraMemory_ ? extraMemory_->rawContent() : readerSuppliedMemory_.data;
+}
+
+size_t
+Store::ParsingBuffer::capacity() const
+{
+    return extraMemory_ ? (extraMemory_->length() + extraMemory_->spaceSize()) : readerSuppliedMemory_.length;
+}
+
+size_t
+Store::ParsingBuffer::contentSize() const
+{
+    return extraMemory_ ? extraMemory_->length() : readerSuppliedMemoryContentSize_;
+}
+
+void
+Store::ParsingBuffer::appended(const char * const newBytes, const size_t newByteCount)
+{
+    // a positive newByteCount guarantees that, after the first assertion below
+    // succeeds, the second assertion will not increment a nil memory() pointer
+    if (!newByteCount)
+        return;
+
+    // the order of these checks guarantees that memory() is not nil in the second assertion
+    assert(newByteCount <= spaceSize()); // the new bytes end in our space
+    assert(memory() + contentSize() == newBytes); // the new bytes start in our space
+    // and now we know that newBytes is not nil either
+
+    if (extraMemory_)
+        extraMemory_->rawAppendFinish(newBytes, newByteCount);
+    else
+        readerSuppliedMemoryContentSize_ = *IncreaseSum(readerSuppliedMemoryContentSize_, newByteCount);
+
+    assert(contentSize() <= capacity()); // paranoid
+}
+
+void
+Store::ParsingBuffer::consume(const size_t parsedBytes)
+{
+    Assure(contentSize() >= parsedBytes); // more conservative than extraMemory_->consume()
+    if (extraMemory_) {
+        extraMemory_->consume(parsedBytes);
+    } else {
+        readerSuppliedMemoryContentSize_ -= parsedBytes;
+        if (parsedBytes && readerSuppliedMemoryContentSize_)
+            memmove(readerSuppliedMemory_.data, memory() + parsedBytes, readerSuppliedMemoryContentSize_);
+    }
+}
+
+StoreIOBuffer
+Store::ParsingBuffer::space()
+{
+    const auto size = spaceSize();
+    const auto start = extraMemory_ ?
+                       extraMemory_->rawAppendStart(size) :
+                       (readerSuppliedMemory_.data + readerSuppliedMemoryContentSize_);
+    return StoreIOBuffer(spaceSize(), 0, start);
+}
+
+StoreIOBuffer
+Store::ParsingBuffer::makeSpace(const size_t pageSize)
+{
+    growSpace(pageSize);
+    auto result = space();
+    Assure(result.length >= pageSize);
+    result.length = pageSize;
+    return result;
+}
+
+StoreIOBuffer
+Store::ParsingBuffer::content() const
+{
+    // This const_cast is a StoreIOBuffer API limitation: That class does not
+    // support a "constant content view", even though it is used as such a view.
+    return StoreIOBuffer(contentSize(), 0, const_cast<char*>(memory()));
+}
+
+/// makes sure we have the requested number of bytes, allocates enough memory if needed
+void
+Store::ParsingBuffer::growSpace(const size_t minimumSpaceSize)
+{
+    const auto capacityIncreaseAttempt = IncreaseSum(contentSize(), minimumSpaceSize);
+    if (!capacityIncreaseAttempt)
+        throw TextException(ToSBuf("no support for a single memory block of ", contentSize(), '+', minimumSpaceSize, " bytes"), Here());
+    const auto newCapacity = *capacityIncreaseAttempt;
+
+    if (newCapacity <= capacity())
+        return; // already have enough space; no reallocation is needed
+
+    debugs(90, 7, "growing to provide " << minimumSpaceSize << " in " << *this);
+
+    if (extraMemory_) {
+        extraMemory_->reserveCapacity(newCapacity);
+    } else {
+        SBuf newStorage;
+        newStorage.reserveCapacity(newCapacity);
+        newStorage.append(readerSuppliedMemory_.data, readerSuppliedMemoryContentSize_);
+        extraMemory_ = std::move(newStorage);
+    }
+    Assure(spaceSize() >= minimumSpaceSize);
+}
+
+SBuf
+Store::ParsingBuffer::toSBuf() const
+{
+    return extraMemory_ ? *extraMemory_ : SBuf(content().data, content().length);
+}
+
+size_t
+Store::ParsingBuffer::spaceSize() const
+{
+    if (extraMemory_)
+        return extraMemory_->spaceSize();
+
+    assert(readerSuppliedMemoryContentSize_ <= readerSuppliedMemory_.length);
+    return readerSuppliedMemory_.length - readerSuppliedMemoryContentSize_;
+}
+
+/// 0-terminates stored byte sequence, allocating more memory if needed, but
+/// without increasing the number of stored content bytes
+void
+Store::ParsingBuffer::terminate()
+{
+    *makeSpace(1).data = 0;
+}
+
+StoreIOBuffer
+Store::ParsingBuffer::packBack()
+{
+    const auto bytesToPack = contentSize();
+    // until our callers do not have to work around legacy code expectations
+    Assure(bytesToPack);
+
+    // if we accumulated more bytes at some point, any extra metadata should
+    // have been consume()d by now, allowing readerSuppliedMemory_.data reuse
+    Assure(bytesToPack <= readerSuppliedMemory_.length);
+
+    auto result = readerSuppliedMemory_;
+    result.length = bytesToPack;
+    Assure(result.data);
+
+    if (!extraMemory_) {
+        // no accumulated bytes copying because they are in readerSuppliedMemory_
+        debugs(90, 7, "quickly exporting " << result.length << " bytes via " << readerSuppliedMemory_);
+    } else {
+        debugs(90, 7, "slowly exporting " << result.length << " bytes from " << extraMemory_->id << " back into " << readerSuppliedMemory_);
+        memmove(result.data, extraMemory_->rawContent(), result.length);
+    }
+
+    return result;
+}
+
+void
+Store::ParsingBuffer::print(std::ostream &os) const
+{
+    os << "size=" << contentSize();
+
+    if (extraMemory_) {
+        os << " capacity=" << capacity();
+        os << " extra=" << extraMemory_->id;
+    }
+
+    // report readerSuppliedMemory_ (if any) even if we are no longer using it
+    // for content storage; it affects packBack() and related parsing logic
+    if (readerSuppliedMemory_.length)
+        os << ' ' << readerSuppliedMemory_;
+}
+
diff -u -r -N squid-6.3/src/store/ParsingBuffer.h squid-6.4/src/store/ParsingBuffer.h
--- squid-6.3/src/store/ParsingBuffer.h	1970-01-01 12:00:00.000000000 +1200
+++ squid-6.4/src/store/ParsingBuffer.h	2023-10-21 11:40:41.000000000 +1300
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 1996-2023 The Squid Software Foundation and contributors
+ *
+ * Squid software is distributed under GPLv2+ license and includes
+ * contributions from numerous individuals and organizations.
+ * Please see the COPYING and CONTRIBUTORS files for details.
+ */
+
+#ifndef SQUID_SRC_STORE_PARSINGBUFFER_H
+#define SQUID_SRC_STORE_PARSINGBUFFER_H
+
+#include "sbuf/SBuf.h"
+#include "StoreIOBuffer.h"
+
+#include <optional>
+
+namespace Store
+{
+
+/// A continuous buffer for efficient accumulation and NUL-termination of
+/// Store-read bytes. The buffer accommodates two kinds of Store readers:
+///
+/// * Readers that do not have any external buffer to worry about but need to
+///   accumulate, terminate, and/or consume buffered content read by Store.
+///   These readers use the default constructor and then allocate the initial
+///   buffer space for their first read (if any).
+///
+/// * Readers that supply their StoreIOBuffer at construction time. That buffer
+///   is enough to handle the majority of use cases. However, the supplied
+///   StoreIOBuffer capacity may be exceeded when parsing requires accumulating
+///   multiple Store read results and/or NUL-termination of a full buffer.
+///
+/// This buffer seamlessly grows as needed, reducing memory over-allocation and,
+/// in case of StoreIOBuffer-seeded construction, memory copies.
+class ParsingBuffer
+{
+public:
+    /// creates buffer without any space or content
+    ParsingBuffer() = default;
+
+    /// seeds this buffer with the caller-supplied buffer space
+    explicit ParsingBuffer(StoreIOBuffer &);
+
+    /// a NUL-terminated version of content(); same lifetime as content()
+    const char *c_str() { terminate(); return memory(); }
+
+    /// export content() into SBuf, avoiding content copying when possible
+    SBuf toSBuf() const;
+
+    /// the total number of append()ed bytes that were not consume()d
+    size_t contentSize() const;
+
+    /// the number of bytes in the space() buffer
+    size_t spaceSize() const;
+
+    /// the maximum number of bytes we can store without allocating more space
+    size_t capacity() const;
+
+    /// Stored append()ed bytes that have not been consume()d. The returned
+    /// buffer offset is set to zero; the caller is responsible for adjusting
+    /// the offset if needed (TODO: Add/return a no-offset Mem::View instead).
+    /// The returned buffer is invalidated by calling a non-constant method or
+    /// by changing the StoreIOBuffer contents given to our constructor.
+    StoreIOBuffer content() const;
+
+    /// A (possibly empty) buffer for reading the next byte(s). The returned
+    /// buffer offset is set to zero; the caller is responsible for adjusting
+    /// the offset if needed (TODO: Add/return a no-offset Mem::Area instead).
+    /// The returned buffer is invalidated by calling a non-constant method or
+    /// by changing the StoreIOBuffer contents given to our constructor.
+    StoreIOBuffer space();
+
+    /// A buffer for reading the exact number of next byte(s). The method may
+    /// allocate new memory and copy previously appended() bytes as needed.
+    /// \param pageSize the exact number of bytes the caller wants to read
+    /// \returns space() after any necessary allocations
+    StoreIOBuffer makeSpace(size_t pageSize);
+
+    /// A buffer suitable for the first storeClientCopy() call. The method may
+    /// allocate new memory and copy previously appended() bytes as needed.
+    /// \returns space() after any necessary allocations
+    /// \deprecated New clients should call makeSpace() with client-specific
+    /// pageSize instead of this one-size-fits-all legacy method.
+    StoreIOBuffer makeInitialSpace() { return makeSpace(4096); }
+
+    /// remember the new bytes received into the previously provided space()
+    void appended(const char *, size_t);
+
+    /// get rid of previously appended() prefix of a given size
+    void consume(size_t);
+
+    /// Returns stored content, reusing the StoreIOBuffer given at the
+    /// construction time. Copying is avoided if we did not allocate extra
+    /// memory since construction. Not meant for default-constructed buffers.
+    /// \pre positive contentSize() (\sa store_client::finishCallback())
+    StoreIOBuffer packBack();
+
+    /// summarizes object state (for debugging)
+    void print(std::ostream &) const;
+
+private:
+    const char *memory() const;
+    void terminate();
+    void growSpace(size_t);
+
+private:
+    /// externally allocated buffer we were seeded with (or a zero-size one)
+    StoreIOBuffer readerSuppliedMemory_;
+
+    /// append()ed to readerSuppliedMemory_ bytes that were not consume()d
+    size_t readerSuppliedMemoryContentSize_ = 0;
+
+    /// our internal buffer that takes over readerSuppliedMemory_ when the
+    /// latter becomes full and more memory is needed
+    std::optional<SBuf> extraMemory_;
+};
+
+inline std::ostream &
+operator <<(std::ostream &os, const ParsingBuffer &b)
+{
+    b.print(os);
+    return os;
+}
+
+} // namespace Store
+
+#endif /* SQUID_SRC_STORE_PARSINGBUFFER_H */
+
diff -u -r -N squid-6.3/src/store.cc squid-6.4/src/store.cc
--- squid-6.3/src/store.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/store.cc	2023-10-21 11:40:41.000000000 +1300
@@ -35,6 +35,7 @@
 #include "mgr/StoreIoAction.h"
 #include "repl_modules.h"
 #include "RequestFlags.h"
+#include "sbuf/Stream.h"
 #include "SquidConfig.h"
 #include "StatCounters.h"
 #include "stmem.h"
@@ -256,6 +257,8 @@
 
     assert(mem_obj);
 
+    debugs(20, 7, *this << " inmem_lo=" << mem_obj->inmem_lo);
+
     if (mem_obj->inmem_lo)
         return STORE_DISK_CLIENT;
 
@@ -283,6 +286,7 @@
                 return STORE_MEM_CLIENT;
             }
         }
+        debugs(20, 7, "STORE_OK STORE_DISK_CLIENT");
         return STORE_DISK_CLIENT;
     }
 
@@ -302,10 +306,18 @@
     if (swap_status == SWAPOUT_NONE)
         return STORE_MEM_CLIENT;
 
+    // TODO: The above "must make this a mem client" logic contradicts "Slight
+    // weirdness" logic in store_client::doCopy() that converts hits to misses
+    // on startSwapin() failures. We should probably attempt to open a swapin
+    // file _here_ instead (and avoid STORE_DISK_CLIENT designation for clients
+    // that fail to do so). That would also address a similar problem with Rock
+    // store that does not yet support swapin during SWAPOUT_WRITING.
+
     /*
      * otherwise, make subsequent clients read from disk so they
      * can not delay the first, and vice-versa.
      */
+    debugs(20, 7, "STORE_PENDING STORE_DISK_CLIENT");
     return STORE_DISK_CLIENT;
 }
 
@@ -1431,8 +1443,14 @@
     // update reply before calling timestampsSet() below
     const auto &oldReply = mem_obj->freshestReply();
     const auto updatedReply = oldReply.recreateOnNotModified(e304.mem_obj->baseReply());
-    if (updatedReply) // HTTP 304 brought in new information
+    if (updatedReply) { // HTTP 304 brought in new information
+        if (updatedReply->prefixLen() > Config.maxReplyHeaderSize) {
+            throw TextException(ToSBuf("cannot update the cached response because its updated ",
+                                       updatedReply->prefixLen(), "-byte header would exceed ",
+                                       Config.maxReplyHeaderSize, "-byte reply_header_max_size"), Here());
+        }
         mem_obj->updateReply(*updatedReply);
+    }
     // else continue to use the previous update, if any
 
     if (!timestampsSet() && !updatedReply)
diff -u -r -N squid-6.3/src/store_client.cc squid-6.4/src/store_client.cc
--- squid-6.3/src/store_client.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/store_client.cc	2023-10-21 11:40:41.000000000 +1300
@@ -19,7 +19,9 @@
 #include "MemBuf.h"
 #include "MemObject.h"
 #include "mime_header.h"
+#include "sbuf/Stream.h"
 #include "SquidConfig.h"
+#include "SquidMath.h"
 #include "StatCounters.h"
 #include "Store.h"
 #include "store/SwapMetaIn.h"
@@ -139,20 +141,6 @@
     return sc;
 }
 
-/// schedules asynchronous STCB call to relay disk or memory read results
-/// \param outcome an error signal (if negative), an EOF signal (if zero), or the number of bytes read
-void
-store_client::callback(const ssize_t outcome)
-{
-    if (outcome > 0)
-        return noteCopiedBytes(outcome);
-
-    if (outcome < 0)
-        return fail();
-
-    noteEof();
-}
-
 /// finishCallback() wrapper; TODO: Add NullaryMemFunT for non-jobs.
 void
 store_client::FinishCallback(store_client * const sc)
@@ -167,54 +155,38 @@
     Assure(_callback.callback_handler);
     Assure(_callback.notifier);
 
-    // callers are not ready to handle a content+error combination
-    Assure(object_ok || !copiedSize);
-
-    StoreIOBuffer result(copiedSize, copyInto.offset, copyInto.data);
+    // XXX: Some legacy code relies on zero-length buffers having nil data
+    // pointers. Some other legacy code expects "correct" result.offset even
+    // when there is no body to return. Accommodate all those expectations.
+    auto result = StoreIOBuffer(0, copyInto.offset, nullptr);
+    if (object_ok && parsingBuffer && parsingBuffer->contentSize())
+        result = parsingBuffer->packBack();
     result.flags.error = object_ok ? 0 : 1;
-    copiedSize = 0;
 
-    cmp_offset = result.offset + result.length;
+    // no HTTP headers and no body bytes (but not because there was no space)
+    atEof_ = !sendingHttpHeaders() && !result.length && copyInto.length;
+
+    parsingBuffer.reset();
+    ++answers;
+
     STCB *temphandler = _callback.callback_handler;
-    void *cbdata = _callback.callback_data;
-    _callback = Callback(nullptr, nullptr);
+    const auto cbdata = _callback.cbData.validDone();
+    _callback = Callback();
     copyInto.data = nullptr;
 
-    if (cbdataReferenceValid(cbdata))
+    if (cbdata)
         temphandler(cbdata, result);
-
-    cbdataReferenceDone(cbdata);
-}
-
-/// schedules asynchronous STCB call to relay a successful disk or memory read
-/// \param bytesCopied the number of response bytes copied into copyInto
-void
-store_client::noteCopiedBytes(const size_t bytesCopied)
-{
-    debugs(90, 5, bytesCopied);
-    Assure(bytesCopied > 0);
-    Assure(!copiedSize);
-    copiedSize = bytesCopied;
-    noteNews();
-}
-
-void
-store_client::noteEof()
-{
-    debugs(90, 5, copiedSize);
-    Assure(!copiedSize);
-    noteNews();
 }
 
 store_client::store_client(StoreEntry *e) :
-    cmp_offset(0),
 #if STORE_CLIENT_LIST_DEBUG
     owner(cbdataReference(data)),
 #endif
     entry(e),
     type(e->storeClientType()),
     object_ok(true),
-    copiedSize(0)
+    atEof_(false),
+    answers(0)
 {
     Assure(entry);
     entry->lock("store_client");
@@ -269,16 +241,29 @@
 #endif
 
     assert(!_callback.pending());
-#if ONLYCONTIGUOUSREQUESTS
-
-    assert(cmp_offset == copyRequest.offset);
-#endif
-    /* range requests will skip into the body */
-    cmp_offset = copyRequest.offset;
-    _callback = Callback (callback_fn, cbdataReference(data));
+    _callback = Callback(callback_fn, data);
     copyInto.data = copyRequest.data;
     copyInto.length = copyRequest.length;
     copyInto.offset = copyRequest.offset;
+    Assure(copyInto.offset >= 0);
+
+    if (!copyInto.length) {
+        // During the first storeClientCopy() call, a zero-size buffer means
+        // that we will have to drop any HTTP response body bytes we read (with
+        // the HTTP headers from disk). After that, it means we cannot return
+        // anything to the caller at all.
+        debugs(90, 2, "WARNING: zero-size storeClientCopy() buffer: " << copyInto);
+        // keep going; moreToRead() should prevent any from-Store reading
+    }
+
+    // Our nextHttpReadOffset() expects the first copy() call to have zero
+    // offset. More complex code could handle a positive first offset, but it
+    // would only be useful when reading responses from memory: We would not
+    // _delay_ the response (to read the requested HTTP body bytes from disk)
+    // when we already can respond with HTTP headers.
+    Assure(!copyInto.offset || answeredOnce());
+
+    parsingBuffer.emplace(copyInto);
 
     static bool copying (false);
     assert (!copying);
@@ -304,33 +289,30 @@
     // Add no code here. This object may no longer exist.
 }
 
-/// Whether there is (or will be) more entry data for us.
+/// Whether Store has (or possibly will have) more entry data for us.
 bool
-store_client::moreToSend() const
+store_client::moreToRead() const
 {
+    if (!copyInto.length)
+        return false; // the client supplied a zero-size buffer
+
     if (entry->store_status == STORE_PENDING)
         return true; // there may be more coming
 
     /* STORE_OK, including aborted entries: no more data is coming */
 
-    const int64_t len = entry->objectLen();
-
-    // If we do not know the entry length, then we have to open the swap file.
-    const bool canSwapIn = entry->hasDisk();
-    if (len < 0)
-        return canSwapIn;
+    if (canReadFromMemory())
+        return true; // memory has the first byte wanted by the client
 
-    if (copyInto.offset >= len)
-        return false; // sent everything there is
+    if (!entry->hasDisk())
+        return false; // cannot read anything from disk either
 
-    if (canSwapIn)
-        return true; // if we lack prefix, we can swap it in
+    if (entry->objectLen() >= 0 && copyInto.offset >= entry->contentLen())
+        return false; // the disk cannot have byte(s) wanted by the client
 
-    // If we cannot swap in, make sure we have what we want in RAM. Otherwise,
-    // scheduleRead calls scheduleDiskRead which asserts without a swap file.
-    const MemObject *mem = entry->mem_obj;
-    return mem &&
-           mem->inmem_lo <= copyInto.offset && copyInto.offset < mem->endOffset();
+    // we cannot be sure until we swap in metadata and learn contentLen(),
+    // but the disk may have the byte(s) wanted by the client
+    return true;
 }
 
 static void
@@ -357,6 +339,14 @@
     sc->doCopy(e);
 }
 
+/// Whether our answer, if sent right now, will announce the availability of
+/// HTTP response headers (to the STCB callback) for the first time.
+bool
+store_client::sendingHttpHeaders() const
+{
+    return !answeredOnce() && entry->mem().baseReply().hdr_sz > 0;
+}
+
 void
 store_client::doCopy(StoreEntry *anEntry)
 {
@@ -368,20 +358,22 @@
     flags.store_copying = true;
     MemObject *mem = entry->mem_obj;
 
-    debugs(33, 5, "store_client::doCopy: co: " <<
-           copyInto.offset << ", hi: " <<
-           mem->endOffset());
+    debugs(33, 5, this << " into " << copyInto <<
+           " hi: " << mem->endOffset() <<
+           " objectLen: " << entry->objectLen() <<
+           " past_answers: " << answers);
+
+    const auto sendHttpHeaders = sendingHttpHeaders();
 
-    if (!moreToSend()) {
+    if (!sendHttpHeaders && !moreToRead()) {
         /* There is no more to send! */
         debugs(33, 3, "There is no more to send!");
-        noteEof();
+        noteNews();
         flags.store_copying = false;
         return;
     }
 
-    /* Check that we actually have data */
-    if (anEntry->store_status == STORE_PENDING && copyInto.offset >= mem->endOffset()) {
+    if (!sendHttpHeaders && anEntry->store_status == STORE_PENDING && nextHttpReadOffset() >= mem->endOffset()) {
         debugs(90, 3, "store_client::doCopy: Waiting for more");
         flags.store_copying = false;
         return;
@@ -403,7 +395,24 @@
         if (!startSwapin())
             return; // failure
     }
-    scheduleRead();
+
+    // send any immediately available body bytes even if we also sendHttpHeaders
+    if (canReadFromMemory()) {
+        readFromMemory();
+        noteNews(); // will sendHttpHeaders (if needed) as well
+        flags.store_copying = false;
+        return;
+    }
+
+    if (sendHttpHeaders) {
+        debugs(33, 5, "just send HTTP headers: " << mem->baseReply().hdr_sz);
+        noteNews();
+        flags.store_copying = false;
+        return;
+    }
+
+    // no information that the client needs is available immediately
+    scheduleDiskRead();
 }
 
 /// opens the swapin "file" if possible; otherwise, fail()s and returns false
@@ -443,18 +452,7 @@
     if (error)
         fail();
     else
-        noteEof();
-}
-
-void
-store_client::scheduleRead()
-{
-    MemObject *mem = entry->mem_obj;
-
-    if (copyInto.offset >= mem->inmem_lo && copyInto.offset < mem->endOffset())
-        scheduleMemRead();
-    else
-        scheduleDiskRead();
+        noteNews();
 }
 
 void
@@ -479,15 +477,44 @@
     flags.store_copying = false;
 }
 
+/// whether at least one byte wanted by the client is in memory
+bool
+store_client::canReadFromMemory() const
+{
+    const auto &mem = entry->mem();
+    const auto memReadOffset = nextHttpReadOffset();
+    return mem.inmem_lo <= memReadOffset && memReadOffset < mem.endOffset() &&
+           parsingBuffer->spaceSize();
+}
+
+/// The offset of the next stored HTTP response byte wanted by the client.
+int64_t
+store_client::nextHttpReadOffset() const
+{
+    Assure(parsingBuffer);
+    const auto &mem = entry->mem();
+    const auto hdr_sz = mem.baseReply().hdr_sz;
+    // Certain SMP cache manager transactions do not store HTTP headers in
+    // mem_hdr; they store just a kid-specific piece of the future report body.
+    // In such cases, hdr_sz ought to be zero. In all other (known) cases,
+    // mem_hdr contains HTTP response headers (positive hdr_sz if parsed)
+    // followed by HTTP response body. This arithmetic accommodates all cases.
+    return NaturalSum<int64_t>(hdr_sz, copyInto.offset, parsingBuffer->contentSize()).value();
+}
+
+/// Copies at least some of the requested body bytes from MemObject memory,
+/// satisfying the copy() request.
+/// \pre canReadFromMemory() is true
 void
-store_client::scheduleMemRead()
+store_client::readFromMemory()
 {
-    /* What the client wants is in memory */
-    /* Old style */
-    debugs(90, 3, "store_client::doCopy: Copying normal from memory");
-    const auto sz = entry->mem_obj->data_hdr.copy(copyInto); // may be <= 0 per copy() API
-    callback(sz);
-    flags.store_copying = false;
+    Assure(parsingBuffer);
+    const auto readInto = parsingBuffer->space().positionAt(nextHttpReadOffset());
+
+    debugs(90, 3, "copying HTTP body bytes from memory into " << readInto);
+    const auto sz = entry->mem_obj->data_hdr.copy(readInto);
+    Assure(sz > 0); // our canReadFromMemory() precondition guarantees that
+    parsingBuffer->appended(readInto.data, sz);
 }
 
 void
@@ -499,52 +526,132 @@
     assert(!flags.disk_io_pending);
     flags.disk_io_pending = true;
 
+    // mem->swap_hdr_sz is zero here during initial read(s)
+    const auto nextStoreReadOffset = NaturalSum<int64_t>(mem->swap_hdr_sz, nextHttpReadOffset()).value();
+
+    // XXX: If fileRead() is called when we do not yet know mem->swap_hdr_sz,
+    // then we must start reading from disk offset zero to learn it: we cannot
+    // compute correct HTTP response start offset on disk without it. However,
+    // late startSwapin() calls imply that the assertion below might fail.
+    Assure(mem->swap_hdr_sz > 0 || !nextStoreReadOffset);
+
+    // TODO: Remove this assertion. Introduced in 1998 commit 3157c72, it
+    // assumes that swapped out memory is freed unconditionally, but we no
+    // longer do that because trimMemory() path checks lowestMemReaderOffset().
+    // It is also misplaced: We are not swapping out anything here and should
+    // not care about any swapout invariants.
     if (mem->swap_hdr_sz != 0)
         if (entry->swappingOut())
-            assert(mem->swapout.sio->offset() > copyInto.offset + (int64_t)mem->swap_hdr_sz);
+            assert(mem->swapout.sio->offset() > nextStoreReadOffset);
+
+    // XXX: We should let individual cache_dirs limit the read size instead, but
+    // we cannot do that without more fixes and research because:
+    // * larger reads corrupt responses when cache_dir uses SharedMemory::get();
+    // * we do not know how to find all I/O code that assumes this limit;
+    // * performance effects of larger disk reads may be negative somewhere.
+    const decltype(StoreIOBuffer::length) maxReadSize = SM_PAGE_SIZE;
+
+    Assure(parsingBuffer);
+    // also, do not read more than we can return (via a copyInto.length buffer)
+    const auto readSize = std::min(copyInto.length, maxReadSize);
+    lastDiskRead = parsingBuffer->makeSpace(readSize).positionAt(nextStoreReadOffset);
+    debugs(90, 5, "into " << lastDiskRead);
 
     storeRead(swapin_sio,
-              copyInto.data,
-              copyInto.length,
-              copyInto.offset + mem->swap_hdr_sz,
+              lastDiskRead.data,
+              lastDiskRead.length,
+              lastDiskRead.offset,
               mem->swap_hdr_sz == 0 ? storeClientReadHeader
               : storeClientReadBody,
               this);
 }
 
 void
-store_client::readBody(const char *, ssize_t len)
+store_client::readBody(const char * const buf, const ssize_t lastIoResult)
 {
-    // Don't assert disk_io_pending here.. may be called by read_header
+    Assure(flags.disk_io_pending);
     flags.disk_io_pending = false;
     assert(_callback.pending());
-    debugs(90, 3, "storeClientReadBody: len " << len << "");
+    Assure(parsingBuffer);
+    debugs(90, 3, "got " << lastIoResult << " using " << *parsingBuffer);
 
-    if (len < 0)
+    if (lastIoResult < 0)
         return fail();
 
-    const auto rep = entry->mem_obj ? &entry->mem().baseReply() : nullptr;
-    if (copyInto.offset == 0 && len > 0 && rep && rep->sline.status() == Http::scNone) {
-        /* Our structure ! */
-        if (!entry->mem_obj->adjustableBaseReply().parseCharBuf(copyInto.data, headersEnd(copyInto.data, len))) {
-            debugs(90, DBG_CRITICAL, "ERROR: Could not parse headers from on disk object");
-        }
+    if (!lastIoResult) {
+        if (answeredOnce())
+            return noteNews();
+
+        debugs(90, DBG_CRITICAL, "ERROR: Truncated HTTP headers in on-disk object");
+        return fail();
     }
 
-    if (len > 0 && rep && entry->mem_obj->inmem_lo == 0 && entry->objectLen() <= (int64_t)Config.Store.maxInMemObjSize && Config.onoff.memory_cache_disk) {
-        storeGetMemSpace(len);
+    assert(lastDiskRead.data == buf);
+    lastDiskRead.length = lastIoResult;
+
+    parsingBuffer->appended(buf, lastIoResult);
+
+    // we know swap_hdr_sz by now and were reading beyond swap metadata because
+    // readHead() would have been called otherwise (to read swap metadata)
+    const auto swap_hdr_sz = entry->mem().swap_hdr_sz;
+    Assure(swap_hdr_sz > 0);
+    Assure(!Less(lastDiskRead.offset, swap_hdr_sz));
+
+    // Map lastDiskRead (i.e. the disk area we just read) to an HTTP reply part.
+    // The bytes are the same, but disk and HTTP offsets differ by swap_hdr_sz.
+    const auto httpOffset = lastDiskRead.offset - swap_hdr_sz;
+    const auto httpPart = StoreIOBuffer(lastDiskRead).positionAt(httpOffset);
+
+    maybeWriteFromDiskToMemory(httpPart);
+    handleBodyFromDisk();
+}
+
+/// de-serializes HTTP response (partially) read from disk storage
+void
+store_client::handleBodyFromDisk()
+{
+    // We cannot de-serialize on-disk HTTP response without MemObject because
+    // without MemObject::swap_hdr_sz we cannot know where that response starts.
+    Assure(entry->mem_obj);
+    Assure(entry->mem_obj->swap_hdr_sz > 0);
+
+    if (!answeredOnce()) {
+        // All on-disk responses have HTTP headers. First disk body read(s)
+        // include HTTP headers that we must parse (if needed) and skip.
+        const auto haveHttpHeaders = entry->mem_obj->baseReply().pstate == Http::Message::psParsed;
+        if (!haveHttpHeaders && !parseHttpHeadersFromDisk())
+            return;
+        skipHttpHeadersFromDisk();
+    }
+
+    noteNews();
+}
+
+/// Adds HTTP response data loaded from disk to the memory cache (if
+/// needed/possible). The given part may contain portions of HTTP response
+/// headers and/or HTTP response body.
+void
+store_client::maybeWriteFromDiskToMemory(const StoreIOBuffer &httpResponsePart)
+{
+    // XXX: Reject [memory-]uncachable/unshareable responses instead of assuming
+    // that an HTTP response should be written to MemObject's data_hdr (and that
+    // it may purge already cached entries) just because it "fits" and was
+    // loaded from disk. For example, this response may already be marked for
+    // release. The (complex) cachability decision(s) should be made outside
+    // (and obeyed by) this low-level code.
+    if (httpResponsePart.length && entry->mem_obj->inmem_lo == 0 && entry->objectLen() <= (int64_t)Config.Store.maxInMemObjSize && Config.onoff.memory_cache_disk) {
+        storeGetMemSpace(httpResponsePart.length);
+        // XXX: This "recheck" is not needed because storeGetMemSpace() cannot
+        // purge mem_hdr bytes of a locked entry, and we do lock ours. And
+        // inmem_lo offset itself should not be relevant to appending new bytes.
+        //
         // recheck for the above call may purge entry's data from the memory cache
         if (entry->mem_obj->inmem_lo == 0) {
-            /* Copy read data back into memory.
-             * copyInto.offset includes headers, which is what mem cache needs
-             */
-            if (copyInto.offset == entry->mem_obj->endOffset()) {
-                entry->mem_obj->write(StoreIOBuffer(len, copyInto.offset, copyInto.data));
-            }
+            // XXX: This code assumes a non-shared memory cache.
+            if (httpResponsePart.offset == entry->mem_obj->endOffset())
+                entry->mem_obj->write(httpResponsePart);
         }
     }
-
-    callback(len);
 }
 
 void
@@ -613,41 +720,25 @@
     if (!object_ok)
         return;
 
+    Assure(parsingBuffer);
+    debugs(90, 3, "got " << len << " using " << *parsingBuffer);
+
     if (len < 0)
         return fail();
 
     try {
+        Assure(!parsingBuffer->contentSize());
+        parsingBuffer->appended(buf, len);
         Store::UnpackHitSwapMeta(buf, len, *entry);
+        parsingBuffer->consume(mem->swap_hdr_sz);
     } catch (...) {
         debugs(90, DBG_IMPORTANT, "ERROR: Failed to unpack Store entry metadata: " << CurrentException);
         fail();
         return;
     }
 
-    /*
-     * If our last read got some data the client wants, then give
-     * it to them, otherwise schedule another read.
-     */
-    size_t body_sz = len - mem->swap_hdr_sz;
-
-    if (copyInto.offset < static_cast<int64_t>(body_sz)) {
-        /*
-         * we have (part of) what they want
-         */
-        size_t copy_sz = min(copyInto.length, body_sz);
-        debugs(90, 3, "storeClientReadHeader: copying " << copy_sz << " bytes of body");
-        memmove(copyInto.data, copyInto.data + mem->swap_hdr_sz, copy_sz);
-
-        readBody(copyInto.data, copy_sz);
-
-        return;
-    }
-
-    /*
-     * we don't have what the client wants, but at least we now
-     * know the swap header size.
-     */
-    fileRead();
+    maybeWriteFromDiskToMemory(parsingBuffer->content());
+    handleBodyFromDisk();
 }
 
 /*
@@ -893,13 +984,70 @@
     return true;
 }
 
+/// parses HTTP header bytes loaded from disk
+/// \returns false if fail() or scheduleDiskRead() has been called and, hence,
+/// the caller should just quit without any further action
+bool
+store_client::parseHttpHeadersFromDisk()
+{
+    try {
+        return tryParsingHttpHeaders();
+    } catch (...) {
+        // XXX: Our parser enforces Config.maxReplyHeaderSize limit, but our
+        // packer does not. Since packing might increase header size, we may
+        // cache a header that we cannot parse and get here. Same for MemStore.
+        debugs(90, DBG_CRITICAL, "ERROR: Cannot parse on-disk HTTP headers" <<
+               Debug::Extra << "exception: " << CurrentException <<
+               Debug::Extra << "raw input size: " << parsingBuffer->contentSize() << " bytes" <<
+               Debug::Extra << "current buffer capacity: " << parsingBuffer->capacity() << " bytes");
+        fail();
+        return false;
+    }
+}
+
+/// parseHttpHeadersFromDisk() helper
+/// \copydoc parseHttpHeadersFromDisk()
+bool
+store_client::tryParsingHttpHeaders()
+{
+    Assure(parsingBuffer);
+    Assure(!copyInto.offset); // otherwise, parsingBuffer cannot have HTTP response headers
+    auto &adjustableReply = entry->mem().adjustableBaseReply();
+    if (adjustableReply.parseTerminatedPrefix(parsingBuffer->c_str(), parsingBuffer->contentSize()))
+        return true;
+
+    // TODO: Optimize by checking memory as well. For simplicity sake, we
+    // continue on the disk-reading path, but readFromMemory() can give us the
+    // missing header bytes immediately if a concurrent request put those bytes
+    // into memory while we were waiting for our disk response.
+    scheduleDiskRead();
+    return false;
+}
+
+/// skips HTTP header bytes previously loaded from disk
+void
+store_client::skipHttpHeadersFromDisk()
+{
+    const auto hdr_sz = entry->mem_obj->baseReply().hdr_sz;
+    Assure(hdr_sz > 0); // all on-disk responses have HTTP headers
+    if (Less(parsingBuffer->contentSize(), hdr_sz)) {
+        debugs(90, 5, "discovered " << hdr_sz << "-byte HTTP headers in memory after reading some of them from disk: " << *parsingBuffer);
+        parsingBuffer->consume(parsingBuffer->contentSize()); // skip loaded HTTP header prefix
+    } else {
+        parsingBuffer->consume(hdr_sz); // skip loaded HTTP headers
+        const auto httpBodyBytesAfterHeader = parsingBuffer->contentSize(); // may be zero
+        Assure(httpBodyBytesAfterHeader <= copyInto.length);
+        debugs(90, 5, "read HTTP body prefix: " << httpBodyBytesAfterHeader);
+    }
+}
+
 void
 store_client::dumpStats(MemBuf * output, int clientNumber) const
 {
     if (_callback.pending())
         return;
 
-    output->appendf("\tClient #%d, %p\n", clientNumber, _callback.callback_data);
+    output->appendf("\tClient #%d, %p\n", clientNumber, this);
     output->appendf("\t\tcopy_offset: %" PRId64 "\n", copyInto.offset);
     output->appendf("\t\tcopy_size: %" PRIuSIZE "\n", copyInto.length);
     output->append("\t\tflags:", 8);
@@ -924,7 +1072,7 @@
 
 store_client::Callback::Callback(STCB *function, void *data):
     callback_handler(function),
-    callback_data(data),
+    cbData(data),
     codeContext(CodeContext::Current())
 {
 }
diff -u -r -N squid-6.3/src/StoreClient.h squid-6.4/src/StoreClient.h
--- squid-6.3/src/StoreClient.h	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/StoreClient.h	2023-10-21 11:40:41.000000000 +1300
@@ -13,10 +13,23 @@
 #include "base/AsyncCall.h"
 #include "base/forward.h"
 #include "dlink.h"
+#include "store/ParsingBuffer.h"
 #include "StoreIOBuffer.h"
 #include "StoreIOState.h"
 
-typedef void STCB(void *, StoreIOBuffer);   /* store callback */
+/// A storeClientCopy() callback function.
+///
+/// Upon storeClientCopy() success, StoreIOBuffer::flags.error is zero, and
+/// * HTTP response headers (if any) are available via MemObject::freshestReply();
+/// * HTTP response body bytes (if any) are available via StoreIOBuffer.
+///
+/// STCB callbacks may use response semantics to detect certain EOF conditions.
+/// Callbacks that expect HTTP headers may call store_client::atEof(). Similar
+/// to clientStreamCallback() callbacks, callbacks dedicated to receiving HTTP
+/// bodies may use zero StoreIOBuffer::length as an EOF condition.
+///
+/// Errors are indicated by setting StoreIOBuffer flags.error.
+using STCB = void (void *, StoreIOBuffer);
 
 class StoreEntry;
 class ACLFilledChecklist;
@@ -88,7 +101,13 @@
 
     void dumpStats(MemBuf * output, int clientNumber) const;
 
-    int64_t cmp_offset;
+    // TODO: When STCB gets a dedicated Answer type, move this info there.
+    /// Whether the last successful storeClientCopy() answer was known to
+    /// contain the last body bytes of the HTTP response
+    /// \retval true requesting bytes at higher offsets is futile
+    /// \sa STCB
+    bool atEof() const { return atEof_; }
+
 #if STORE_CLIENT_LIST_DEBUG
 
     void *owner;
@@ -123,18 +142,27 @@
     dlink_node node;
 
 private:
-    bool moreToSend() const;
+    bool moreToRead() const;
+    bool canReadFromMemory() const;
+    bool answeredOnce() const { return answers >= 1; }
+    bool sendingHttpHeaders() const;
+    int64_t nextHttpReadOffset() const;
 
     void fileRead();
     void scheduleDiskRead();
-    void scheduleMemRead();
+    void readFromMemory();
     void scheduleRead();
     bool startSwapin();
+    void handleBodyFromDisk();
+    void maybeWriteFromDiskToMemory(const StoreIOBuffer &);
+
+    bool parseHttpHeadersFromDisk();
+    bool tryParsingHttpHeaders();
+    void skipHttpHeadersFromDisk();
 
     void fail();
     void callback(ssize_t);
     void noteCopiedBytes(size_t);
-    void noteEof();
     void noteNews();
     void finishCallback();
     static void FinishCallback(store_client *);
@@ -142,20 +170,30 @@
     int type;
     bool object_ok;
 
+    /// \copydoc atEof()
+    bool atEof_;
+
     /// Storage and metadata associated with the current copy() request. Ought
     /// to be ignored when not answering a copy() request.
     StoreIOBuffer copyInto;
 
-    /// The number of bytes loaded from Store into copyInto while answering the
-    /// current copy() request. Ought to be ignored when not answering.
-    size_t copiedSize;
+    /// the total number of finishCallback() calls
+    uint64_t answers;
+
+    /// Accumulates raw bytes read from Store while answering the current copy()
+    /// request. Buffer contents depends on the source and parsing stage; it may
+    /// hold (parts of) swap metadata, HTTP response headers, and/or HTTP
+    /// response body bytes.
+    std::optional<Store::ParsingBuffer> parsingBuffer;
+
+    StoreIOBuffer lastDiskRead; ///< buffer used for the last storeRead() call
 
     /* Until we finish stuffing code into store_client */
 
 public:
 
     struct Callback {
-        Callback ():callback_handler(nullptr), callback_data(nullptr) {}
+        Callback() = default;
 
         Callback (STCB *, void *);
 
@@ -164,8 +202,8 @@
         /// delivery to the STCB callback_handler.
         bool pending() const;
 
-        STCB *callback_handler;
-        void *callback_data;
+        STCB *callback_handler = nullptr; ///< where to deliver the answer
+        CallbackData cbData; ///< the first STCB callback parameter
         CodeContextPointer codeContext; ///< Store client context
 
         /// a scheduled asynchronous finishCallback() call (or nil)
@@ -173,7 +211,18 @@
     } _callback;
 };
 
+/// Asynchronously read HTTP response headers and/or body bytes from Store.
+///
+/// The requested zero-based HTTP body offset is specified via the
+/// StoreIOBuffer::offset field. The first call (for a given store_client
+/// object) must specify zero offset.
+///
+/// The requested HTTP body portion size is specified via the
+/// StoreIOBuffer::length field. The function may return fewer body bytes.
+///
+/// See STCB for result delivery details.
 void storeClientCopy(store_client *, StoreEntry *, StoreIOBuffer, STCB *, void *);
+
 store_client* storeClientListAdd(StoreEntry * e, void *data);
 int storeUnregister(store_client * sc, StoreEntry * e, void *data);
 int storePendingNClients(const StoreEntry * e);
diff -u -r -N squid-6.3/src/StoreIOBuffer.h squid-6.4/src/StoreIOBuffer.h
--- squid-6.3/src/StoreIOBuffer.h	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/StoreIOBuffer.h	2023-10-21 11:40:41.000000000 +1300
@@ -43,6 +43,9 @@
         return Range<int64_t>(offset, offset + length);
     }
 
+    /// convenience method for changing the offset of a being-configured buffer
+    StoreIOBuffer &positionAt(const int64_t newOffset) { offset = newOffset; return *this; }
+
     void dump() const {
         if (fwrite(data, length, 1, stderr)) {}
         if (fwrite("\n", 1, 1, stderr)) {}
diff -u -r -N squid-6.3/src/tests/stub_HttpReply.cc squid-6.4/src/tests/stub_HttpReply.cc
--- squid-6.3/src/tests/stub_HttpReply.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/tests/stub_HttpReply.cc	2023-10-21 11:40:41.000000000 +1300
@@ -24,6 +24,8 @@
 bool HttpReply::sanityCheckStartLine(const char *, const size_t, Http::StatusCode *) STUB_RETVAL(false)
 int HttpReply::httpMsgParseError() STUB_RETVAL(0)
 bool HttpReply::expectingBody(const HttpRequestMethod&, int64_t&) const STUB_RETVAL(false)
+size_t HttpReply::parseTerminatedPrefix(const char *, size_t) STUB_RETVAL(0)
+size_t HttpReply::prefixLen() const STUB_RETVAL(0)
 bool HttpReply::parseFirstLine(const char *, const char *) STUB_RETVAL(false)
 void HttpReply::hdrCacheInit() STUB
 HttpReply * HttpReply::clone() const STUB_RETVAL(nullptr)
diff -u -r -N squid-6.3/src/tests/stub_libhttp.cc squid-6.4/src/tests/stub_libhttp.cc
--- squid-6.3/src/tests/stub_libhttp.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/tests/stub_libhttp.cc	2023-10-21 11:40:41.000000000 +1300
@@ -92,6 +92,7 @@
 void StatusLine::clean() STUB
 void StatusLine::set(const AnyP::ProtocolVersion &, Http::StatusCode, const char *) STUB
 const char *StatusLine::reason() const STUB_RETVAL(nullptr)
+size_t StatusLine::packedLength() const STUB_RETVAL(0)
 void StatusLine::packInto(Packable *) const STUB
 bool StatusLine::parse(const String &, const char *, const char *) STUB_RETVAL(false)
 }
diff -u -r -N squid-6.3/src/tests/stub_libstore.cc squid-6.4/src/tests/stub_libstore.cc
--- squid-6.3/src/tests/stub_libstore.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/tests/stub_libstore.cc	2023-10-21 11:40:41.000000000 +1300
@@ -41,7 +41,7 @@
 void Controller::handleIdleEntry(StoreEntry &) STUB
 void Controller::freeMemorySpace(const int) STUB
 void Controller::memoryOut(StoreEntry &, const bool) STUB
-void Controller::updateOnNotModified(StoreEntry *, StoreEntry &) STUB
+bool Controller::updateOnNotModified(StoreEntry *, StoreEntry &) STUB
 bool Controller::allowCollapsing(StoreEntry *, const RequestFlags &, const HttpRequestMethod &) STUB_RETVAL(false)
 void Controller::addReading(StoreEntry *, const cache_key *) STUB
 void Controller::addWriting(StoreEntry *, const cache_key *) STUB
diff -u -r -N squid-6.3/src/Transients.cc squid-6.4/src/Transients.cc
--- squid-6.3/src/Transients.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/Transients.cc	2023-10-21 11:40:41.000000000 +1300
@@ -409,7 +409,7 @@
     TransientsMap::Owner *mapOwner = nullptr;
 };
 
-RunnerRegistrationEntry(TransientsRr);
+DefineRunnerRegistrator(TransientsRr);
 
 void
 TransientsRr::useConfig()
diff -u -r -N squid-6.3/src/urn.cc squid-6.4/src/urn.cc
--- squid-6.3/src/urn.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/src/urn.cc	2023-10-21 11:40:41.000000000 +1300
@@ -27,8 +27,6 @@
 #include "tools.h"
 #include "urn.h"
 
-#define URN_REQBUF_SZ   4096
-
 class UrnState : public StoreClient
 {
     CBDATA_CLASS(UrnState);
@@ -48,8 +46,8 @@
     HttpRequest::Pointer urlres_r;
     AccessLogEntry::Pointer ale; ///< details of the requesting transaction
 
-    char reqbuf[URN_REQBUF_SZ] = { '\0' };
-    int reqofs = 0;
+    /// for receiving a URN resolver reply body from Store and interpreting it
+    Store::ParsingBuffer parsingBuffer;
 
 private:
     /* StoreClient API */
@@ -70,7 +68,7 @@
 } url_entry;
 
 static STCB urnHandleReply;
-static url_entry *urnParseReply(const char *inbuf, const HttpRequestMethod&);
+static url_entry *urnParseReply(const SBuf &, const HttpRequestMethod &);
 static const char *const crlf = "\r\n";
 
 CBDATA_CLASS_INIT(UrnState);
@@ -189,13 +187,8 @@
         sc = storeClientListAdd(urlres_e, this);
     }
 
-    reqofs = 0;
-    StoreIOBuffer tempBuffer;
-    tempBuffer.offset = reqofs;
-    tempBuffer.length = URN_REQBUF_SZ;
-    tempBuffer.data = reqbuf;
     storeClientCopy(sc, urlres_e,
-                    tempBuffer,
+                    parsingBuffer.makeInitialSpace(),
                     urnHandleReply,
                     this);
 }
@@ -237,19 +230,14 @@
     UrnState *urnState = static_cast<UrnState *>(data);
     StoreEntry *e = urnState->entry;
     StoreEntry *urlres_e = urnState->urlres_e;
-    char *s = nullptr;
-    size_t k;
-    HttpReply *rep;
     url_entry *urls;
     url_entry *u;
     url_entry *min_u;
     ErrorState *err;
     int i;
     int urlcnt = 0;
-    char *buf = urnState->reqbuf;
-    StoreIOBuffer tempBuffer;
 
-    debugs(52, 3, "urnHandleReply: Called with size=" << result.length << ".");
+    debugs(52, 3, result << " with " << *e);
 
     if (EBIT_TEST(urlres_e->flags, ENTRY_ABORTED) || result.flags.error) {
         delete urnState;
@@ -262,59 +250,39 @@
         return;
     }
 
-    /* Update reqofs to point to where in the buffer we'd be */
-    urnState->reqofs += result.length;
-
-    /* Handle reqofs being bigger than normal */
-    if (urnState->reqofs >= URN_REQBUF_SZ) {
-        delete urnState;
-        return;
-    }
+    urnState->parsingBuffer.appended(result.data, result.length);
 
     /* If we haven't received the entire object (urn), copy more */
-    if (urlres_e->store_status == STORE_PENDING) {
-        Must(result.length > 0); // zero length ought to imply STORE_OK
-        tempBuffer.offset = urnState->reqofs;
-        tempBuffer.length = URN_REQBUF_SZ - urnState->reqofs;
-        tempBuffer.data = urnState->reqbuf + urnState->reqofs;
+    if (!urnState->sc->atEof()) {
+        const auto bufferedBytes = urnState->parsingBuffer.contentSize();
+        const auto remainingSpace = urnState->parsingBuffer.space().positionAt(bufferedBytes);
+
+        if (!remainingSpace.length) {
+            debugs(52, 3, "ran out of buffer space after " << bufferedBytes << " bytes");
+            // TODO: Here and in other error cases, send ERR_URN_RESOLVE to client.
+            delete urnState;
+            return;
+        }
+
         storeClientCopy(urnState->sc, urlres_e,
-                        tempBuffer,
+                        remainingSpace,
                         urnHandleReply,
                         urnState);
         return;
     }
 
-    /* we know its STORE_OK */
-    k = headersEnd(buf, urnState->reqofs);
-
-    if (0 == k) {
-        debugs(52, DBG_IMPORTANT, "urnHandleReply: didn't find end-of-headers for " << e->url()  );
-        delete urnState;
-        return;
-    }
-
-    s = buf + k;
-    // TODO: Check whether we should parse urlres_e reply, as before 528b2c61.
-    rep = new HttpReply;
-    rep->parseCharBuf(buf, k);
-    debugs(52, 3, "reply exists, code=" << rep->sline.status() << ".");
-
-    if (rep->sline.status() != Http::scOkay) {
+    const auto &peerReply = urlres_e->mem().baseReply();
+    debugs(52, 3, "got reply, code=" << peerReply.sline.status());
+    if (peerReply.sline.status() != Http::scOkay) {
         debugs(52, 3, "urnHandleReply: failed.");
         err = new ErrorState(ERR_URN_RESOLVE, Http::scNotFound, urnState->request.getRaw(), urnState->ale);
         err->url = xstrdup(e->url());
         errorAppendEntry(e, err);
-        delete rep;
         delete urnState;
         return;
     }
 
-    delete rep;
-
-    while (xisspace(*s))
-        ++s;
-
-    urls = urnParseReply(s, urnState->request->method);
+    urls = urnParseReply(urnState->parsingBuffer.toSBuf(), urnState->request->method);
 
     if (!urls) {     /* unknown URN error */
         debugs(52, 3, "urnTranslateDone: unknown URN " << e->url());
@@ -362,7 +330,7 @@
         "Generated by %s@%s\n"
         "</ADDRESS>\n",
         visible_appname_string, getMyHostname());
-    rep = new HttpReply;
+    const auto rep = new HttpReply;
     rep->setHeaders(Http::scFound, nullptr, "text/html", mb->length(), 0, squid_curtime);
 
     if (min_u) {
@@ -384,9 +352,8 @@
 }
 
 static url_entry *
-urnParseReply(const char *inbuf, const HttpRequestMethod& m)
+urnParseReply(const SBuf &inBuf, const HttpRequestMethod &m)
 {
-    char *buf = xstrdup(inbuf);
     char *token;
     url_entry *list;
     url_entry *old;
@@ -395,6 +362,13 @@
     debugs(52, 3, "urnParseReply");
     list = (url_entry *)xcalloc(n + 1, sizeof(*list));
 
+    // XXX: Switch to tokenizer-based parsing.
+    const auto allocated = SBufToCstring(inBuf);
+
+    auto buf = allocated;
+    while (xisspace(*buf))
+        ++buf;
+
     for (token = strtok(buf, crlf); token; token = strtok(nullptr, crlf)) {
         debugs(52, 3, "urnParseReply: got '" << token << "'");
 
@@ -430,7 +404,7 @@
     }
 
     debugs(52, 3, "urnParseReply: Found " << i << " URLs");
-    xfree(buf);
+    xfree(allocated);
     return list;
 }
 
diff -u -r -N squid-6.3/test-suite/test-functionality.sh squid-6.4/test-suite/test-functionality.sh
--- squid-6.3/test-suite/test-functionality.sh	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/test-suite/test-functionality.sh	2023-10-21 11:40:41.000000000 +1300
@@ -165,54 +165,20 @@
     return $result
 }
 
-check_pconn() {
-    run_confirmed_test pconn
-}
-
-check_busy_restart() {
-    run_confirmed_test busy-restart
-}
-
-check_proxy_collapsed_forwarding() {
-    if ! has_commit_by_message 1af789e 'Do not stall if xactions overwrite a recently active'
-    then
-        echo "No proxy-collapsed-forwarding due to stalling transactions"
-        return 0;
-    fi
-    run_confirmed_test proxy-collapsed-forwarding
-}
-
-check_proxy_update_headers_after_304() {
-    if grep 'AC_INIT.*Proxy.,.[1234][.]' configure.ac
-    then
-        echo "No proxy-update-headers-after-304 until v5";
-        return 0;
-    fi
-    run_confirmed_test proxy-update-headers-after-304
-}
-
-check_upgrade_protocols() {
-    if ! grep -q http_upgrade_request_protocols src/cf.data.pre
-    then
-        echo "No upgrade-protocols without http_upgrade_request_protocols support";
-        return 0;
-    fi
-    run_confirmed_test upgrade-protocols
-}
-
-check_truncated_responses() {
-    run_confirmed_test truncated-responses
-}
-
-# executes a single check_name test named by the parameter
+# executes a single test named by the parameter
 run_one_test() {
     local testName=$1
 
-    # convert a test name foo into a check_foo() function name suffix; e.g.
-    # busy-restart becomes busy_restart (to be called below as check_busy_restart)
-    check=`echo $testName | sed s/-/_/g`
+    # convert the given foo-bar test name into a check_foo_bar() function name
+    checkFunction=`echo "check_$testName" | sed s/-/_/g`
 
-    check_$check
+    if type $checkFunction 1> /dev/null 2> /dev/null
+    then
+        # a custom test wrapper exists
+        $checkFunction
+    else
+        run_confirmed_test $testName
+    fi
 }
 
 # executes all of the given tests, providing a summary of their failures
@@ -249,7 +215,9 @@
         local default_tests="
             pconn
             proxy-update-headers-after-304
+            accumulate-headers-after-304
             upgrade-protocols
+            cache-response
             proxy-collapsed-forwarding
             busy-restart
             truncated-responses
diff -u -r -N squid-6.3/tools/cachemgr.cc squid-6.4/tools/cachemgr.cc
--- squid-6.3/tools/cachemgr.cc	2023-09-03 18:17:45.000000000 +1200
+++ squid-6.4/tools/cachemgr.cc	2023-10-21 11:40:41.000000000 +1300
@@ -277,11 +277,16 @@
     printf("  if (x.readyState==4) {\n");
     printf("   if ((x.status>=200 && x.status <= 299) || x.status==401) {\n");
     printf("    var v = x.getResponseHeader('Server');\n");
-    printf("    if (v.substring(0,8) == 'squid/3.' && (v[8]=='H' || parseInt(v.substring(8)) >= 2)) {\n");
+    printf("    if (v.substring(0,6) == 'squid/' || v == 'squid') {\n");
     printf("     var d = document.getElementById('H' + s + 'mgr');\n");
     printf("     if (d.innerHTML == '') d.innerHTML = '<h2>HTTP' + (s=='s'?'S':'') + ' Managed Proxies</h2>';\n");
     printf("     d.innerHTML = d.innerHTML + '<p>Host: <a href=\"http' + s + '://' + t + '/squid-internal-mgr/\">' + t + '</a></p>';\n");
-    printf(" }}}}\n");
+    printf("     var sv = document.getElementById('server');\n");
+    printf("     var op = sv.getElementsByTagName('OPTION');\n");
+    printf("     for(var i=0; i<op.length; i++) { if (op[i].innerHTML == t) { sv.removeChild(op[i]); i--; }}\n");
+    printf("     if (sv.getElementsByTagName('OPTION').length == 0) {\n");
+    printf("      document.getElementById('Cmgr').innerHTML = '';\n");
+    printf(" }}}}}\n");
     printf(" x.send(null);\n");
     printf("}\n");
     printf("</script>\n");
@@ -451,8 +456,11 @@
     char *a_url;
     char *buf_copy;
 
-    const char bufLen = strlen(buf);
-    if (bufLen < 1 || *buf != ' ') {
+    const auto bufLen = strlen(buf);
+    if (bufLen < 1)
+        return; // nothing to append
+
+    if (*buf != ' ') {
         out.append(buf, bufLen);
         return;
     }
@@ -802,7 +810,7 @@
     }
 
     if (req->action == nullptr) {
-        req->action = xstrdup("");
+        req->action = xstrdup("menu");
     }
 
     if (strcmp(req->action, "authenticate") == 0) {
diff -u -r -N squid-6.3/tools/helper-mux/helper-mux.8 squid-6.4/tools/helper-mux/helper-mux.8
--- squid-6.3/tools/helper-mux/helper-mux.8	2023-09-03 20:37:54.000000000 +1200
+++ squid-6.4/tools/helper-mux/helper-mux.8	2023-10-22 01:47:17.000000000 +1300
@@ -133,7 +133,7 @@
 .\" ========================================================================
 .\"
 .IX Title "HELPER-MUX 8"
-.TH HELPER-MUX 8 "2023-09-03" "perl v5.36.0" "User Contributed Perl Documentation"
+.TH HELPER-MUX 8 "2023-10-21" "perl v5.36.0" "User Contributed Perl Documentation"
 .\" For nroff, turn off justification.  Always turn off hyphenation; it makes
 .\" way too many mistakes in technical documents.
 .if n .ad l
