VAULT-40506: Disable custom JSON limit parsing on ClusterListener (#10848) (#11075)

* add test for issue

* add bool to disable json limit parsing

* clean up tests

* fix godoc

* restore previous clustering setup

* add nil check for VaultNodeConfig

* add changelog

* move docker test to appropriate directory to allow testing in CI

---------

Co-authored-by: davidadeleon <56207066+davidadeleon@users.noreply.github.com>
Co-authored-by: davidadeleon <ddeleon@hashicorp.com>
This commit is contained in:
Vault Automation 2025-12-12 17:23:36 -05:00 committed by GitHub
parent b03ed9cf7f
commit 6c2f2a84ee
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
7 changed files with 63 additions and 19 deletions

3
changelog/_10848.txt Normal file
View file

@ -0,0 +1,3 @@
```release-note:bug
http: skip JSON limit parsing on cluster listener
```

View file

@ -1514,8 +1514,10 @@ func (c *ServerCommand) Run(args []string) int {
// mode if it's set
core.SetClusterListenerAddrs(clusterAddrs)
core.SetClusterHandler(vaulthttp.Handler.Handler(&vault.HandlerProperties{
Core: core,
ListenerConfig: &configutil.Listener{},
Core: core,
ListenerConfig: &configutil.Listener{
DisableJSONLimitParsing: true,
},
}))
// Attempt unsealing in a background goroutine. This is needed for when a

View file

@ -74,6 +74,12 @@ func wrapJSONLimitsHandler(handler http.Handler, props *vault.HandlerProperties)
var maxRequestSize, maxJSONDepth, maxStringValueLength, maxObjectEntryCount, maxArrayElementCount, maxToken int64
if props.ListenerConfig != nil {
// Check to see if limits are disabled
if props.ListenerConfig.DisableJSONLimitParsing {
handler.ServeHTTP(w, r)
return
}
maxRequestSize = props.ListenerConfig.MaxRequestSize
maxJSONDepth = props.ListenerConfig.CustomMaxJSONDepth
maxStringValueLength = props.ListenerConfig.CustomMaxJSONStringValueLength

View file

@ -152,6 +152,11 @@ type Listener struct {
// JSON-specific limits
// DisableJSONLimitParsing disables the checking for JSON limits. This is only applicable to
// the listener config passed into the Cluster listener since this would impact forwarded
// requests that have already been checked via the API listener on the originating node.
DisableJSONLimitParsing bool `hcl:"-"`
// CustomMaxJSONDepth specifies the maximum nesting depth of a JSON object.
CustomMaxJSONDepthRaw interface{} `hcl:"max_json_depth"`
CustomMaxJSONDepth int64 `hcl:"-"`

View file

@ -20,6 +20,7 @@ import (
"fmt"
"io"
"io/ioutil"
"maps"
"math/big"
mathrand "math/rand"
"net"
@ -669,6 +670,11 @@ func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOption
defaultListenerConfig = n.createDefaultListenerConfig()
}
// Merge custom listener config options to default config
if opts.VaultNodeConfig != nil && opts.VaultNodeConfig.CustomListenerConfigOpts != nil {
maps.Copy(defaultListenerConfig["tcp"].(map[string]interface{}), opts.VaultNodeConfig.CustomListenerConfigOpts)
}
listenerConfig = append(listenerConfig, defaultListenerConfig)
ports := []string{"8200/tcp", "8201/tcp"}
@ -1424,6 +1430,28 @@ func (dc *DockerCluster) GetActiveClusterNode() *DockerClusterNode {
return dc.ClusterNodes[node]
}
// GetActiveAndStandbys blocks until one cluster node becomes active, then
// returns that node together with every other node as the standby set.
// It panics if no node becomes active within a 60-second window, since
// callers (tests) cannot proceed without a leader.
func (dc *DockerCluster) GetActiveAndStandbys() (*DockerClusterNode, []*DockerClusterNode) {
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()

	activeIdx, err := testcluster.WaitForActiveNode(ctx, dc)
	if err != nil {
		panic(fmt.Sprintf("no cluster node became active in timeout window: %v", err))
	}

	// Partition the nodes: the one at activeIdx is the leader, the rest
	// are standbys. standbys stays nil when the cluster has a single node.
	var active *DockerClusterNode
	var standbys []*DockerClusterNode
	for idx, node := range dc.ClusterNodes {
		if idx == activeIdx {
			active = node
		} else {
			standbys = append(standbys, node)
		}
	}
	return active, standbys
}
/* Notes on testing the non-bridge network case:
- you need the test itself to be running in a container so that it can use
the network; create the network using

View file

@ -54,9 +54,10 @@ type VaultNodeConfig struct {
// ServiceRegistrationType string
// ServiceRegistrationOptions map[string]string
StorageOptions map[string]string `json:"-"`
AdditionalListeners []VaultNodeListenerConfig `json:"-"`
AdditionalTCPPorts []int `json:"-"`
StorageOptions map[string]string `json:"-"`
AdditionalListeners []VaultNodeListenerConfig `json:"-"`
CustomListenerConfigOpts map[string]interface{} `json:"-"`
AdditionalTCPPorts []int `json:"-"`
DefaultMaxRequestDuration time.Duration `json:"default_max_request_duration"`
LogFormat string `json:"log_format"`

View file

@ -1096,6 +1096,7 @@ type TestClusterOptions struct {
SkipInit bool
HandlerFunc HandlerHandler
DefaultHandlerProperties HandlerProperties
ClusterHandlerProperties HandlerProperties
// BaseListenAddress is used to explicitly assign ports in sequence to the
// listener of each core. It should be a string of the form
@ -1210,7 +1211,7 @@ func NewTestCluster(t testing.TB, base *CoreConfig, opts *TestClusterOptions) *T
}
var numCores int
if opts == nil || opts.NumCores == 0 {
if opts.NumCores == 0 {
numCores = DefaultNumCores
} else {
numCores = opts.NumCores
@ -1225,13 +1226,13 @@ func NewTestCluster(t testing.TB, base *CoreConfig, opts *TestClusterOptions) *T
var testCluster TestCluster
switch {
case opts != nil && opts.Logger != nil && !reflect.ValueOf(opts.Logger).IsNil():
case opts.Logger != nil && !reflect.ValueOf(opts.Logger).IsNil():
testCluster.Logger = opts.Logger
default:
testCluster.Logger = corehelpers.NewTestLogger(t)
}
if opts != nil && opts.TempDir != "" {
if opts.TempDir != "" {
if _, err := os.Stat(opts.TempDir); os.IsNotExist(err) {
if err := os.MkdirAll(opts.TempDir, 0o700); err != nil {
t.Fatal(err)
@ -1247,7 +1248,7 @@ func NewTestCluster(t testing.TB, base *CoreConfig, opts *TestClusterOptions) *T
}
var caKey *ecdsa.PrivateKey
if opts != nil && opts.CAKey != nil {
if opts.CAKey != nil {
caKey = opts.CAKey
} else {
caKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
@ -1257,7 +1258,7 @@ func NewTestCluster(t testing.TB, base *CoreConfig, opts *TestClusterOptions) *T
}
testCluster.CAKey = caKey
var caBytes []byte
if opts != nil && len(opts.CACert) > 0 {
if len(opts.CACert) > 0 {
caBytes = opts.CACert
} else {
caCertTemplate := &x509.Certificate{
@ -1418,7 +1419,7 @@ func NewTestCluster(t testing.TB, base *CoreConfig, opts *TestClusterOptions) *T
NextProtos: []string{"h2", "http/1.1"},
GetCertificate: certGetter.GetCertificate,
}
if opts != nil && opts.RequireClientAuth {
if opts.RequireClientAuth {
tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
testCluster.ClientAuthRequired = true
}
@ -1603,7 +1604,7 @@ func NewTestCluster(t testing.TB, base *CoreConfig, opts *TestClusterOptions) *T
testCluster.LicensePrivateKey = priKey
}
if opts != nil && opts.InmemClusterLayers {
if opts.InmemClusterLayers {
if opts.ClusterLayers != nil {
t.Fatalf("cannot specify ClusterLayers when InmemClusterLayers is true")
}
@ -1614,7 +1615,7 @@ func NewTestCluster(t testing.TB, base *CoreConfig, opts *TestClusterOptions) *T
opts.ClusterLayers = inmemCluster
}
if opts != nil && len(opts.Plugins) != 0 {
if len(opts.Plugins) != 0 {
var plugins []pluginhelpers.TestPlugin
for _, pluginType := range opts.Plugins {
if pluginType.Container && runtime.GOOS != "linux" {
@ -1694,7 +1695,7 @@ func NewTestCluster(t testing.TB, base *CoreConfig, opts *TestClusterOptions) *T
testCluster.Cores = ret
// Initialize cores
if opts == nil || !opts.SkipInit {
if !opts.SkipInit {
testCluster.initCores(t, opts)
}
@ -1716,11 +1717,9 @@ func NewTestCluster(t testing.TB, base *CoreConfig, opts *TestClusterOptions) *T
}
// Setup
if opts != nil {
if opts.SetupFunc != nil {
testCluster.SetupFunc = func() {
opts.SetupFunc(t, &testCluster)
}
if opts.SetupFunc != nil {
testCluster.SetupFunc = func() {
opts.SetupFunc(t, &testCluster)
}
}