// vault/command/operator_migrate.go

// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package command
import (
"context"
"fmt"
"io/ioutil"
"math"
"net/url"
"os"
"sort"
"strings"
"time"
"github.com/hashicorp/cli"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-secure-stdlib/strutil"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/vault/command/server"
"github.com/hashicorp/vault/helper/random"
"github.com/hashicorp/vault/physical/raft"
"github.com/hashicorp/vault/sdk/helper/logging"
"github.com/hashicorp/vault/sdk/physical"
"github.com/hashicorp/vault/vault"
"github.com/pkg/errors"
"github.com/posener/complete"
"golang.org/x/sync/errgroup"
)
// Compile-time checks that OperatorMigrateCommand implements the cli
// interfaces it is registered under.
var (
_ cli.Command = (*OperatorMigrateCommand)(nil)
_ cli.CommandAutocomplete = (*OperatorMigrateCommand)(nil)
)
// errAbort is the sentinel error returned by migrate when the operator
// interrupts the migration via ShutdownCh; Run treats it as a clean stop
// (exit code 0) rather than a failure.
var errAbort = errors.New("Migration aborted")
// OperatorMigrateCommand implements "vault operator migrate", which copies
// all keys from one physical storage backend to another without requiring a
// running (or unsealed) Vault server.
type OperatorMigrateCommand struct {
*BaseCommand
// PhysicalBackends maps backend type names to their factories; used to
// instantiate both the source and destination backends.
PhysicalBackends map[string]physical.Factory
flagConfig string // path to the migrator HCL config file (-config)
flagLogLevel string // log verbosity (-log-level)
flagStart string // only copy keys lexicographically >= this value (-start)
flagReset bool // reset the migration lock instead of migrating (-reset)
flagMaxParallel int // max parallel copy goroutines (-max-parallel)
logger log.Logger
// ShutdownCh signals an operator-triggered abort (e.g. SIGINT).
ShutdownCh chan struct{}
}
// migratorConfig is the decoded migrator configuration file. The two
// storage stanzas carry `hcl:"-"` because they are parsed manually by
// parseStorage rather than by hcl.DecodeObject.
type migratorConfig struct {
StorageSource *server.Storage `hcl:"-"` // "storage_source" stanza
StorageDestination *server.Storage `hcl:"-"` // "storage_destination" stanza
ClusterAddr string `hcl:"cluster_addr"` // required when the destination is raft
}
// Synopsis returns the one-line description shown in command listings.
func (c *OperatorMigrateCommand) Synopsis() string {
	const synopsis = "Migrates Vault data between storage backends"
	return synopsis
}
// Help returns the long-form usage text for "vault operator migrate",
// combining the static usage string with the generated flag help.
func (c *OperatorMigrateCommand) Help() string {
helpText := `
Usage: vault operator migrate [options]
This command starts a storage backend migration process to copy all data
from one backend to another. This operates directly on encrypted data and
does not require a Vault server, nor any unsealing.
Start a migration with a configuration file:
$ vault operator migrate -config=migrate.hcl
For more information, please see the documentation.
` + c.Flags().Help()
return strings.TrimSpace(helpText)
}
// Flags returns the flag sets understood by "vault operator migrate".
func (c *OperatorMigrateCommand) Flags() *FlagSets {
	sets := NewFlagSets(c.UI)
	opts := sets.NewFlagSet("Command Options")

	// Path to the migrator configuration file.
	opts.StringVar(&StringVar{
		Name:   "config",
		Target: &c.flagConfig,
		Completion: complete.PredictOr(
			complete.PredictFiles("*.hcl"),
		),
		Usage: "Path to a configuration file. This configuration file should " +
			"contain only migrator directives.",
	})

	// Lower bound for the lexicographic key scan.
	opts.StringVar(&StringVar{
		Name:   "start",
		Target: &c.flagStart,
		Usage:  "Only copy keys lexicographically at or after this value.",
	})

	// Lock reset mode: clears the migration lock and exits.
	opts.BoolVar(&BoolVar{
		Name:   "reset",
		Target: &c.flagReset,
		Usage:  "Reset the migration lock. No migration will occur.",
	})

	// Degree of parallelism for the copy phase.
	opts.IntVar(&IntVar{
		Name:    "max-parallel",
		Default: 10,
		Target:  &c.flagMaxParallel,
		Usage: "Specifies the maximum number of parallel migration threads (goroutines) that may be used when migrating. " +
			"This can speed up the migration process on slow backends but uses more resources.",
	})

	// Log verbosity; validated again in Run.
	opts.StringVar(&StringVar{
		Name:       "log-level",
		Target:     &c.flagLogLevel,
		Default:    "info",
		EnvVar:     "VAULT_LOG_LEVEL",
		Completion: complete.PredictSet("trace", "debug", "info", "warn", "error"),
		Usage: "Log verbosity level. Supported values (in order of detail) are " +
			"\"trace\", \"debug\", \"info\", \"warn\", and \"error\". These are not case sensitive.",
	})

	return sets
}
// AutocompleteArgs returns nil: the command takes no positional arguments.
func (c *OperatorMigrateCommand) AutocompleteArgs() complete.Predictor {
return nil
}
// AutocompleteFlags returns shell completions for the command's flags.
func (c *OperatorMigrateCommand) AutocompleteFlags() complete.Flags {
return c.Flags().Completions()
}
// Run parses and validates the CLI arguments, loads the migrator
// configuration, and executes the migration (or, with -reset, clears the
// migration lock). Exit codes: 0 on success or operator-triggered abort,
// 1 on usage/configuration errors, 2 on migration errors.
func (c *OperatorMigrateCommand) Run(args []string) int {
	f := c.Flags()

	if err := f.Parse(args); err != nil {
		c.UI.Error(err.Error())
		return 1
	}

	c.flagLogLevel = strings.ToLower(c.flagLogLevel)
	validLevels := []string{"trace", "debug", "info", "warn", "error"}
	if !strutil.StrListContains(validLevels, c.flagLogLevel) {
		c.UI.Error(fmt.Sprintf("%s is an unknown log level. Valid log levels are: %s", c.flagLogLevel, validLevels))
		return 1
	}
	c.logger = logging.NewVaultLogger(log.LevelFromString(c.flagLogLevel))

	if c.flagMaxParallel < 1 {
		c.UI.Error(fmt.Sprintf("Argument to flag -max-parallel must be between 1 and %d", math.MaxInt))
		return 1
	}

	if c.flagConfig == "" {
		c.UI.Error("Must specify exactly one config path using -config")
		return 1
	}

	config, err := c.loadMigratorConfig(c.flagConfig)
	if err != nil {
		c.UI.Error(fmt.Sprintf("Error loading configuration from %s: %s", c.flagConfig, err))
		return 1
	}

	if err := c.migrate(config); err != nil {
		// errors.Is rather than == so an abort is still recognized even if
		// a future change wraps the sentinel with additional context.
		if errors.Is(err, errAbort) {
			return 0
		}
		c.UI.Error(fmt.Sprintf("Error migrating: %s", err))
		return 2
	}

	if c.flagReset {
		c.UI.Output("Success! Migration lock reset (if it was set).")
	} else {
		c.UI.Output("Success! All of the keys have been migrated.")
	}

	return 0
}
// migrate attempts to instantiate the source and destinations backends,
// and then invoke the migration the root of the keyspace.
func (c *OperatorMigrateCommand) migrate(config *migratorConfig) error {
from, err := c.newBackend(config.StorageSource.Type, config.StorageSource.Config)
if err != nil {
return fmt.Errorf("error mounting 'storage_source': %w", err)
}
if c.flagReset {
2018-10-23 02:34:02 -04:00
if err := SetStorageMigration(from, false); err != nil {
return fmt.Errorf("error resetting migration lock: %w", err)
}
return nil
}
to, err := c.createDestinationBackend(config.StorageDestination.Type, config.StorageDestination.Config, config)
if err != nil {
return fmt.Errorf("error mounting 'storage_destination': %w", err)
}
2018-10-23 02:34:02 -04:00
migrationStatus, err := CheckStorageMigration(from)
if err != nil {
return fmt.Errorf("error checking migration status: %w", err)
}
if migrationStatus != nil {
raft: add support for using backend for ha_storage (#9193) * raft: initial work on raft ha storage support * add note on join * add todo note * raft: add support for bootstrapping and joining existing nodes * raft: gate bootstrap join by reading leader api address from storage * raft: properly check for raft-only for certain conditionals * raft: add bootstrap to api and cli * raft: fix bootstrap cli command * raft: add test for setting up new cluster with raft HA * raft: extend TestRaft_HA_NewCluster to include inmem and consul backends * raft: add test for updating an existing cluster to use raft HA * raft: remove debug log lines, clean up verifyRaftPeers * raft: minor cleanup * raft: minor cleanup * Update physical/raft/raft.go Co-authored-by: Brian Kassouf <briankassouf@users.noreply.github.com> * Update vault/ha.go Co-authored-by: Brian Kassouf <briankassouf@users.noreply.github.com> * Update vault/ha.go Co-authored-by: Brian Kassouf <briankassouf@users.noreply.github.com> * Update vault/logical_system_raft.go Co-authored-by: Brian Kassouf <briankassouf@users.noreply.github.com> * Update vault/raft.go Co-authored-by: Brian Kassouf <briankassouf@users.noreply.github.com> * Update vault/raft.go Co-authored-by: Brian Kassouf <briankassouf@users.noreply.github.com> * address feedback comments * address feedback comments * raft: refactor tls keyring logic * address feedback comments * Update vault/raft.go Co-authored-by: Alexander Bezobchuk <alexanderbez@users.noreply.github.com> * Update vault/raft.go Co-authored-by: Alexander Bezobchuk <alexanderbez@users.noreply.github.com> * address feedback comments * testing: fix import ordering * raft: rename var, cleanup comment line * docs: remove ha_storage restriction note on raft * docs: more raft HA interaction updates with migration and recovery mode * docs: update the raft join command * raft: update comments * raft: add missing isRaftHAOnly check for clearing out state set earlier * raft: update a few ha_storage 
config checks * Update command/operator_raft_bootstrap.go Co-authored-by: Vishal Nayak <vishalnayak@users.noreply.github.com> * raft: address feedback comments * raft: fix panic when checking for config.HAStorage.Type * Update vault/raft.go Co-authored-by: Alexander Bezobchuk <alexanderbez@users.noreply.github.com> * Update website/pages/docs/commands/operator/raft.mdx Co-authored-by: Alexander Bezobchuk <alexanderbez@users.noreply.github.com> * raft: remove bootstrap cli command * Update vault/raft.go Co-authored-by: Brian Kassouf <briankassouf@users.noreply.github.com> * Update vault/raft.go Co-authored-by: Brian Kassouf <briankassouf@users.noreply.github.com> * raft: address review feedback * raft: revert vendored sdk * raft: don't send applied index and node ID info if we're HA-only Co-authored-by: Brian Kassouf <briankassouf@users.noreply.github.com> Co-authored-by: Alexander Bezobchuk <alexanderbez@users.noreply.github.com> Co-authored-by: Vishal Nayak <vishalnayak@users.noreply.github.com>
2020-06-23 15:04:13 -04:00
return fmt.Errorf("storage migration in progress (started: %s)", migrationStatus.Start.Format(time.RFC3339))
}
switch config.StorageSource.Type {
case "raft":
// Raft storage cannot be written to when shutdown. Also the boltDB file
// already uses file locking to ensure two processes are not accessing
// it.
default:
if err := SetStorageMigration(from, true); err != nil {
return fmt.Errorf("error setting migration lock: %w", err)
}
defer SetStorageMigration(from, false)
}
ctx, cancelFunc := context.WithCancel(context.Background())
doneCh := make(chan error)
go func() {
doneCh <- c.migrateAll(ctx, from, to, c.flagMaxParallel)
}()
select {
case err := <-doneCh:
2018-11-02 16:21:44 -04:00
cancelFunc()
return err
case <-c.ShutdownCh:
c.UI.Output("==> Migration shutdown triggered\n")
cancelFunc()
<-doneCh
return errAbort
}
}
// migrateAll copies all keys in lexicographic order.
func (c *OperatorMigrateCommand) migrateAll(ctx context.Context, from physical.Backend, to physical.Backend, maxParallel int) error {
return dfsScan(ctx, from, maxParallel, func(ctx context.Context, path string) error {
2018-10-23 02:34:02 -04:00
if path < c.flagStart || path == storageMigrationLock || path == vault.CoreLockPath {
return nil
}
entry, err := from.Get(ctx, path)
if err != nil {
return fmt.Errorf("error reading entry: %w", err)
}
if entry == nil {
return nil
}
if err := to.Put(ctx, entry); err != nil {
return fmt.Errorf("error writing entry: %w", err)
}
2018-10-09 12:43:17 -04:00
c.logger.Info("copied key", "path", path)
return nil
})
}
// newBackend instantiates a physical backend of the given kind using the
// command's registered factories and logger.
func (c *OperatorMigrateCommand) newBackend(kind string, conf map[string]string) (physical.Backend, error) {
	factory, found := c.PhysicalBackends[kind]
	if !found {
		return nil, fmt.Errorf("no Vault storage backend named: %+q", kind)
	}
	return factory(conf, c.logger)
}
func (c *OperatorMigrateCommand) createDestinationBackend(kind string, conf map[string]string, config *migratorConfig) (physical.Backend, error) {
storage, err := c.newBackend(kind, conf)
if err != nil {
return nil, err
}
switch kind {
case "raft":
if len(config.ClusterAddr) == 0 {
return nil, errors.New("cluster_addr config not set")
}
raftStorage, ok := storage.(*raft.RaftBackend)
if !ok {
return nil, errors.New("wrong storage type for raft backend")
}
parsedClusterAddr, err := url.Parse(config.ClusterAddr)
if err != nil {
return nil, fmt.Errorf("error parsing cluster address: %w", err)
}
raft: add support for using backend for ha_storage (#9193) * raft: initial work on raft ha storage support * add note on join * add todo note * raft: add support for bootstrapping and joining existing nodes * raft: gate bootstrap join by reading leader api address from storage * raft: properly check for raft-only for certain conditionals * raft: add bootstrap to api and cli * raft: fix bootstrap cli command * raft: add test for setting up new cluster with raft HA * raft: extend TestRaft_HA_NewCluster to include inmem and consul backends * raft: add test for updating an existing cluster to use raft HA * raft: remove debug log lines, clean up verifyRaftPeers * raft: minor cleanup * raft: minor cleanup * Update physical/raft/raft.go Co-authored-by: Brian Kassouf <briankassouf@users.noreply.github.com> * Update vault/ha.go Co-authored-by: Brian Kassouf <briankassouf@users.noreply.github.com> * Update vault/ha.go Co-authored-by: Brian Kassouf <briankassouf@users.noreply.github.com> * Update vault/logical_system_raft.go Co-authored-by: Brian Kassouf <briankassouf@users.noreply.github.com> * Update vault/raft.go Co-authored-by: Brian Kassouf <briankassouf@users.noreply.github.com> * Update vault/raft.go Co-authored-by: Brian Kassouf <briankassouf@users.noreply.github.com> * address feedback comments * address feedback comments * raft: refactor tls keyring logic * address feedback comments * Update vault/raft.go Co-authored-by: Alexander Bezobchuk <alexanderbez@users.noreply.github.com> * Update vault/raft.go Co-authored-by: Alexander Bezobchuk <alexanderbez@users.noreply.github.com> * address feedback comments * testing: fix import ordering * raft: rename var, cleanup comment line * docs: remove ha_storage restriction note on raft * docs: more raft HA interaction updates with migration and recovery mode * docs: update the raft join command * raft: update comments * raft: add missing isRaftHAOnly check for clearing out state set earlier * raft: update a few ha_storage 
config checks * Update command/operator_raft_bootstrap.go Co-authored-by: Vishal Nayak <vishalnayak@users.noreply.github.com> * raft: address feedback comments * raft: fix panic when checking for config.HAStorage.Type * Update vault/raft.go Co-authored-by: Alexander Bezobchuk <alexanderbez@users.noreply.github.com> * Update website/pages/docs/commands/operator/raft.mdx Co-authored-by: Alexander Bezobchuk <alexanderbez@users.noreply.github.com> * raft: remove bootstrap cli command * Update vault/raft.go Co-authored-by: Brian Kassouf <briankassouf@users.noreply.github.com> * Update vault/raft.go Co-authored-by: Brian Kassouf <briankassouf@users.noreply.github.com> * raft: address review feedback * raft: revert vendored sdk * raft: don't send applied index and node ID info if we're HA-only Co-authored-by: Brian Kassouf <briankassouf@users.noreply.github.com> Co-authored-by: Alexander Bezobchuk <alexanderbez@users.noreply.github.com> Co-authored-by: Vishal Nayak <vishalnayak@users.noreply.github.com>
2020-06-23 15:04:13 -04:00
if err := raftStorage.Bootstrap([]raft.Peer{
{
ID: raftStorage.NodeID(),
Address: parsedClusterAddr.Host,
},
}); err != nil {
return nil, fmt.Errorf("could not bootstrap clustered storage: %w", err)
}
if err := raftStorage.SetupCluster(context.Background(), raft.SetupOpts{
StartAsLeader: true,
}); err != nil {
return nil, fmt.Errorf("could not start clustered storage: %w", err)
}
}
return storage, nil
}
// loadMigratorConfig loads the configuration at the given path
func (c *OperatorMigrateCommand) loadMigratorConfig(path string) (*migratorConfig, error) {
fi, err := os.Stat(path)
if err != nil {
return nil, err
}
if fi.IsDir() {
return nil, fmt.Errorf("location is a directory, not a file")
}
d, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
VAULT-32657 deprecate duplicate attributes in HCL configs and policies (#30386) * upgrade hcl dependency on api pkg This upgrades the hcl dependency for the API pkg, and adapts its usage so users of our API pkg are not affected. There's no good way of communicating a warning via a library call so we don't. The tokenHelper which is used by all Vault CLI commands in order to create the Vault client, as well as directly used by the login and server commands, is implemented on the api pkg, so this upgrade also affects all of those commands. Seems like this was only moved to the api pkg because the Terraform provider uses it, and I thought creating a full copy of all those files back under command would be too much spaghetti. Also leaving some TODOs to make next deprecation steps easier. * upgrade hcl dependency in vault and sdk pkgs * upgrade hcl dependency in vault and sdk pkgs * add CLI warnings to commands that take a config - vault agent (unit test on CMD warning) - vault proxy (unit test on CMD warning) - vault server (no test for the warning) - vault operator diagnose (no tests at all, uses the same function as vault server * ignore duplicates on ParseKMSes function * Extend policy parsing functions and warn on policy store * Add warning on policy fmt with duplicate attributes * Add warnings when creating/updating policy with duplicate HCL attrs * Add log warning when switchedGetPolicy finds duplicate attrs Following operations can trigger this warning when they run into a policy with duplicate attributes: * replication filtered path namespaces invalidation * policy read API * building an ACL (for many different purposes like most authZ operations) * looking up DR token policies * creating a token with named policies * when caching the policies for all namespaces during unseal * Print log warnings when token inline policy has duplicate attrs No unit tests on these as new test infra would have to be built on all. 
Operations affected, which will now print a log warning when the retrieved token has an inline policy with duplicate attributes: * capabilities endpoints in sys mount * handing events under a subscription with a token with duplicate attrs in inline policies * token used to create another token has duplicate attrs in inline policies (sudo check) * all uses of fetchACLTokenEntryAndEntity when the request uses a token with inline policies with duplicate attrs. Almost all reqs are subject to this * when tokens are created with inline policies (unclear exactly how that can happen) * add changelog and deprecation notice * add missing copywrite notice * fix copy-paste mistake good thing it was covered by unit tests * Fix manual parsing of telemetry field in SharedConfig This commit in the hcl library was not in the v1.0.1-vault-5 version we're using but is included in v1.0.1-vault-7: https://github.com/hashicorp/hcl/commit/e80118accb521e47bc5b93104bf46c67d89d2242 This thing of reusing when parsing means that our approach of manually re-parsing fields on top of fields that have already been parsed by the hcl annotation causes strings (maybe more?) to concatenate. Fix that by removing annotation. There's actually more occurrences of this thing of automatically parsing something that is also manually parsing. In some places we could just remove the boilerplate manual parsing, in others we better remove the auto parsing, but I don't wanna pull at that thread right now. I just checked that all places at least fully overwrite the automatically parsed field instead of reusing it as the target of the decode call. The only exception is the AOP field on ent but that doesn't have maps or slices, so I think it's fine. 
An alternative approach would be to ensure that the auto-parsed value is discarded, like the current parseCache function does note how it's template not templates * Fix linter complaints * Update command/base_predict.go Co-authored-by: Mike Palmiotto <mike.palmiotto@hashicorp.com> * address review * remove copywrite headers * re-add copywrite headers * make fmt * Update website/content/partials/deprecation/duplicate-hcl-attributes.mdx Co-authored-by: Sarah Chavis <62406755+schavis@users.noreply.github.com> * Update website/content/partials/deprecation/duplicate-hcl-attributes.mdx Co-authored-by: Sarah Chavis <62406755+schavis@users.noreply.github.com> * Update website/content/partials/deprecation/duplicate-hcl-attributes.mdx Co-authored-by: Sarah Chavis <62406755+schavis@users.noreply.github.com> * undo changes to deprecation.mdx * remove deprecation doc * fix conflict with changes from main --------- Co-authored-by: Mike Palmiotto <mike.palmiotto@hashicorp.com> Co-authored-by: Sarah Chavis <62406755+schavis@users.noreply.github.com>
2025-05-23 15:02:07 -04:00
// TODO (HCL_DUP_KEYS_DEPRECATION): Return to hcl.Parse once duplicates are forbidden
obj, duplicate, err := random.ParseAndCheckForDuplicateHclAttributes(string(d))
if err != nil {
return nil, err
}
VAULT-32657 deprecate duplicate attributes in HCL configs and policies (#30386) * upgrade hcl dependency on api pkg This upgrades the hcl dependency for the API pkg, and adapts its usage so users of our API pkg are not affected. There's no good way of communicating a warning via a library call so we don't. The tokenHelper which is used by all Vault CLI commands in order to create the Vault client, as well as directly used by the login and server commands, is implemented on the api pkg, so this upgrade also affects all of those commands. Seems like this was only moved to the api pkg because the Terraform provider uses it, and I thought creating a full copy of all those files back under command would be too much spaghetti. Also leaving some TODOs to make next deprecation steps easier. * upgrade hcl dependency in vault and sdk pkgs * upgrade hcl dependency in vault and sdk pkgs * add CLI warnings to commands that take a config - vault agent (unit test on CMD warning) - vault proxy (unit test on CMD warning) - vault server (no test for the warning) - vault operator diagnose (no tests at all, uses the same function as vault server * ignore duplicates on ParseKMSes function * Extend policy parsing functions and warn on policy store * Add warning on policy fmt with duplicate attributes * Add warnings when creating/updating policy with duplicate HCL attrs * Add log warning when switchedGetPolicy finds duplicate attrs Following operations can trigger this warning when they run into a policy with duplicate attributes: * replication filtered path namespaces invalidation * policy read API * building an ACL (for many different purposes like most authZ operations) * looking up DR token policies * creating a token with named policies * when caching the policies for all namespaces during unseal * Print log warnings when token inline policy has duplicate attrs No unit tests on these as new test infra would have to be built on all. 
Operations affected, which will now print a log warning when the retrieved token has an inline policy with duplicate attributes: * capabilities endpoints in sys mount * handing events under a subscription with a token with duplicate attrs in inline policies * token used to create another token has duplicate attrs in inline policies (sudo check) * all uses of fetchACLTokenEntryAndEntity when the request uses a token with inline policies with duplicate attrs. Almost all reqs are subject to this * when tokens are created with inline policies (unclear exactly how that can happen) * add changelog and deprecation notice * add missing copywrite notice * fix copy-paste mistake good thing it was covered by unit tests * Fix manual parsing of telemetry field in SharedConfig This commit in the hcl library was not in the v1.0.1-vault-5 version we're using but is included in v1.0.1-vault-7: https://github.com/hashicorp/hcl/commit/e80118accb521e47bc5b93104bf46c67d89d2242 This thing of reusing when parsing means that our approach of manually re-parsing fields on top of fields that have already been parsed by the hcl annotation causes strings (maybe more?) to concatenate. Fix that by removing annotation. There's actually more occurrences of this thing of automatically parsing something that is also manually parsing. In some places we could just remove the boilerplate manual parsing, in others we better remove the auto parsing, but I don't wanna pull at that thread right now. I just checked that all places at least fully overwrite the automatically parsed field instead of reusing it as the target of the decode call. The only exception is the AOP field on ent but that doesn't have maps or slices, so I think it's fine. 
An alternative approach would be to ensure that the auto-parsed value is discarded, like the current parseCache function does note how it's template not templates * Fix linter complaints * Update command/base_predict.go Co-authored-by: Mike Palmiotto <mike.palmiotto@hashicorp.com> * address review * remove copywrite headers * re-add copywrite headers * make fmt * Update website/content/partials/deprecation/duplicate-hcl-attributes.mdx Co-authored-by: Sarah Chavis <62406755+schavis@users.noreply.github.com> * Update website/content/partials/deprecation/duplicate-hcl-attributes.mdx Co-authored-by: Sarah Chavis <62406755+schavis@users.noreply.github.com> * Update website/content/partials/deprecation/duplicate-hcl-attributes.mdx Co-authored-by: Sarah Chavis <62406755+schavis@users.noreply.github.com> * undo changes to deprecation.mdx * remove deprecation doc * fix conflict with changes from main --------- Co-authored-by: Mike Palmiotto <mike.palmiotto@hashicorp.com> Co-authored-by: Sarah Chavis <62406755+schavis@users.noreply.github.com>
2025-05-23 15:02:07 -04:00
if duplicate {
c.UI.Warn("WARNING: Duplicate keys found in migration configuration file, duplicate keys in HCL files are deprecated and will be forbidden in a future release.")
}
var result migratorConfig
if err := hcl.DecodeObject(&result, obj); err != nil {
return nil, err
}
list, ok := obj.Node.(*ast.ObjectList)
if !ok {
return nil, fmt.Errorf("error parsing: file doesn't contain a root object")
}
// Look for storage_* stanzas
for _, stanza := range []string{"storage_source", "storage_destination"} {
o := list.Filter(stanza)
if len(o.Items) != 1 {
return nil, fmt.Errorf("exactly one %q block is required", stanza)
}
if err := parseStorage(&result, o, stanza); err != nil {
return nil, fmt.Errorf("error parsing %q: %w", stanza, err)
}
}
return &result, nil
}
// parseStorage reuses the server package's storage-stanza parsing but keeps
// only the resulting storage configuration, assigning it to the matching
// field of the migrator config.
func parseStorage(result *migratorConfig, list *ast.ObjectList, name string) error {
	scratch := new(server.Config)
	if err := server.ParseStorage(scratch, list, name); err != nil {
		return err
	}

	if name == "storage_source" {
		result.StorageSource = scratch.Storage
		return nil
	}
	if name == "storage_destination" {
		result.StorageDestination = scratch.Storage
		return nil
	}
	return fmt.Errorf("unknown storage name: %s", name)
}
// dfsScan will invoke cb with every key from source.
// Keys will be traversed in lexicographic, depth-first order.
// Directory-style keys (trailing "/") are expanded inline; leaf keys are
// dispatched to an errgroup pool bounded by maxParallel. Returns the first
// error from cb, a List failure, or ctx cancellation.
func dfsScan(ctx context.Context, source physical.Backend, maxParallel int, cb func(ctx context.Context, path string) error) error {
// dfs is an explicit stack; "" is the root of the keyspace.
dfs := []string{""}
eg, ctx := errgroup.WithContext(ctx)
eg.SetLimit(maxParallel)
for l := len(dfs); l > 0; l = len(dfs) {
// Check for cancellation
select {
case <-ctx.Done():
return ctx.Err()
default:
}
// Peek at the top of the stack.
key := dfs[len(dfs)-1]
if key == "" || strings.HasSuffix(key, "/") {
// "Directory" entry: list and expand its children.
children, err := source.List(ctx, key)
if err != nil {
return fmt.Errorf("failed to scan for children: %w", err)
}
sort.Strings(children)
// remove List-triggering key and add children in reverse order
// so the lexicographically smallest child is popped first.
dfs = dfs[:len(dfs)-1]
for i := len(children) - 1; i >= 0; i-- {
if children[i] != "" {
dfs = append(dfs, key+children[i])
}
}
} else {
// Pooling: leaf keys are processed concurrently, bounded by
// the errgroup's limit set above.
eg.Go(func() error {
return cb(ctx, key)
})
dfs = dfs[:len(dfs)-1]
}
}
// Wait for in-flight callbacks; returns the first error, if any.
return eg.Wait()
}