2019-08-24 05:24:45 -04:00
|
|
|
// Copyright 2019 The Gitea Authors. All rights reserved.
|
2022-11-27 13:20:29 -05:00
|
|
|
// SPDX-License-Identifier: MIT
|
2019-08-24 05:24:45 -04:00
|
|
|
|
|
|
|
|
package setting
|
|
|
|
|
|
|
|
|
|
import (
|
|
|
|
|
"errors"
|
|
|
|
|
"fmt"
|
2023-11-01 14:00:20 -04:00
|
|
|
"net"
|
2019-08-24 05:24:45 -04:00
|
|
|
"net/url"
|
|
|
|
|
"os"
|
|
|
|
|
"path/filepath"
|
2025-03-30 07:34:02 -04:00
|
|
|
"strconv"
|
2019-08-24 05:24:45 -04:00
|
|
|
"strings"
|
|
|
|
|
"time"
|
2025-03-30 07:34:02 -04:00
|
|
|
|
|
|
|
|
"xorm.io/xorm"
|
2019-08-24 05:24:45 -04:00
|
|
|
)
|
|
|
|
|
|
|
|
|
|
var (
	// SupportedDatabaseTypes includes all XORM supported databases type, sqlite3 maybe added by `database_sqlite3.go`
	SupportedDatabaseTypes = []string{"mysql", "postgres"}
	// DatabaseTypeNames contains the friendly names for all database types
	DatabaseTypeNames = map[string]string{"mysql": "MySQL", "postgres": "PostgreSQL", "sqlite3": "SQLite3"}

	// EnableSQLite3 use SQLite3, set by build flag
	EnableSQLite3 bool

	// Database holds the database settings.
	// The initial values here are only placeholders: loadDBSetting overwrites
	// every field from the [database] configuration section (when unset there,
	// Timeout becomes 60000 and IterateBufferSize stays 50).
	Database = DatabaseSettings{
		Timeout:           500,
		IterateBufferSize: 50,
	}
)
|
|
|
|
|
|
2025-03-30 07:34:02 -04:00
|
|
|
// DatabaseSettings holds the settings read from the [database] configuration
// section; see loadDBSetting for the key names and defaults.
type DatabaseSettings struct {
	Type               DatabaseType // "mysql", "postgres" or "sqlite3"
	Host               string       // single host; a value starting with '/' is treated as a unix socket path
	HostPrimary        string       // optional primary (master) host; takes precedence over Host
	HostReplica        string       // optional comma-separated list of read-replica hosts
	LoadBalancePolicy  string       // replica selection policy name, interpreted by BuildLoadBalancePolicy
	LoadBalanceWeights string       // comma-separated integer weights for the weighted policies
	Name               string       // database name; may carry extra connection parameters after '?'
	User               string
	Passwd             string
	Schema             string // PostgreSQL schema
	SSLMode            string
	Path               string // SQLite database file path (or ":memory:")
	LogSQL             bool
	MysqlCharset       string // NOTE(review): not referenced in this file — possibly legacy; confirm before relying on it
	CharsetCollation   string
	Timeout            int // SQLite busy timeout in milliseconds (becomes _busy_timeout in the DSN)
	SQLiteJournalMode  string
	DBConnectRetries   int
	DBConnectBackoff   time.Duration
	MaxIdleConns       int
	MaxOpenConns       int
	ConnMaxIdleTime    time.Duration
	ConnMaxLifetime    time.Duration
	IterateBufferSize  int
	AutoMigration      bool
	SlowQueryThreshold time.Duration
}
|
|
|
|
|
|
2023-02-19 11:12:01 -05:00
|
|
|
// LoadDBSetting loads the database settings from the global CfgProvider
// into the package-level Database variable.
func LoadDBSetting() {
	loadDBSetting(CfgProvider)
}
|
|
|
|
|
|
|
|
|
|
func loadDBSetting(rootCfg ConfigProvider) {
|
|
|
|
|
sec := rootCfg.Section("database")
|
2023-03-07 05:51:06 -05:00
|
|
|
Database.Type = DatabaseType(sec.Key("DB_TYPE").String())
|
|
|
|
|
|
2019-08-24 05:24:45 -04:00
|
|
|
Database.Host = sec.Key("HOST").String()
|
2025-03-30 07:34:02 -04:00
|
|
|
Database.HostPrimary = sec.Key("HOST_PRIMARY").String()
|
|
|
|
|
Database.HostReplica = sec.Key("HOST_REPLICA").String()
|
|
|
|
|
Database.LoadBalancePolicy = sec.Key("LOAD_BALANCE_POLICY").String()
|
|
|
|
|
Database.LoadBalanceWeights = sec.Key("LOAD_BALANCE_WEIGHTS").String()
|
2019-08-24 05:24:45 -04:00
|
|
|
Database.Name = sec.Key("NAME").String()
|
|
|
|
|
Database.User = sec.Key("USER").String()
|
|
|
|
|
if len(Database.Passwd) == 0 {
|
2025-12-18 14:55:56 -05:00
|
|
|
Database.Passwd = loadSecret(sec, "PASSWD_URI", "PASSWD")
|
2019-08-24 05:24:45 -04:00
|
|
|
}
|
2020-01-20 10:45:14 -05:00
|
|
|
Database.Schema = sec.Key("SCHEMA").String()
|
2019-08-24 05:24:45 -04:00
|
|
|
Database.SSLMode = sec.Key("SSL_MODE").MustString("disable")
|
2024-01-10 06:03:23 -05:00
|
|
|
Database.CharsetCollation = sec.Key("CHARSET_COLLATION").String()
|
2022-06-13 08:55:08 -04:00
|
|
|
|
2022-12-19 15:01:46 -05:00
|
|
|
Database.Path = sec.Key("PATH").MustString(filepath.Join(AppDataPath, "forgejo.db"))
|
fix: improve SQLite "database is locked" errors by increasing default `SQLITE_TIMEOUT` (take 2) (#11292)
Increase the default `SQLITE_TIMEOUT` from 500ms to 60s.
In #11179 this was bumped up to 5s. But when that was backported to v14 in #11220, it failed consistently in CI through a couple increases, until it was bumped up further to 60s. This PR updates the `forgejo` branch to the same so that in the future when Forgejo 15 is released, it isn't regressed. `test-sqlite` has been failing on `forgejo` occasionally as well, so this increase is justified on this branch for this reason as well.
Putting aside the tests, I think a high value for the timeout (this 60s) is generally safer for production usage than a small timeout. The worst case with a high timeout is a slow request when there is high write contention on the DB.
## Checklist
The [contributor guide](https://forgejo.org/docs/next/contributor/) contains information that will be helpful to first time contributors. There also are a few [conditions for merging Pull Requests in Forgejo repositories](https://codeberg.org/forgejo/governance/src/branch/main/PullRequestsAgreement.md). You are also welcome to join the [Forgejo development chatroom](https://matrix.to/#/#forgejo-development:matrix.org).
### Tests
- I added test coverage for Go changes...
- [ ] in their respective `*_test.go` for unit tests.
- [ ] in the `tests/integration` directory if it involves interactions with a live Forgejo server.
- I added test coverage for JavaScript changes...
- [ ] in `web_src/js/*.test.js` if it can be unit tested.
- [ ] in `tests/e2e/*.test.e2e.js` if it requires interactions with a live Forgejo server (see also the [developer guide for JavaScript testing](https://codeberg.org/forgejo/forgejo/src/branch/forgejo/tests/e2e/README.md#end-to-end-tests)).
### Documentation
- [ ] I created a pull request [to the documentation](https://codeberg.org/forgejo/docs) to explain to Forgejo users how to use this change.
- **Will update** the documentation for the default value if approved.
- [ ] I did not document these changes and I do not expect someone else to do it.
### Release notes
- [x] This change will be noticed by a Forgejo user or admin (feature, bug fix, performance, etc.). I suggest to include a release note for this change.
- [ ] This change is not visible to a Forgejo user or admin (refactor, dependency upgrade, etc.). I think there is no need to add a release note for this change.
*The decision if the pull request will be shown in the release notes is up to the mergers / release team.*
The content of the `release-notes/<pull request number>.md` file will serve as the basis for the release notes. If the file does not exist, the title of the pull request will be used instead.
<!--start release-notes-assistant-->
## Release notes
<!--URL:https://codeberg.org/forgejo/forgejo-->
- Other changes without a feature or bug label
- [PR](https://codeberg.org/forgejo/forgejo/pulls/11292): <!--number 11292 --><!--line 0 --><!--description aW1wcm92ZSBTUUxpdGUgImRhdGFiYXNlIGlzIGxvY2tlZCIgZXJyb3JzIGJ5IGluY3JlYXNpbmcgZGVmYXVsdCBgU1FMSVRFX1RJTUVPVVRgICh0YWtlIDIp-->improve SQLite "database is locked" errors by increasing default `SQLITE_TIMEOUT` (take 2)<!--description-->
<!--end release-notes-assistant-->
Co-authored-by: forgejo-backport-action <forgejo-backport-action@noreply.codeberg.org>
Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/11292
Reviewed-by: Michael Kriese <michael.kriese@gmx.de>
2026-02-14 16:34:09 -05:00
|
|
|
Database.Timeout = sec.Key("SQLITE_TIMEOUT").MustInt(60000)
|
2026-01-27 07:59:01 -05:00
|
|
|
Database.SQLiteJournalMode = sec.Key("SQLITE_JOURNAL_MODE").MustString("WAL")
|
2022-07-30 15:57:41 -04:00
|
|
|
|
2019-10-21 17:20:47 -04:00
|
|
|
Database.MaxIdleConns = sec.Key("MAX_IDLE_CONNS").MustInt(2)
|
2023-03-07 05:51:06 -05:00
|
|
|
if Database.Type.IsMySQL() {
|
2021-11-17 05:59:23 -05:00
|
|
|
Database.ConnMaxLifetime = sec.Key("CONN_MAX_LIFETIME").MustDuration(3 * time.Second)
|
2019-10-21 17:20:47 -04:00
|
|
|
} else {
|
2021-11-17 05:59:23 -05:00
|
|
|
Database.ConnMaxLifetime = sec.Key("CONN_MAX_LIFETIME").MustDuration(0)
|
2019-10-21 17:20:47 -04:00
|
|
|
}
|
2024-02-21 07:17:16 -05:00
|
|
|
Database.ConnMaxIdleTime = sec.Key("CONN_MAX_IDLETIME").MustDuration(0)
|
2026-01-14 04:10:02 -05:00
|
|
|
Database.MaxOpenConns = sec.Key("MAX_OPEN_CONNS").MustInt(30)
|
2019-08-24 05:24:45 -04:00
|
|
|
|
|
|
|
|
Database.IterateBufferSize = sec.Key("ITERATE_BUFFER_SIZE").MustInt(50)
|
Rewrite logger system (#24726)
## ⚠️ Breaking
The `log.<mode>.<logger>` style config has been dropped. If you used it,
please check the new config manual & app.example.ini to make your
instance output logs as expected.
Although many legacy options still work, it's encouraged to upgrade to
the new options.
The SMTP logger is deleted because SMTP is not suitable to collect logs.
If you have manually configured Gitea log options, please confirm the
logger system works as expected after upgrading.
## Description
Close #12082 and maybe more log-related issues, resolve some related
FIXMEs in old code (which seems unfixable before)
Just like rewriting queue #24505 : make code maintainable, clear legacy
bugs, and add the ability to support more writers (eg: JSON, structured
log)
There is a new document (with examples): `logging-config.en-us.md`
This PR is safer than the queue rewriting, because it's just for
logging, it won't break other logic.
## The old problems
The logging system is quite old and difficult to maintain:
* Unclear concepts: Logger, NamedLogger, MultiChannelledLogger,
SubLogger, EventLogger, WriterLogger etc
* Some code is diffuclt to konw whether it is right:
`log.DelNamedLogger("console")` vs `log.DelNamedLogger(log.DEFAULT)` vs
`log.DelLogger("console")`
* The old system heavily depends on ini config system, it's difficult to
create new logger for different purpose, and it's very fragile.
* The "color" trick is difficult to use and read, many colors are
unnecessary, and in the future structured log could help
* It's difficult to add other log formats, eg: JSON format
* The log outputer doesn't have full control of its goroutine, it's
difficult to make outputer have advanced behaviors
* The logs could be lost in some cases: eg: no Fatal error when using
CLI.
* Config options are passed by JSON, which is quite fragile.
* INI package makes the KEY in `[log]` section visible in `[log.sub1]`
and `[log.sub1.subA]`, this behavior is quite fragile and would cause
more unclear problems, and there is no strong requirement to support
`log.<mode>.<logger>` syntax.
## The new design
See `logger.go` for documents.
## Screenshot
<details>



</details>
## TODO
* [x] add some new tests
* [x] fix some tests
* [x] test some sub-commands (manually ....)
---------
Co-authored-by: Jason Song <i@wolfogre.com>
Co-authored-by: delvh <dev.lh@web.de>
Co-authored-by: Giteabot <teabot@gitea.io>
2023-05-21 18:35:11 -04:00
|
|
|
Database.LogSQL = sec.Key("LOG_SQL").MustBool(false)
|
2019-08-24 05:24:45 -04:00
|
|
|
Database.DBConnectRetries = sec.Key("DB_RETRIES").MustInt(10)
|
|
|
|
|
Database.DBConnectBackoff = sec.Key("DB_RETRY_BACKOFF").MustDuration(3 * time.Second)
|
2022-12-07 10:58:31 -05:00
|
|
|
Database.AutoMigration = sec.Key("AUTO_MIGRATION").MustBool(true)
|
2024-01-24 10:55:34 -05:00
|
|
|
|
|
|
|
|
deprecatedSetting(rootCfg, "database", "SLOW_QUERY_TRESHOLD", "database", "SLOW_QUERY_THRESHOLD", "1.23")
|
|
|
|
|
if sec.HasKey("SLOW_QUERY_TRESHOLD") && !sec.HasKey("SLOW_QUERY_THRESHOLD") {
|
|
|
|
|
Database.SlowQueryThreshold = sec.Key("SLOW_QUERY_TRESHOLD").MustDuration(5 * time.Second)
|
|
|
|
|
} else {
|
|
|
|
|
Database.SlowQueryThreshold = sec.Key("SLOW_QUERY_THRESHOLD").MustDuration(5 * time.Second)
|
|
|
|
|
}
|
2019-08-24 05:24:45 -04:00
|
|
|
}
|
|
|
|
|
|
2025-03-30 07:34:02 -04:00
|
|
|
// DBMasterConnStr returns the connection string for the master (primary) database.
|
|
|
|
|
// If a primary host is defined in the configuration, it is used;
|
|
|
|
|
// otherwise, it falls back to Database.Host.
|
|
|
|
|
// Returns an error if no master host is provided but a slave is defined.
|
|
|
|
|
func DBMasterConnStr() (string, error) {
|
|
|
|
|
var host string
|
|
|
|
|
if Database.HostPrimary != "" {
|
|
|
|
|
host = Database.HostPrimary
|
|
|
|
|
} else {
|
|
|
|
|
host = Database.Host
|
|
|
|
|
}
|
|
|
|
|
if host == "" && Database.HostReplica != "" {
|
|
|
|
|
return "", errors.New("master host is not defined while slave is defined; cannot proceed")
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// For SQLite, no host is needed
|
|
|
|
|
if host == "" && !Database.Type.IsSQLite3() {
|
|
|
|
|
return "", errors.New("no database host defined")
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return dbConnStrWithHost(host)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// DBSlaveConnStrs returns one or more connection strings for the replica databases.
|
|
|
|
|
// If a replica host is defined (possibly as a comma-separated list) then those DSNs are returned.
|
|
|
|
|
// Otherwise, this function falls back to the master DSN (with a warning log).
|
|
|
|
|
func DBSlaveConnStrs() ([]string, error) {
|
|
|
|
|
var dsns []string
|
|
|
|
|
if Database.HostReplica != "" {
|
|
|
|
|
// support multiple replica hosts separated by commas
|
|
|
|
|
replicas := strings.SplitSeq(Database.HostReplica, ",")
|
|
|
|
|
for r := range replicas {
|
|
|
|
|
trimmed := strings.TrimSpace(r)
|
|
|
|
|
if trimmed == "" {
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
dsn, err := dbConnStrWithHost(trimmed)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
dsns = append(dsns, dsn)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return dsns, nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func BuildLoadBalancePolicy(settings *DatabaseSettings, slaveEngines []*xorm.Engine) xorm.GroupPolicy {
|
|
|
|
|
var policy xorm.GroupPolicy
|
|
|
|
|
switch settings.LoadBalancePolicy { // Use the settings parameter directly
|
|
|
|
|
case "WeightRandom":
|
|
|
|
|
var weights []int
|
|
|
|
|
if settings.LoadBalanceWeights != "" { // Use the settings parameter directly
|
|
|
|
|
for part := range strings.SplitSeq(settings.LoadBalanceWeights, ",") {
|
|
|
|
|
w, err := strconv.Atoi(strings.TrimSpace(part))
|
|
|
|
|
if err != nil {
|
|
|
|
|
w = 1 // use a default weight if conversion fails
|
|
|
|
|
}
|
|
|
|
|
weights = append(weights, w)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
// If no valid weights were provided, default each slave to weight 1
|
|
|
|
|
if len(weights) == 0 {
|
|
|
|
|
weights = make([]int, len(slaveEngines))
|
|
|
|
|
for i := range weights {
|
|
|
|
|
weights[i] = 1
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
policy = xorm.WeightRandomPolicy(weights)
|
2025-05-13 21:17:57 -04:00
|
|
|
case "WeightRoundRobin":
|
|
|
|
|
var weights []int
|
|
|
|
|
if settings.LoadBalanceWeights != "" {
|
|
|
|
|
for part := range strings.SplitSeq(settings.LoadBalanceWeights, ",") {
|
|
|
|
|
w, err := strconv.Atoi(strings.TrimSpace(part))
|
|
|
|
|
if err != nil {
|
|
|
|
|
w = 1 // use a default weight if conversion fails
|
|
|
|
|
}
|
|
|
|
|
weights = append(weights, w)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
// If no valid weights were provided, default each slave to weight 1
|
|
|
|
|
if len(weights) == 0 {
|
|
|
|
|
weights = make([]int, len(slaveEngines))
|
|
|
|
|
for i := range weights {
|
|
|
|
|
weights[i] = 1
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
policy = xorm.WeightRoundRobinPolicy(weights)
|
2025-03-30 07:34:02 -04:00
|
|
|
case "RoundRobin":
|
|
|
|
|
policy = xorm.RoundRobinPolicy()
|
2025-05-13 21:17:57 -04:00
|
|
|
case "LeastConn":
|
|
|
|
|
policy = xorm.LeastConnPolicy()
|
2025-03-30 07:34:02 -04:00
|
|
|
default:
|
|
|
|
|
policy = xorm.RandomPolicy()
|
|
|
|
|
}
|
|
|
|
|
return policy
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// dbConnStrWithHost constructs the connection string, given a host value.
// All other connection parameters (user, password, database name, SSL mode,
// SQLite path/timeout) are taken from the package-level Database settings.
// For sqlite3 it also creates the directory that will hold the database file.
// Returns an error for an unknown database type or when SQLite support was
// not compiled in.
func dbConnStrWithHost(host string) (string, error) {
	var connStr string
	// if the database name already carries query parameters ("name?a=b"),
	// further parameters must be appended with '&' instead of '?'
	paramSep := "?"
	if strings.Contains(Database.Name, paramSep) {
		paramSep = "&"
	}
	switch Database.Type {
	case "mysql":
		connType := "tcp"
		// if the host starts with '/' it is assumed to be a unix socket path
		if len(host) > 0 && host[0] == '/' {
			connType = "unix"
		}
		tls := Database.SSLMode
		// allow the "disable" value (borrowed from Postgres defaults) to behave as false
		if tls == "disable" {
			tls = "false"
		}
		connStr = fmt.Sprintf("%s:%s@%s(%s)/%s%sparseTime=true&tls=%s",
			Database.User, Database.Passwd, connType, host, Database.Name, paramSep, tls)
	case "postgres":
		connStr = getPostgreSQLConnectionString(host, Database.User, Database.Passwd, Database.Name, Database.SSLMode)
	case "sqlite3":
		if !EnableSQLite3 {
			return "", errors.New("this Gitea binary was not built with SQLite3 support")
		}
		// side effect: make sure the directory for the database file exists
		if err := os.MkdirAll(filepath.Dir(Database.Path), os.ModePerm); err != nil {
			return "", fmt.Errorf("failed to create directories: %w", err)
		}
		opts := ""
		if Database.SQLiteJournalMode != "" {
			opts = "&_journal_mode=" + Database.SQLiteJournalMode
		}
		// in memory mode needs shared cache to be usable by multiple connections
		// only used in tests normally
		if Database.Path == ":memory:" {
			opts += "&cache=shared"
		} else {
			opts += "&mode=rwc"
		}
		// Database.Timeout is the busy timeout in milliseconds;
		// _txlock=immediate makes write transactions take the lock up front
		connStr = fmt.Sprintf("file:%s?_busy_timeout=%d&_txlock=immediate%s", Database.Path, Database.Timeout, opts)
	default:
		return "", fmt.Errorf("unknown database type: %s", Database.Type)
	}

	return connStr, nil
}
|
|
|
|
|
|
|
|
|
|
// parsePostgreSQLHostPort parses given input in various forms defined in
// https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING
// and returns proper host and port number, falling back to 127.0.0.1 and
// 5432 respectively when a component is missing.
func parsePostgreSQLHostPort(info string) (host, port string) {
	if h, p, err := net.SplitHostPort(info); err == nil {
		host, port = h, p
	} else {
		// treat the "info" as "host", if it's an IPv6 address, remove the wrapper
		host = info
		if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
			host = host[1 : len(host)-1]
		}
	}

	// set fallback values
	if host == "" {
		host = "127.0.0.1"
	}
	if port == "" {
		port = "5432"
	}
	return host, port
}

// buildPostgresConnURL assembles the final "postgres://" URL for the given
// host component. dbParam holds pre-existing query parameters (taken from a
// "name?params" database name); sslmode is always (re)set. A non-empty
// socketHost is added as the "host" query parameter, which is how libpq/pgx
// address a unix socket directory.
func buildPostgresConnURL(hostPart, dbUser, dbPasswd, dbName, dbParam, dbsslMode, socketHost string) string {
	connURL := url.URL{
		Scheme:   "postgres",
		User:     url.UserPassword(dbUser, dbPasswd),
		Host:     hostPart,
		Path:     dbName,
		OmitHost: false,
		RawQuery: dbParam,
	}
	query := connURL.Query()
	if socketHost != "" {
		query.Add("host", socketHost)
	}
	query.Set("sslmode", dbsslMode)
	connURL.RawQuery = query.Encode()
	return connURL.String()
}

// getPostgreSQLConnectionString builds a PostgreSQL connection URL.
// dbName may carry extra parameters after a '?'; dbHost may be a single
// host, a comma-separated multi-host list (pgx style), or a unix socket
// path starting with '/'.
func getPostgreSQLConnectionString(dbHost, dbUser, dbPasswd, dbName, dbsslMode string) (connStr string) {
	dbName, dbParam, _ := strings.Cut(dbName, "?")

	// pgx multi-host specification: "host1:port1,host2:port2"
	if strings.Contains(dbHost, ",") {
		var hostParts []string
		for _, host := range strings.Split(dbHost, ",") {
			trimmed := strings.TrimSpace(host)
			if trimmed == "" {
				continue
			}
			h, p := parsePostgreSQLHostPort(trimmed)
			hostParts = append(hostParts, net.JoinHostPort(h, p))
		}
		// if no valid host survived parsing, fall through to the
		// single-host code path below
		if len(hostParts) > 0 {
			return buildPostgresConnURL(strings.Join(hostParts, ","), dbUser, dbPasswd, dbName, dbParam, dbsslMode, "")
		}
	}

	host, port := parsePostgreSQLHostPort(dbHost)
	hostPart := net.JoinHostPort(host, port)
	socketHost := ""
	if strings.HasPrefix(host, "/") { // looks like a unix socket
		socketHost = host
		hostPart = ":" + port
	}
	return buildPostgresConnURL(hostPart, dbUser, dbPasswd, dbName, dbParam, dbsslMode, socketHost)
}
|
|
|
|
|
|
2023-03-07 05:51:06 -05:00
|
|
|
// DatabaseType identifies the configured database backend
// (the raw value of the DB_TYPE setting).
type DatabaseType string

// String returns the raw configuration value of the database type.
func (t DatabaseType) String() string {
	return string(t)
}

// IsSQLite3 reports whether the configured backend is SQLite3.
func (t DatabaseType) IsSQLite3() bool {
	return t.String() == "sqlite3"
}

// IsMySQL reports whether the configured backend is MySQL.
func (t DatabaseType) IsMySQL() bool {
	return t.String() == "mysql"
}

// IsPostgreSQL reports whether the configured backend is PostgreSQL.
func (t DatabaseType) IsPostgreSQL() bool {
	return t.String() == "postgres"
}
|