mirror of
https://github.com/grafana/grafana.git
synced 2025-12-18 22:16:21 -05:00
Remove support for Google Spanner database. (#105846)
* Remove support for Google Spanner database.
This commit is contained in:
parent
9769871a88
commit
c4d3eb1cd0
50 changed files with 26 additions and 1265 deletions
2
.gitignore
vendored
2
.gitignore
vendored
|
|
@ -41,8 +41,6 @@ __debug_bin*
|
|||
# This is the new place of the block, but I leave the previous here for a while
|
||||
/devenv/docker/blocks/auth/saml-enterprise
|
||||
/devenv/docker/blocks/auth/signer
|
||||
/devenv/docker/blocks/spanner_tests
|
||||
/devenv/docker/blocks/spanner_tests_multi
|
||||
/devenv/docker/blocks/mt-db
|
||||
|
||||
/tmp
|
||||
|
|
|
|||
1
.ignore
1
.ignore
|
|
@ -19,4 +19,3 @@
|
|||
# This is the new place of the block, but I leave the previous here for a while
|
||||
!/devenv/docker/blocks/auth/saml-enterprise
|
||||
!/devenv/docker/blocks/auth/signer
|
||||
!/devenv/docker/blocks/spanner_tests
|
||||
3
go.mod
3
go.mod
|
|
@ -6,7 +6,6 @@ require (
|
|||
buf.build/gen/go/parca-dev/parca/connectrpc/go v1.17.0-20240902100956-02fd72488966.1 // @grafana/observability-traces-and-profiling
|
||||
buf.build/gen/go/parca-dev/parca/protocolbuffers/go v1.34.2-20240902100956-02fd72488966.2 // @grafana/observability-traces-and-profiling
|
||||
cloud.google.com/go/kms v1.20.5 // @grafana/grafana-backend-group
|
||||
cloud.google.com/go/spanner v1.75.0 // @grafana/grafana-search-and-storage
|
||||
cloud.google.com/go/storage v1.50.0 // @grafana/grafana-backend-group
|
||||
connectrpc.com/connect v1.17.0 // @grafana/observability-traces-and-profiling
|
||||
cuelang.org/go v0.11.1 // @grafana/grafana-as-code
|
||||
|
|
@ -75,7 +74,6 @@ require (
|
|||
github.com/google/uuid v1.6.0 // @grafana/grafana-backend-group
|
||||
github.com/google/wire v0.6.0 // @grafana/grafana-backend-group
|
||||
github.com/googleapis/gax-go/v2 v2.14.1 // @grafana/grafana-backend-group
|
||||
github.com/googleapis/go-sql-spanner v1.11.1 // @grafana/grafana-search-and-storage
|
||||
github.com/gorilla/mux v1.8.1 // @grafana/grafana-backend-group
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // @grafana/grafana-app-platform-squad
|
||||
github.com/grafana/alerting v0.0.0-20250521131632-6e476b0b04c3 // @grafana/alerting-backend
|
||||
|
|
@ -253,7 +251,6 @@ require (
|
|||
github.com/Azure/go-ntlmssp v0.0.0-20220621081337-cb9428e4ac1e // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 // indirect
|
||||
github.com/FZambia/eagle v0.2.0 // indirect
|
||||
github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.2 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.49.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.49.0 // indirect
|
||||
|
|
|
|||
13
go.sum
13
go.sum
|
|
@ -187,7 +187,6 @@ cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvj
|
|||
cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA=
|
||||
cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs=
|
||||
cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU=
|
||||
cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE=
|
||||
cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI=
|
||||
cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
|
||||
cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU=
|
||||
|
|
@ -541,8 +540,6 @@ cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+
|
|||
cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos=
|
||||
cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk=
|
||||
cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M=
|
||||
cloud.google.com/go/spanner v1.75.0 h1:2zrltTJv/4P3pCgpYgde4Eb1vN8Cgy1fNy7pbTnOovg=
|
||||
cloud.google.com/go/spanner v1.75.0/go.mod h1:TLFZBvPQmx3We7sGh12eTk9lLsRLczzZaiweqfMpR80=
|
||||
cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM=
|
||||
cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ=
|
||||
cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0=
|
||||
|
|
@ -718,8 +715,6 @@ github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3
|
|||
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
||||
github.com/FZambia/eagle v0.2.0 h1:1kQaZpJvbkvAXFRE/9K2ucBMuVqo+E29EMLYB74hIis=
|
||||
github.com/FZambia/eagle v0.2.0/go.mod h1:LKMYBwGYhao5sJI0TppvQ4SvvldFj9gITxrl8NvGwG0=
|
||||
github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.2 h1:DBjmt6/otSdULyJdVg2BlG0qGZO5tKL4VzOs0jpvw5Q=
|
||||
github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.2/go.mod h1:dppbR7CwXD4pgtV9t3wD1812RaLDcBjtblcDF5f1vI0=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 h1:f2Qw/Ehhimh5uO1fayV0QIW7DShEQqhtUfhYc+cBPlw=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0/go.mod h1:2bIszWvQRlJVmJLiuLhukLImRjKPcYdzzsx6darK02A=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.49.0 h1:o90wcURuxekmXrtxmYWTyNla0+ZEHhud6DI1ZTxd1vI=
|
||||
|
|
@ -1019,7 +1014,6 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWH
|
|||
github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk=
|
||||
github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
|
||||
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
|
||||
|
|
@ -1133,7 +1127,6 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.
|
|||
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
|
||||
github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34=
|
||||
github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI=
|
||||
github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q=
|
||||
github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M=
|
||||
github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA=
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
|
||||
|
|
@ -1144,7 +1137,6 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
|
|||
github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
|
||||
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
|
|
@ -1549,8 +1541,6 @@ github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEP
|
|||
github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/googleapis/go-sql-spanner v1.11.1 h1:z3ThtKV5HFvaNv9UGc26+ggS+lS0dsCAkaFduKL7vws=
|
||||
github.com/googleapis/go-sql-spanner v1.11.1/go.mod h1:fuA5q4yMS3SZiVfRr5bvksPNk7zUn/irbQW62H/ffZw=
|
||||
github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
|
||||
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
|
||||
github.com/gophercloud/gophercloud v0.3.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
|
||||
|
|
@ -2392,7 +2382,6 @@ github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
|
|||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
|
|
@ -3398,11 +3387,9 @@ google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCD
|
|||
google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
|
||||
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
|
||||
google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
|
||||
google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
|
||||
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
|
||||
google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
|
||||
google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
|
||||
google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
|
||||
google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA=
|
||||
google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
||||
|
|
|
|||
|
|
@ -442,6 +442,7 @@ cloud.google.com/go/shell v1.8.1/go.mod h1:jaU7OHeldDhTwgs3+clM0KYEDYnBAPevUI6wN
|
|||
cloud.google.com/go/shell v1.8.3 h1:mjYgUsOtV3jl9xvDmcvlRRmA64deEPf52zOfuc68b/g=
|
||||
cloud.google.com/go/shell v1.8.3/go.mod h1:OYcrgWF6JSp/uk76sNTtYFlMD0ho2+Cdzc7U3P/bF54=
|
||||
cloud.google.com/go/spanner v1.70.0/go.mod h1:X5T0XftydYp0K1adeJQDJtdWpbrOeJ7wHecM4tK6FiE=
|
||||
cloud.google.com/go/spanner v1.73.0 h1:0bab8QDn6MNj9lNK6XyGAVFhMlhMU2waePPa6GZNoi8=
|
||||
cloud.google.com/go/spanner v1.73.0/go.mod h1:mw98ua5ggQXVWwp83yjwggqEmW9t8rjs9Po1ohcUGW4=
|
||||
cloud.google.com/go/speech v1.25.1 h1:iGZJS3wrdkje/Vqiacx1+r+zVwUZoXVMdklYIVsvfNw=
|
||||
cloud.google.com/go/speech v1.25.1/go.mod h1:WgQghvghkZ1htG6BhYn98mP7Tg0mti8dBFDLMVXH/vM=
|
||||
|
|
@ -1047,8 +1048,6 @@ github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg78
|
|||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
|
||||
github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c=
|
||||
github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||
github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
|
||||
github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU=
|
||||
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
|
||||
|
|
|
|||
|
|
@ -6,7 +6,6 @@ package extensions
|
|||
import (
|
||||
_ "cloud.google.com/go/kms/apiv1"
|
||||
_ "cloud.google.com/go/kms/apiv1/kmspb"
|
||||
_ "cloud.google.com/go/spanner"
|
||||
_ "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
|
||||
_ "github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys"
|
||||
_ "github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault"
|
||||
|
|
|
|||
|
|
@ -54,7 +54,6 @@ type InitTestDBOpt = sqlstore.InitTestDBOpt
|
|||
var SetupTestDB = sqlstore.SetupTestDB
|
||||
var CleanupTestDB = sqlstore.CleanupTestDB
|
||||
var ProvideService = sqlstore.ProvideService
|
||||
var SkipTestsOnSpanner = sqlstore.SkipTestsOnSpanner
|
||||
|
||||
func InitTestDB(t sqlutil.ITestDB, opts ...InitTestDBOpt) *sqlstore.SQLStore {
|
||||
db, _ := InitTestDBWithCfg(t, opts...)
|
||||
|
|
@ -96,11 +95,3 @@ func IsTestDBMSSQL() bool {
|
|||
|
||||
return false
|
||||
}
|
||||
|
||||
func IsTestDBSpanner() bool {
|
||||
if db, present := os.LookupEnv("GRAFANA_TEST_DB"); present {
|
||||
return db == migrator.Spanner
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
|
|
|||
|
|
@ -374,10 +374,6 @@ func (sl *ServerLockService) createLock(ctx context.Context,
|
|||
}
|
||||
lockRow.Id = id
|
||||
} else {
|
||||
if sl.SQLStore.GetDBType() == migrator.Spanner {
|
||||
rawSQL += " THEN RETURN id" // Required for successful LastInsertId call.
|
||||
}
|
||||
|
||||
res, err := dbSession.Exec(
|
||||
rawSQL,
|
||||
lockRow.OperationUID, lockRow.LastExecution, 0)
|
||||
|
|
|
|||
|
|
@ -203,10 +203,6 @@ func TestIntegrationAnnotationListingWithInheritedRBAC(t *testing.T) {
|
|||
t.Skip("skipping integration test")
|
||||
}
|
||||
|
||||
if db.IsTestDBSpanner() {
|
||||
t.Skip("skipping integration test")
|
||||
}
|
||||
|
||||
orgID := int64(1)
|
||||
permissions := []accesscontrol.Permission{
|
||||
{
|
||||
|
|
|
|||
|
|
@ -334,7 +334,6 @@ func (r *xormRepositoryImpl) Get(ctx context.Context, query annotations.ItemQuer
|
|||
}
|
||||
|
||||
if len(tags) > 0 {
|
||||
// "at" is a keyword in Spanner and needs to be quoted.
|
||||
tagsSubQuery := fmt.Sprintf(`
|
||||
SELECT SUM(1) FROM annotation_tag `+r.db.Quote("at")+`
|
||||
INNER JOIN tag on tag.id = `+r.db.Quote("at")+`.tag_id
|
||||
|
|
|
|||
|
|
@ -146,14 +146,11 @@ func (s *AnonDBStore) CreateOrUpdateDevice(ctx context.Context, device *Device)
|
|||
}
|
||||
|
||||
// If CreatedAt time is not set (i.e. it's zero), and we end up creating the device, use current time as creation time.
|
||||
// Spanner converts zero time to NULL, but CreatedAt is not nullable, so this helps to fix that problem too.
|
||||
// If database converts zero time to NULL, but CreatedAt is not nullable, this helps to fix that problem too.
|
||||
created := device.CreatedAt
|
||||
if created.IsZero() {
|
||||
created = time.Now()
|
||||
}
|
||||
if s.sqlStore.GetDBType() == migrator.Spanner {
|
||||
return s.insertIntoSpanner(ctx, device, created)
|
||||
}
|
||||
|
||||
args := []any{device.DeviceID, device.ClientIP, device.UserAgent, created.UTC(), device.UpdatedAt.UTC()}
|
||||
switch s.sqlStore.GetDBType() {
|
||||
|
|
@ -192,30 +189,6 @@ func (s *AnonDBStore) CreateOrUpdateDevice(ctx context.Context, device *Device)
|
|||
return err
|
||||
}
|
||||
|
||||
// In Spanner INSERT OR UPDATE only works when conflict is on primary key. However here we expect conflict on non-PK
|
||||
// column "device_id", so we need to use a transaction instead.
|
||||
func (s *AnonDBStore) insertIntoSpanner(ctx context.Context, dev *Device, created time.Time) error {
|
||||
return s.sqlStore.WithTransactionalDbSession(ctx, func(dbSession *sqlstore.DBSession) error {
|
||||
prev := &Device{DeviceID: dev.DeviceID}
|
||||
ok, err := dbSession.Table(tableName).Get(prev)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !ok {
|
||||
// Don't call Insert(dev) directly, because successful Insert modifies dev.DeviceID,
|
||||
// but we don't do the same for other databases. That's why we create a copy.
|
||||
devCopy := *dev
|
||||
devCopy.CreatedAt = created
|
||||
_, err = dbSession.Table(tableName).Insert(devCopy)
|
||||
return err
|
||||
}
|
||||
|
||||
// Include all columns in the update, even when empty (it's what inserts for other databases do too).
|
||||
_, err = dbSession.Table(tableName).Where("device_id=?", dev.DeviceID).MustCols("client_ip", "user_agent", "created_at", "updated_at").Update(dev)
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
func (s *AnonDBStore) CountDevices(ctx context.Context, from time.Time, to time.Time) (int64, error) {
|
||||
var count int64
|
||||
err := s.sqlStore.WithDbSession(ctx, func(dbSession *sqlstore.DBSession) error {
|
||||
|
|
|
|||
|
|
@ -234,10 +234,6 @@ func TestIntegrationDashboardInheritedFolderRBAC(t *testing.T) {
|
|||
if testing.Short() {
|
||||
t.Skip("skipping integration test")
|
||||
}
|
||||
if db.IsTestDBSpanner() {
|
||||
t.Skip("skipping integration test")
|
||||
}
|
||||
|
||||
// the maximux nested folder hierarchy starting from parent down to subfolders
|
||||
nestedFolders := make([]*folder.Folder, 0, folder.MaxNestedFolderDepth+1)
|
||||
|
||||
|
|
|
|||
|
|
@ -168,10 +168,6 @@ func (s *Service) DBMigration(db db.DB) {
|
|||
SELECT uid, org_id, title, created, updated FROM dashboard WHERE is_folder = true
|
||||
ON CONFLICT(uid, org_id) DO UPDATE SET title=excluded.title, updated=excluded.updated
|
||||
`)
|
||||
} else if db.GetDialect().DriverName() == migrator.Spanner {
|
||||
// We may eventually make this migration work with Spanner, but for now don't do anything.
|
||||
// We intend to store dashboards and folders only in unified storage when using spanner.
|
||||
deleteOldFolders = false
|
||||
} else {
|
||||
// covered by UQE_folder_org_id_uid
|
||||
_, err = sess.Exec(`
|
||||
|
|
|
|||
|
|
@ -620,7 +620,7 @@ func (ss *FolderStoreImpl) GetDescendants(ctx context.Context, orgID int64, ance
|
|||
|
||||
func getFullpathSQL(dialect migrator.Dialect) string {
|
||||
escaped := `\/`
|
||||
if dialect.DriverName() == migrator.MySQL || dialect.DriverName() == migrator.Spanner {
|
||||
if dialect.DriverName() == migrator.MySQL {
|
||||
escaped = `\\/`
|
||||
}
|
||||
concatCols := make([]string, 0, folder.MaxNestedFolderDepth)
|
||||
|
|
|
|||
|
|
@ -52,7 +52,7 @@ func TestIntegrationAuthInfoStore(t *testing.T) {
|
|||
|
||||
// There is no guarantee that user with user_id=1 gets "oauth_azuread" or "ldap".
|
||||
// Both are valid results for the query (basically SELECT * FROM `user_auth` WHERE `user_id` IN (1,2) ORDER BY created),
|
||||
// Spanner emulator will randomize its output, so test cannot rely on the ordering (other than "Created" column, which is equal here).
|
||||
// Some databases may randomize its output, so test cannot rely on the ordering (other than "Created" column, which is equal here).
|
||||
require.True(t, labels[1] == login.AzureADAuthModule || labels[1] == login.LDAPAuthModule)
|
||||
require.Equal(t, login.GoogleAuthModule, labels[2])
|
||||
})
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@ import (
|
|||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
testsuite.RunButSkipOnSpanner(m)
|
||||
testsuite.Run(m)
|
||||
}
|
||||
|
||||
func TestIntegrationAlertmanagerStore(t *testing.T) {
|
||||
|
|
|
|||
|
|
@ -203,8 +203,6 @@ func (dbCfg *DatabaseConfig) buildConnectionString(cfg *setting.Cfg, features fe
|
|||
}
|
||||
|
||||
cnnstr += buildExtraConnectionString('&', dbCfg.UrlQueryParams)
|
||||
case migrator.Spanner:
|
||||
cnnstr = dbCfg.Name
|
||||
default:
|
||||
return fmt.Errorf("unknown database type: %s", dbCfg.Type)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -77,8 +77,8 @@ func TestIntegrationMigrationLock(t *testing.T) {
|
|||
}
|
||||
|
||||
dbType := sqlutil.GetTestDBType()
|
||||
// skip for SQLite and Spanner since there is no database locking (only migrator locking)
|
||||
if dbType == SQLite || dbType == Spanner {
|
||||
// skip for SQLite since there is no database locking (only migrator locking)
|
||||
if dbType == SQLite {
|
||||
t.Skip()
|
||||
}
|
||||
|
||||
|
|
@ -235,8 +235,8 @@ func TestMigratorLocking(t *testing.T) {
|
|||
func TestDatabaseLocking(t *testing.T) {
|
||||
dbType := sqlutil.GetTestDBType()
|
||||
|
||||
// skip for SQLite and Spanner since there is no database locking (only migrator locking)
|
||||
if dbType == SQLite || dbType == Spanner {
|
||||
// skip for SQLite since there is no database locking (only migrator locking)
|
||||
if dbType == SQLite {
|
||||
t.Skip()
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -82,15 +82,6 @@ func RunStarMigrations(sess *xorm.Session, driverName string) error {
|
|||
star.org_id = dashboard.org_id,
|
||||
star.updated = NOW()
|
||||
WHERE star.dashboard_uid IS NULL OR star.org_id IS NULL;`
|
||||
case Spanner:
|
||||
sql = `UPDATE star
|
||||
SET
|
||||
dashboard_uid = (SELECT uid FROM dashboard WHERE dashboard.id = star.dashboard_id),
|
||||
org_id = (SELECT org_id FROM dashboard WHERE dashboard.id = star.dashboard_id),
|
||||
updated = CURRENT_TIMESTAMP()
|
||||
WHERE
|
||||
(dashboard_uid IS NULL OR org_id IS NULL)
|
||||
AND EXISTS (SELECT 1 FROM dashboard WHERE dashboard.id = star.dashboard_id)`
|
||||
}
|
||||
|
||||
if _, err := sess.Exec(sql); err != nil {
|
||||
|
|
|
|||
|
|
@ -154,8 +154,7 @@ func addUserMigrations(mg *Migrator) {
|
|||
mg.AddMigration("Make sure users uid are set", NewRawSQLMigration("").
|
||||
SQLite("UPDATE user SET uid=printf('u%09d',id) WHERE uid is NULL OR uid = '';").
|
||||
Postgres("UPDATE `user` SET uid='u' || lpad('' || id::text,9,'0') WHERE uid is NULL OR uid = '';").
|
||||
Mysql("UPDATE user SET uid=concat('u',lpad(id,9,'0')) WHERE uid is NULL OR uid = '';").
|
||||
Spanner("UPDATE user SET uid=concat('u',lpad(CAST(id AS STRING),9,'0')) WHERE uid IS NULL OR uid = '';"))
|
||||
Mysql("UPDATE user SET uid=concat('u',lpad(id,9,'0')) WHERE uid is NULL OR uid = '';"))
|
||||
|
||||
mg.AddMigration("Add unique index user_uid", NewAddIndexMigration(userV2, &Index{
|
||||
Cols: []string{"uid"}, Type: UniqueIndex,
|
||||
|
|
|
|||
|
|
@ -72,19 +72,6 @@ func (p *ServiceAccountsSameLoginCrossOrgs) Exec(sess *xorm.Session, mg *migrato
|
|||
AND is_service_account = 1
|
||||
AND login NOT LIKE 'sa-' || CAST(org_id AS TEXT) || '-%';
|
||||
`)
|
||||
case migrator.Spanner:
|
||||
_, err = p.sess.Exec(`
|
||||
UPDATE user
|
||||
SET login = CONCAT('sa-', CAST(org_id AS STRING), '-',
|
||||
CASE
|
||||
WHEN login LIKE 'sa-%' THEN SUBSTRING(login, 4)
|
||||
ELSE login
|
||||
END
|
||||
)
|
||||
WHERE login IS NOT NULL
|
||||
AND is_service_account
|
||||
AND login NOT LIKE CONCAT('sa-', CAST(org_id AS STRING), '-%')
|
||||
`)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("dialect not supported: %s", p.dialect)
|
||||
|
|
@ -142,19 +129,6 @@ func (p *ServiceAccountsDeduplicateOrgInLogin) Exec(sess *xorm.Session, mg *migr
|
|||
WHERE u2.login = 'sa-' || CAST(u.org_id AS TEXT) || SUBSTRING(u.login, LENGTH('sa-'||CAST(u.org_id AS TEXT)||'-'||CAST(u.org_id AS TEXT))+1)
|
||||
);;
|
||||
`)
|
||||
case migrator.Spanner:
|
||||
_, err = sess.Exec(`
|
||||
UPDATE ` + dialect.Quote("user") + ` AS u
|
||||
SET login = 'sa-' || CAST(u.org_id AS STRING) || SUBSTRING(u.login, LENGTH('sa-'||CAST(u.org_id AS STRING)||'-'||CAST(u.org_id AS STRING))+1)
|
||||
WHERE u.login IS NOT NULL
|
||||
AND u.is_service_account
|
||||
AND u.login LIKE 'sa-'||CAST(u.org_id AS STRING)||'-'||CAST(u.org_id AS STRING)||'-%'
|
||||
AND NOT EXISTS (
|
||||
SELECT 1
|
||||
FROM ` + dialect.Quote("user") + `AS u2
|
||||
WHERE u2.login = 'sa-' || CAST(u.org_id AS STRING) || SUBSTRING(u.login, LENGTH('sa-'||CAST(u.org_id AS STRING)||'-'||CAST(u.org_id AS STRING))+1)
|
||||
);;
|
||||
`)
|
||||
default:
|
||||
return fmt.Errorf("dialect not supported: %s", dialect)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -85,10 +85,6 @@ func (m *RawSQLMigration) Mssql(sql string) *RawSQLMigration {
|
|||
return m.Set(MSSQL, sql)
|
||||
}
|
||||
|
||||
func (m *RawSQLMigration) Spanner(sql string) *RawSQLMigration {
|
||||
return m.Set(Spanner, sql)
|
||||
}
|
||||
|
||||
type AddColumnMigration struct {
|
||||
MigrationBase
|
||||
tableName string
|
||||
|
|
|
|||
|
|
@ -417,11 +417,6 @@ func (mg *Migrator) InTransaction(callback dbTransactionFunc) error {
|
|||
sess := mg.DBEngine.NewSession()
|
||||
defer sess.Close()
|
||||
|
||||
// XXX: Spanner cannot execute DDL statements in transactions
|
||||
if mg.Dialect.DriverName() == Spanner {
|
||||
return callback(sess)
|
||||
}
|
||||
|
||||
if err := sess.Begin(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,355 +0,0 @@
|
|||
//go:build enterprise || pro
|
||||
|
||||
package migrator
|
||||
|
||||
import (
|
||||
"context"
|
||||
_ "embed"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/spanner"
|
||||
database "cloud.google.com/go/spanner/admin/database/apiv1"
|
||||
"cloud.google.com/go/spanner/admin/database/apiv1/databasepb"
|
||||
"github.com/googleapis/gax-go/v2"
|
||||
spannerdriver "github.com/googleapis/go-sql-spanner"
|
||||
"github.com/grafana/grafana/pkg/util/xorm"
|
||||
"google.golang.org/grpc/codes"
|
||||
|
||||
"github.com/grafana/dskit/concurrency"
|
||||
utilspanner "github.com/grafana/grafana/pkg/util/spanner"
|
||||
"github.com/grafana/grafana/pkg/util/xorm/core"
|
||||
)
|
||||
|
||||
type SpannerDialect struct {
|
||||
BaseDialect
|
||||
d core.Dialect
|
||||
}
|
||||
|
||||
func init() {
|
||||
supportedDialects[Spanner] = NewSpannerDialect
|
||||
}
|
||||
|
||||
func NewSpannerDialect() Dialect {
|
||||
d := SpannerDialect{d: core.QueryDialect(Spanner)}
|
||||
d.dialect = &d
|
||||
d.driverName = Spanner
|
||||
return &d
|
||||
}
|
||||
|
||||
func (s *SpannerDialect) AutoIncrStr() string { return s.d.AutoIncrStr() }
|
||||
func (s *SpannerDialect) Quote(name string) string { return s.d.Quote(name) }
|
||||
func (s *SpannerDialect) SupportEngine() bool { return s.d.SupportEngine() }
|
||||
|
||||
func (s *SpannerDialect) LikeOperator(column string, wildcardBefore bool, pattern string, wildcardAfter bool) (string, string) {
|
||||
param := strings.ToLower(pattern)
|
||||
if wildcardBefore {
|
||||
param = "%" + param
|
||||
}
|
||||
if wildcardAfter {
|
||||
param = param + "%"
|
||||
}
|
||||
return fmt.Sprintf("LOWER(%s) LIKE ?", column), param
|
||||
}
|
||||
|
||||
func (s *SpannerDialect) IndexCheckSQL(tableName, indexName string) (string, []any) {
|
||||
return s.d.IndexCheckSql(tableName, indexName)
|
||||
}
|
||||
func (s *SpannerDialect) SQLType(col *Column) string {
|
||||
c := core.NewColumn(col.Name, "", core.SQLType{Name: col.Type}, col.Length, col.Length2, col.Nullable)
|
||||
return s.d.SqlType(c)
|
||||
}
|
||||
|
||||
func (s *SpannerDialect) BatchSize() int { return 1000 }
|
||||
|
||||
func (s *SpannerDialect) BooleanValue(b bool) any {
|
||||
return b
|
||||
}
|
||||
|
||||
func (s *SpannerDialect) BooleanStr(b bool) string {
|
||||
if b {
|
||||
return "true"
|
||||
}
|
||||
return "false"
|
||||
}
|
||||
func (s *SpannerDialect) ErrorMessage(err error) string {
|
||||
return spanner.ErrDesc(spanner.ToSpannerError(err))
|
||||
}
|
||||
func (s *SpannerDialect) IsDeadlock(err error) bool {
|
||||
return spanner.ErrCode(spanner.ToSpannerError(err)) == codes.Aborted
|
||||
}
|
||||
func (s *SpannerDialect) IsUniqueConstraintViolation(err error) bool {
|
||||
return spanner.ErrCode(spanner.ToSpannerError(err)) == codes.AlreadyExists
|
||||
}
|
||||
|
||||
func (s *SpannerDialect) CreateTableSQL(table *Table) string {
|
||||
t := core.NewEmptyTable()
|
||||
t.Name = table.Name
|
||||
t.PrimaryKeys = table.PrimaryKeys
|
||||
for _, c := range table.Columns {
|
||||
col := core.NewColumn(c.Name, c.Name, core.SQLType{Name: c.Type}, c.Length, c.Length2, c.Nullable)
|
||||
col.IsAutoIncrement = c.IsAutoIncrement
|
||||
col.Default = c.Default
|
||||
t.AddColumn(col)
|
||||
}
|
||||
if len(t.PrimaryKeys) == 0 {
|
||||
for _, ix := range table.Indices {
|
||||
if ix.Name == "PRIMARY_KEY" {
|
||||
t.PrimaryKeys = append(t.PrimaryKeys, ix.Cols...)
|
||||
}
|
||||
}
|
||||
}
|
||||
return s.d.CreateTableSql(t, t.Name, "", "")
|
||||
}
|
||||
|
||||
func (s *SpannerDialect) CreateIndexSQL(tableName string, index *Index) string {
|
||||
idx := core.NewIndex(index.Name, index.Type)
|
||||
idx.Cols = index.Cols
|
||||
return s.d.CreateIndexSql(tableName, idx)
|
||||
}
|
||||
|
||||
func (s *SpannerDialect) UpsertMultipleSQL(tableName string, keyCols, updateCols []string, count int) (string, error) {
|
||||
return "", errors.New("not supported")
|
||||
}
|
||||
|
||||
func (s *SpannerDialect) DropIndexSQL(tableName string, index *Index) string {
|
||||
return fmt.Sprintf("DROP INDEX %v", s.Quote(index.XName(tableName)))
|
||||
}
|
||||
|
||||
func (s *SpannerDialect) DropTable(tableName string) string {
|
||||
return fmt.Sprintf("DROP TABLE %s", s.Quote(tableName))
|
||||
}
|
||||
|
||||
func (s *SpannerDialect) ColStringNoPk(col *Column) string {
|
||||
sql := s.dialect.Quote(col.Name) + " "
|
||||
|
||||
sql += s.dialect.SQLType(col) + " "
|
||||
|
||||
if s.dialect.ShowCreateNull() && !col.Nullable {
|
||||
sql += "NOT NULL "
|
||||
}
|
||||
|
||||
if col.Default != "" {
|
||||
// Default value must be in parentheses.
|
||||
sql += "DEFAULT (" + s.dialect.Default(col) + ") "
|
||||
}
|
||||
|
||||
return sql
|
||||
}
|
||||
|
||||
func (s *SpannerDialect) TruncateDBTables(engine *xorm.Engine) error {
|
||||
// Get tables names only, no columns or indexes.
|
||||
tables, err := engine.Dialect().GetTables()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sess := engine.NewSession()
|
||||
defer sess.Close()
|
||||
|
||||
var statements []string
|
||||
|
||||
for _, table := range tables {
|
||||
switch table.Name {
|
||||
case "":
|
||||
continue
|
||||
case "autoincrement_sequences":
|
||||
// Don't delete sequence number for migration_log.id column.
|
||||
statements = append(statements, fmt.Sprintf("DELETE FROM %v WHERE name <> 'migration_log:id'", s.Quote(table.Name)))
|
||||
case "migration_log":
|
||||
continue
|
||||
case "dashboard_acl":
|
||||
// keep default dashboard permissions
|
||||
statements = append(statements, fmt.Sprintf("DELETE FROM %v WHERE dashboard_id != -1 AND org_id != -1;", s.Quote(table.Name)))
|
||||
default:
|
||||
statements = append(statements, fmt.Sprintf("DELETE FROM %v WHERE TRUE;", s.Quote(table.Name)))
|
||||
}
|
||||
}
|
||||
|
||||
// Run statements concurrently.
|
||||
return concurrency.ForEachJob(context.Background(), len(statements), 10, func(ctx context.Context, idx int) error {
|
||||
_, err := sess.Exec(statements[idx])
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
// CleanDB drops all existing tables and their indexes.
|
||||
func (s *SpannerDialect) CleanDB(engine *xorm.Engine) error {
|
||||
tables, err := engine.DBMetas()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Collect all DROP statements.
|
||||
changeStreams, err := s.findChangeStreams(engine)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
statements := make([]string, 0, len(tables)+len(changeStreams))
|
||||
for _, cs := range changeStreams {
|
||||
statements = append(statements, fmt.Sprintf("DROP CHANGE STREAM `%s`", cs))
|
||||
}
|
||||
|
||||
for _, table := range tables {
|
||||
// Indexes must be dropped first, otherwise dropping tables fails.
|
||||
for _, index := range table.Indexes {
|
||||
if !index.IsRegular {
|
||||
// Don't drop primary key.
|
||||
continue
|
||||
}
|
||||
sql := fmt.Sprintf("DROP INDEX %s", s.Quote(index.XName(table.Name)))
|
||||
statements = append(statements, sql)
|
||||
}
|
||||
|
||||
sql := fmt.Sprintf("DROP TABLE %s", s.Quote(table.Name))
|
||||
statements = append(statements, sql)
|
||||
}
|
||||
|
||||
if len(statements) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return s.executeDDLStatements(context.Background(), engine, statements)
|
||||
}
|
||||
|
||||
//go:embed snapshot/spanner-ddl.json
|
||||
var snapshotDDL string
|
||||
|
||||
//go:embed snapshot/spanner-log.json
|
||||
var snapshotMigrations string
|
||||
|
||||
func (s *SpannerDialect) CreateDatabaseFromSnapshot(ctx context.Context, engine *xorm.Engine, tableName string) error {
|
||||
var statements, migrationIDs []string
|
||||
err := json.Unmarshal([]byte(snapshotDDL), &statements)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = json.Unmarshal([]byte(snapshotMigrations), &migrationIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.executeDDLStatements(ctx, engine, statements)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.recordMigrationsToMigrationLog(engine, migrationIDs, tableName)
|
||||
}
|
||||
|
||||
func (s *SpannerDialect) recordMigrationsToMigrationLog(engine *xorm.Engine, migrationIDs []string, tableName string) error {
|
||||
now := time.Now()
|
||||
makeRecord := func(id string) MigrationLog {
|
||||
return MigrationLog{
|
||||
MigrationID: id,
|
||||
SQL: "",
|
||||
Success: true,
|
||||
Timestamp: now,
|
||||
}
|
||||
}
|
||||
|
||||
sess := engine.NewSession()
|
||||
defer sess.Close()
|
||||
|
||||
// Insert records in batches to avoid many roundtrips to database.
|
||||
// Inserting all records at once fails due to "Number of parameters in query exceeds the maximum
|
||||
// allowed limit of 950." error, so we use smaller batches.
|
||||
const batchSize = 100
|
||||
|
||||
if err := sess.Begin(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
records := make([]MigrationLog, 0, len(migrationIDs))
|
||||
for _, mid := range migrationIDs {
|
||||
records = append(records, makeRecord(mid))
|
||||
|
||||
if len(records) >= batchSize {
|
||||
if _, err := sess.Table(tableName).InsertMulti(records); err != nil {
|
||||
err2 := sess.Rollback()
|
||||
return errors.Join(fmt.Errorf("failed to insert migration logs: %w", err), err2)
|
||||
}
|
||||
records = records[:0]
|
||||
}
|
||||
}
|
||||
|
||||
// Insert remaining records.
|
||||
if len(records) > 0 {
|
||||
if _, err := sess.Table(tableName).InsertMulti(records); err != nil {
|
||||
err2 := sess.Rollback()
|
||||
return errors.Join(fmt.Errorf("failed to insert migration logs: %w", err), err2)
|
||||
}
|
||||
}
|
||||
|
||||
if err := sess.Commit(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Spanner can be very slow at executing single DDL statements (it can take up to a minute), but when
|
||||
// many DDL statements are batched together, Spanner is *much* faster (total time to execute all statements
|
||||
// is often in tens of seconds). We can't execute batch of DDL statements using sql wrapper, we use "database admin client"
|
||||
// from Spanner library instead.
|
||||
func (s *SpannerDialect) executeDDLStatements(ctx context.Context, engine *xorm.Engine, statements []string) error {
|
||||
// Datasource name contains string used for sql.Open.
|
||||
dsn := engine.Dialect().DataSourceName()
|
||||
cfg, err := spannerdriver.ExtractConnectorConfig(dsn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
opts := utilspanner.ConnectorConfigToClientOptions(cfg)
|
||||
|
||||
databaseAdminClient, err := database.NewDatabaseAdminClient(ctx, opts...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create database admin client: %v", err)
|
||||
}
|
||||
//nolint:errcheck // If the databaseAdminClient.Close fails, we simply don't care.
|
||||
defer databaseAdminClient.Close()
|
||||
|
||||
databaseName := fmt.Sprintf("projects/%s/instances/%s/databases/%s", cfg.Project, cfg.Instance, cfg.Database)
|
||||
|
||||
op, err := databaseAdminClient.UpdateDatabaseDdl(ctx, &databasepb.UpdateDatabaseDdlRequest{
|
||||
Database: databaseName,
|
||||
Statements: statements,
|
||||
}, gax.WithTimeout(0)) /* disable default timeout */
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to start database DDL update: %v", err)
|
||||
}
|
||||
|
||||
err = op.Wait(ctx, gax.WithTimeout(0)) /* disable default timeout */
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to apply database DDL update: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SpannerDialect) UnionDistinct() string {
|
||||
return "UNION DISTINCT"
|
||||
}
|
||||
|
||||
func (s *SpannerDialect) findChangeStreams(engine *xorm.Engine) ([]string, error) {
|
||||
var result []string
|
||||
query := `SELECT c.CHANGE_STREAM_NAME
|
||||
FROM INFORMATION_SCHEMA.CHANGE_STREAMS AS C
|
||||
WHERE C.CHANGE_STREAM_CATALOG=''
|
||||
AND C.CHANGE_STREAM_SCHEMA=''`
|
||||
rows, err := engine.DB().Query(query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//nolint:errcheck // If the rows.Close fails, we simply don't care.
|
||||
defer rows.Close()
|
||||
for rows.Next() {
|
||||
var name string
|
||||
if err := rows.Scan(&name); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result = append(result, name)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
|
@ -12,7 +12,6 @@ const (
|
|||
SQLite = "sqlite3"
|
||||
MySQL = "mysql"
|
||||
MSSQL = "mssql"
|
||||
Spanner = "spanner"
|
||||
)
|
||||
|
||||
type Migration interface {
|
||||
|
|
|
|||
|
|
@ -444,9 +444,6 @@ func TestIntegration_DashboardNestedPermissionFilter(t *testing.T) {
|
|||
expectedResult: []string{"parent"},
|
||||
},
|
||||
}
|
||||
if db.IsTestDBSpanner() {
|
||||
t.Skip("skipping integration test")
|
||||
}
|
||||
|
||||
var orgID int64 = 1
|
||||
|
||||
|
|
@ -554,9 +551,6 @@ func TestIntegration_DashboardNestedPermissionFilter_WithSelfContainedPermission
|
|||
expectedResult: []string{"parent"},
|
||||
},
|
||||
}
|
||||
if db.IsTestDBSpanner() {
|
||||
t.Skip("skipping integration test")
|
||||
}
|
||||
|
||||
var orgID int64 = 1
|
||||
|
||||
|
|
@ -665,10 +659,6 @@ func TestIntegration_DashboardNestedPermissionFilter_WithActionSets(t *testing.T
|
|||
},
|
||||
}
|
||||
|
||||
if db.IsTestDBSpanner() {
|
||||
t.Skip("skipping integration test")
|
||||
}
|
||||
|
||||
var orgID int64 = 1
|
||||
|
||||
for _, tc := range testCases {
|
||||
|
|
@ -757,9 +747,6 @@ func setupTest(t *testing.T, numFolders, numDashboards int, permissions []access
|
|||
|
||||
// Insert dashboards in batches
|
||||
batchSize := 500
|
||||
if db.IsTestDBSpanner() {
|
||||
batchSize = 30 // spanner has a limit of 950 parameters per query
|
||||
}
|
||||
for i := 0; i < len(dashes); i += batchSize {
|
||||
end := i + batchSize
|
||||
if end > len(dashes) {
|
||||
|
|
|
|||
|
|
@ -146,11 +146,6 @@ func (sess *DBSession) WithReturningID(driverName string, query string, args []a
|
|||
return id, err
|
||||
}
|
||||
} else {
|
||||
if driverName == migrator.Spanner {
|
||||
// Only works with INSERT statements.
|
||||
query = fmt.Sprintf("%s THEN RETURN id", query)
|
||||
}
|
||||
|
||||
sqlOrArgs := append([]any{query}, args...)
|
||||
res, err := sess.Exec(sqlOrArgs...)
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -110,11 +110,6 @@ func execWithReturningId(ctx context.Context, driverName string, query string, s
|
|||
}
|
||||
return id, nil
|
||||
} else {
|
||||
if driverName == "spanner" {
|
||||
// LastInsertId requires THEN RETURN directive.
|
||||
query = fmt.Sprintf("%s THEN RETURN id", query)
|
||||
}
|
||||
|
||||
res, err := sess.Exec(ctx, query, args...)
|
||||
if err != nil {
|
||||
return id, err
|
||||
|
|
|
|||
|
|
@ -8,8 +8,6 @@ import (
|
|||
|
||||
"github.com/mattn/go-sqlite3"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc/codes"
|
||||
grpcstatus "google.golang.org/grpc/status"
|
||||
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
|
||||
)
|
||||
|
|
@ -141,12 +139,10 @@ func getRetryErrors(t *testing.T, store *SQLStore) []error {
|
|||
switch store.GetDialect().DriverName() {
|
||||
case migrator.SQLite:
|
||||
retryErrors = []error{sqlite3.Error{Code: sqlite3.ErrBusy}, sqlite3.Error{Code: sqlite3.ErrLocked}}
|
||||
case migrator.Spanner:
|
||||
retryErrors = []error{grpcstatus.Error(codes.Aborted, "aborted transaction")}
|
||||
}
|
||||
|
||||
if len(retryErrors) == 0 {
|
||||
t.Skip("This test only works with sqlite or spanner")
|
||||
t.Skip("This test only works with sqlite")
|
||||
}
|
||||
return retryErrors
|
||||
}
|
||||
|
|
|
|||
|
|
@ -373,11 +373,6 @@ func (ss *SQLStore) RecursiveQueriesAreSupported() (bool, error) {
|
|||
return *ss.recursiveQueriesAreSupported, nil
|
||||
}
|
||||
recursiveQueriesAreSupported := func() (bool, error) {
|
||||
if ss.GetDBType() == migrator.Spanner {
|
||||
// no need to try...
|
||||
return false, nil
|
||||
}
|
||||
|
||||
var result []int
|
||||
if err := ss.WithDbSession(context.Background(), func(sess *DBSession) error {
|
||||
recQry := `WITH RECURSIVE cte (n) AS
|
||||
|
|
@ -414,7 +409,6 @@ var testSQLStoreSetup = false
|
|||
var testSQLStore *SQLStore
|
||||
var testSQLStoreMutex sync.Mutex
|
||||
var testSQLStoreCleanup []func()
|
||||
var testSQLStoreSkipTestsOnBackend string // When not empty and matches DB type, test is skipped.
|
||||
|
||||
// InitTestDBOpt contains options for InitTestDB.
|
||||
type InitTestDBOpt struct {
|
||||
|
|
@ -459,12 +453,6 @@ func SetupTestDB() {
|
|||
testSQLStoreSetup = true
|
||||
}
|
||||
|
||||
func SkipTestsOnSpanner() {
|
||||
testSQLStoreMutex.Lock()
|
||||
defer testSQLStoreMutex.Unlock()
|
||||
testSQLStoreSkipTestsOnBackend = "spanner"
|
||||
}
|
||||
|
||||
func CleanupTestDB() {
|
||||
testSQLStoreMutex.Lock()
|
||||
defer testSQLStoreMutex.Unlock()
|
||||
|
|
@ -549,10 +537,6 @@ func TestMain(m *testing.M) {
|
|||
if testSQLStore == nil {
|
||||
dbType := sqlutil.GetTestDBType()
|
||||
|
||||
if testSQLStoreSkipTestsOnBackend != "" && testSQLStoreSkipTestsOnBackend == dbType {
|
||||
t.Skipf("test skipped when using DB type %s", testSQLStoreSkipTestsOnBackend)
|
||||
}
|
||||
|
||||
// set test db config
|
||||
cfg := setting.NewCfg()
|
||||
// nolint:staticcheck
|
||||
|
|
|
|||
|
|
@ -9,18 +9,9 @@ import (
|
|||
"encoding/hex"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
database "cloud.google.com/go/spanner/admin/database/apiv1"
|
||||
"cloud.google.com/go/spanner/admin/database/apiv1/databasepb"
|
||||
"cloud.google.com/go/spanner/spannertest"
|
||||
spannerdriver "github.com/googleapis/go-sql-spanner"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
|
||||
"github.com/grafana/grafana/pkg/bus"
|
||||
"github.com/grafana/grafana/pkg/infra/tracing"
|
||||
"github.com/grafana/grafana/pkg/registry"
|
||||
|
|
@ -269,9 +260,6 @@ func createTemporaryDatabase(tb TestingTB) (*testDB, error) {
|
|||
// SQLite doesn't have a concept of a database server, so we always create a new file with no connections required.
|
||||
return newSQLite3DB(tb)
|
||||
}
|
||||
if dbType == "spanner" {
|
||||
return newSpannerDB(tb)
|
||||
}
|
||||
|
||||
// On the remaining databases, we first connect to the configured credentials, create a new database, then return this new database's info as a connection string.
|
||||
// We use databases rather than schemas as MySQL has no concept of schemas, so this aligns them more closely.
|
||||
|
|
@ -326,121 +314,7 @@ func createTemporaryDatabase(tb TestingTB) (*testDB, error) {
|
|||
func generateDatabaseName() string {
|
||||
// The database name has to be unique amongst all tests. It is highly unlikely we will have a collision here.
|
||||
// The database name has to be <= 64 chars long on MySQL, and <= 31 chars on Postgres.
|
||||
// Database ID length on Spanner must be between 2 and 30 characters. (https://cloud.google.com/spanner/quotas#database-limits)
|
||||
return "grafana_test_" + randomLowerHex(17)
|
||||
}
|
||||
|
||||
func newSpannerDB(tb TestingTB) (*testDB, error) {
|
||||
// See https://github.com/googleapis/go-sql-spanner/blob/main/driver.go#L56-L81 for connection string options.
|
||||
spannerDB := env("SPANNER_DB", "emulator")
|
||||
if spannerDB == "spannertest" {
|
||||
// Start new in-memory spannertest instance. This is mostly useless for our tests
|
||||
// (spannertest doesn't support many things that we use), but added for completion.
|
||||
// Each spannertest instance is a separate db.
|
||||
srv, err := spannertest.NewServer("localhost:0")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tb.Cleanup(srv.Close)
|
||||
|
||||
return &testDB{
|
||||
Driver: "spanner",
|
||||
Conn: fmt.Sprintf("%s/projects/grafanatest/instances/grafanatest/databases/grafanatest;usePlainText=true", srv.Addr),
|
||||
}, nil
|
||||
}
|
||||
|
||||
conn := spannerDB
|
||||
if spannerDB == "emulator" {
|
||||
host := env("SPANNER_EMULATOR_HOST", "localhost:9010")
|
||||
conn = fmt.Sprintf("%s/projects/grafanatest/instances/grafanatest/databases/grafanatest;usePlainText=true", host)
|
||||
}
|
||||
|
||||
cfg, err := spannerdriver.ExtractConnectorConfig(conn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
clientOptions := spannerConnectorConfigToClientOptions(cfg)
|
||||
|
||||
dbname := generateDatabaseName()
|
||||
fullDbName := fmt.Sprintf("projects/%s/instances/%s/databases/%s", cfg.Project, cfg.Instance, dbname)
|
||||
dbCreated := false
|
||||
|
||||
databaseAdminClient, err := database.NewDatabaseAdminClient(tb.Context(), clientOptions...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create database admin client: %v", err)
|
||||
}
|
||||
tb.Cleanup(func() {
|
||||
if dbCreated {
|
||||
// Drop database in the cleanup.
|
||||
// Can't use tb.Context() here, since that is canceled before calling Cleanup functions.
|
||||
err := databaseAdminClient.DropDatabase(context.Background(), &databasepb.DropDatabaseRequest{
|
||||
Database: fullDbName,
|
||||
})
|
||||
if err != nil {
|
||||
tb.Logf("Failed to drop Spanner database %s due to error %v", fullDbName, err)
|
||||
} else {
|
||||
tb.Logf("Dropped temporary Spanner database %s", fullDbName)
|
||||
}
|
||||
}
|
||||
|
||||
_ = databaseAdminClient.Close()
|
||||
})
|
||||
|
||||
op, err := databaseAdminClient.CreateDatabase(tb.Context(), &databasepb.CreateDatabaseRequest{
|
||||
Parent: fmt.Sprintf("projects/%s/instances/%s", cfg.Project, cfg.Instance),
|
||||
CreateStatement: fmt.Sprintf("CREATE DATABASE `%s`", dbname),
|
||||
DatabaseDialect: databasepb.DatabaseDialect_GOOGLE_STANDARD_SQL,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create database: %v", err)
|
||||
}
|
||||
_, err = op.Wait(tb.Context())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create database: %v", err)
|
||||
}
|
||||
tb.Logf("Created temporary Spanner database %s", fullDbName)
|
||||
|
||||
dbCreated = true
|
||||
|
||||
// Rebuild connection string, but change database to ID of just-created database.
|
||||
// Example: `localhost:9010/projects/test-project/instances/test-instance/databases/test-database;usePlainText=true;disableRouteToLeader=true;enableEndToEndTracing=true`
|
||||
connString := ""
|
||||
if cfg.Host != "" {
|
||||
connString = fmt.Sprintf("%s/", cfg.Host)
|
||||
}
|
||||
// Use new DB name instead of cfg.Database.
|
||||
connString = connString + fmt.Sprintf("projects/%s/instances/%s/databases/%s", cfg.Project, cfg.Instance, dbname)
|
||||
for k, v := range cfg.Params {
|
||||
connString = connString + fmt.Sprintf(";%s=%s", k, v)
|
||||
}
|
||||
|
||||
return &testDB{
|
||||
Driver: "spanner",
|
||||
Conn: connString,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// This is same code as xorm.SpannerConnectorConfigToClientOptions, but we cannot use that because it's under "enterprise" build tag.
|
||||
func spannerConnectorConfigToClientOptions(connectorConfig spannerdriver.ConnectorConfig) []option.ClientOption {
|
||||
var opts []option.ClientOption
|
||||
if connectorConfig.Host != "" {
|
||||
opts = append(opts, option.WithEndpoint(connectorConfig.Host))
|
||||
}
|
||||
if strval, ok := connectorConfig.Params["credentials"]; ok {
|
||||
opts = append(opts, option.WithCredentialsFile(strval))
|
||||
}
|
||||
if strval, ok := connectorConfig.Params["credentialsjson"]; ok {
|
||||
opts = append(opts, option.WithCredentialsJSON([]byte(strval)))
|
||||
}
|
||||
if strval, ok := connectorConfig.Params["useplaintext"]; ok {
|
||||
if val, err := strconv.ParseBool(strval); err == nil && val {
|
||||
opts = append(opts,
|
||||
option.WithGRPCDialOption(grpc.WithTransportCredentials(insecure.NewCredentials())),
|
||||
option.WithoutAuthentication())
|
||||
}
|
||||
}
|
||||
return opts
|
||||
return "grafana_test_" + randomLowerHex(18)
|
||||
}
|
||||
|
||||
func env(name, fallback string) string {
|
||||
|
|
|
|||
|
|
@ -6,8 +6,6 @@ import (
|
|||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"cloud.google.com/go/spanner/spannertest"
|
||||
)
|
||||
|
||||
// ITestDB is an interface of arguments for testing db
|
||||
|
|
@ -45,8 +43,6 @@ func GetTestDB(dbType string) (*TestDB, error) {
|
|||
return postgresTestDB()
|
||||
case "sqlite3":
|
||||
return sqLite3TestDB()
|
||||
case "spanner":
|
||||
return spannerTestDB()
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unknown test db type: %s", dbType)
|
||||
|
|
@ -156,49 +152,3 @@ func postgresTestDB() (*TestDB, error) {
|
|||
Cleanup: func() {},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func spannerTestDB() (*TestDB, error) {
|
||||
// See https://github.com/googleapis/go-sql-spanner/blob/main/driver.go#L56-L81 for connection string options.
|
||||
|
||||
spannerDB := os.Getenv("SPANNER_DB")
|
||||
if spannerDB == "" {
|
||||
spannerDB = "emulator"
|
||||
}
|
||||
|
||||
if spannerDB == "spannertest" {
|
||||
// Start in-memory spannertest instance.
|
||||
srv, err := spannertest.NewServer("localhost:0")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &TestDB{
|
||||
DriverName: "spanner",
|
||||
ConnStr: fmt.Sprintf("%s/projects/grafanatest/instances/grafanatest/databases/grafanatest;usePlainText=true", srv.Addr),
|
||||
Cleanup: srv.Close,
|
||||
}, nil
|
||||
}
|
||||
|
||||
if spannerDB == "emulator" {
|
||||
host := os.Getenv("SPANNER_EMULATOR_HOST")
|
||||
if host == "" {
|
||||
host = "localhost:9010"
|
||||
}
|
||||
|
||||
// To create instance and database manually, run:
|
||||
//
|
||||
// $ curl "localhost:9020/v1/projects/grafanatest/instances" --data '{"instanceId": "'grafanatest'"}'
|
||||
// $ curl "localhost:9020/v1/projects/grafanatest/instances/grafanatest/databases" --data '{"createStatement": "CREATE DATABASE `grafanatest`"}'
|
||||
return &TestDB{
|
||||
DriverName: "spanner",
|
||||
ConnStr: fmt.Sprintf("%s/projects/grafanatest/instances/grafanatest/databases/grafanatest;usePlainText=true;inMemSequenceGenerator=true", host),
|
||||
Cleanup: func() {},
|
||||
}, nil
|
||||
}
|
||||
|
||||
return &TestDB{
|
||||
DriverName: "spanner",
|
||||
ConnStr: spannerDB,
|
||||
Cleanup: func() {},
|
||||
}, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -61,7 +61,7 @@ func (ss *SQLStore) inTransactionWithRetryCtx(ctx context.Context, engine *xorm.
|
|||
return err
|
||||
}
|
||||
|
||||
// special handling of database locked errors for sqlite and spanner, then we can retry 5 times
|
||||
// special handling of database locked errors for sqlite, then we can retry 5 times
|
||||
if r, ok := engine.Dialect().(xorm.DialectWithRetryableErrors); ok {
|
||||
if retry < ss.dbCfg.TransactionRetries && r.RetryOnError(err) {
|
||||
if rollErr := sess.Rollback(); rollErr != nil {
|
||||
|
|
|
|||
|
|
@ -144,7 +144,7 @@ func (s *SSOSettingsStore) Delete(ctx context.Context, provider string) error {
|
|||
existing.Updated = time.Now().UTC()
|
||||
existing.IsDeleted = true
|
||||
|
||||
// We must explicitly omit ID column from updates, because some databases (e.g. Spanner) don't allow updating
|
||||
// We must explicitly omit ID column from updates, because some databases don't allow updating
|
||||
// primary key. Xorm ignores autoincrement columns during updates, but since ID column here is a string,
|
||||
// it's not ignored by default.
|
||||
_, err = sess.ID(existing.ID).Omit(idColumn).MustCols(updatedColumn, isDeletedColumn).Update(existing)
|
||||
|
|
|
|||
|
|
@ -24,8 +24,6 @@ func DialectForDriver(driverName string) Dialect {
|
|||
return PostgreSQL
|
||||
case "sqlite", "sqlite3":
|
||||
return SQLite
|
||||
case "spanner":
|
||||
return Spanner
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,32 +0,0 @@
|
|||
package sqltemplate
|
||||
|
||||
// Spanner is an implementation of Dialect for the Google Spanner database.
|
||||
var Spanner = spanner{}
|
||||
|
||||
var _ Dialect = Spanner
|
||||
|
||||
type spanner struct{}
|
||||
|
||||
func (s spanner) DialectName() string {
|
||||
return "spanner"
|
||||
}
|
||||
|
||||
func (s spanner) Ident(a string) (string, error) {
|
||||
return backtickIdent{}.Ident(a)
|
||||
}
|
||||
|
||||
func (s spanner) ArgPlaceholder(argNum int) string {
|
||||
return argFmtSQL92.ArgPlaceholder(argNum)
|
||||
}
|
||||
|
||||
func (s spanner) SelectFor(a ...string) (string, error) {
|
||||
return rowLockingClauseSpanner.SelectFor(a...)
|
||||
}
|
||||
|
||||
func (spanner) CurrentEpoch() string {
|
||||
return "UNIX_MICROS(CURRENT_TIMESTAMP())"
|
||||
}
|
||||
|
||||
var rowLockingClauseSpanner = rowLockingClauseMap{
|
||||
SelectForUpdate: SelectForUpdate,
|
||||
}
|
||||
|
|
@ -6,6 +6,8 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/grafana/grafana/pkg/infra/db"
|
||||
"github.com/grafana/grafana/pkg/infra/tracing"
|
||||
"github.com/grafana/grafana/pkg/services/featuremgmt"
|
||||
|
|
@ -15,7 +17,6 @@ import (
|
|||
"github.com/grafana/grafana/pkg/storage/unified/sql"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/sql/db/dbimpl"
|
||||
test "github.com/grafana/grafana/pkg/storage/unified/testing"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func newTestBackend(b testing.TB) resource.StorageBackend {
|
||||
|
|
@ -40,9 +41,6 @@ func TestIntegrationBenchmarkSQLStorageBackend(t *testing.T) {
|
|||
if testing.Short() {
|
||||
t.Skip("skipping integration test in short mode")
|
||||
}
|
||||
if db.IsTestDBSpanner() {
|
||||
t.Skip("Skipping benchmark on Spanner")
|
||||
}
|
||||
opts := test.DefaultBenchmarkOptions()
|
||||
if db.IsTestDbSQLite() {
|
||||
opts.Concurrency = 1 // to avoid SQLite database is locked error
|
||||
|
|
@ -54,9 +52,6 @@ func TestIntegrationBenchmarkResourceServer(t *testing.T) {
|
|||
if testing.Short() {
|
||||
t.Skip("skipping integration test in short mode")
|
||||
}
|
||||
if db.IsTestDBSpanner() {
|
||||
t.Skip("Skipping benchmark on Spanner")
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
opts := &test.BenchmarkOptions{
|
||||
|
|
|
|||
|
|
@@ -35,9 +35,6 @@ func TestMain(m *testing.M) {
}

func TestIntegrationStorageServer(t *testing.T) {
    if db.IsTestDBSpanner() {
        t.Skip("skipping integration test")
    }
    unitest.RunStorageServerTest(t, func(ctx context.Context) resource.StorageBackend {
        dbstore := db.InitTestDB(t)
        eDB, err := dbimpl.ProvideResourceDB(dbstore, setting.NewCfg(), nil)

@@ -58,10 +55,6 @@ func TestIntegrationStorageServer(t *testing.T) {

// TestStorageBackend is a test for the StorageBackend interface.
func TestIntegrationSQLStorageBackend(t *testing.T) {
    if db.IsTestDBSpanner() {
        t.Skip("skipping integration test")
    }

    t.Run("IsHA (polling notifier)", func(t *testing.T) {
        unitest.RunStorageBackendTest(t, func(ctx context.Context) resource.StorageBackend {
            dbstore := db.InitTestDB(t)

@@ -105,9 +98,6 @@ func TestIntegrationSearchAndStorage(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }
    if db.IsTestDBSpanner() {
        t.Skip("Skipping benchmark on Spanner")
    }

    ctx := context.Background()

@@ -145,9 +135,6 @@ func TestClientServer(t *testing.T) {
    if db.IsTestDbSQLite() {
        t.Skip("TODO: test blocking, skipping to unblock Enterprise until we fix this")
    }
    if db.IsTestDBSpanner() {
        t.Skip("skipping integration test")
    }

    ctx := testutil.NewTestContext(t, time.Now().Add(5*time.Second))
    dbstore := db.InitTestDB(t)
@@ -33,7 +33,7 @@ const (
)

func TestMain(m *testing.M) {
    testsuite.RunButSkipOnSpanner(m)
    testsuite.Run(m)
}

func TestGrafanaRuleConfig(t *testing.T) {

@@ -54,7 +54,7 @@ import (
var testData embed.FS

func TestMain(m *testing.M) {
    testsuite.RunButSkipOnSpanner(m)
    testsuite.Run(m)
}

func getTestHelper(t *testing.T) *apis.K8sTestHelper {

@@ -39,7 +39,7 @@ import (
)

func TestMain(m *testing.M) {
    testsuite.RunButSkipOnSpanner(m)
    testsuite.Run(m)
}

func getTestHelper(t *testing.T) *apis.K8sTestHelper {

@@ -32,7 +32,7 @@ import (
)

func TestMain(m *testing.M) {
    testsuite.RunButSkipOnSpanner(m)
    testsuite.Run(m)
}

func getTestHelper(t *testing.T) *apis.K8sTestHelper {

@@ -44,7 +44,7 @@ import (
var testData embed.FS

func TestMain(m *testing.M) {
    testsuite.RunButSkipOnSpanner(m)
    testsuite.Run(m)
}

func getTestHelper(t *testing.T) *apis.K8sTestHelper {
@@ -18,6 +18,7 @@ import (
    "gopkg.in/ini.v1"

    "github.com/grafana/dskit/kv"

    "github.com/grafana/grafana/pkg/api"
    "github.com/grafana/grafana/pkg/extensions"
    "github.com/grafana/grafana/pkg/infra/db"

@@ -508,17 +509,9 @@ func CreateGrafDir(t *testing.T, opts GrafanaOpts) (string, string) {
    require.NoError(t, err)
    _, err = dbSection.NewKey("query_retries", fmt.Sprintf("%d", queryRetries))
    require.NoError(t, err)
    if db.IsTestDBSpanner() {
        _, err = dbSection.NewKey("max_open_conn", "20")
    } else {
        _, err = dbSection.NewKey("max_open_conn", "2")
    }
    _, err = dbSection.NewKey("max_open_conn", "2")
    require.NoError(t, err)
    if db.IsTestDBSpanner() {
        _, err = dbSection.NewKey("max_idle_conn", "20")
    } else {
        _, err = dbSection.NewKey("max_idle_conn", "2")
    }
    _, err = dbSection.NewKey("max_idle_conn", "2")
    require.NoError(t, err)

    cfgPath := filepath.Join(cfgDir, "test.ini")
@@ -13,8 +13,3 @@ func Run(m *testing.M) {
    db.CleanupTestDB()
    os.Exit(code)
}

func RunButSkipOnSpanner(m *testing.M) {
    db.SkipTestsOnSpanner()
    Run(m)
}
@@ -1,40 +0,0 @@
// Package spanner should only be used from tests, or from enterprise code (eg. protected by build tags).
package spanner

import (
    "strconv"

    spannerdriver "github.com/googleapis/go-sql-spanner"
    "google.golang.org/api/option"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func UsePlainText(connectorConfig spannerdriver.ConnectorConfig) bool {
    if strval, ok := connectorConfig.Params["useplaintext"]; ok {
        if val, err := strconv.ParseBool(strval); err == nil {
            return val
        }
    }
    return false
}

// ConnectorConfigToClientOptions is adapted from https://github.com/googleapis/go-sql-spanner/blob/main/driver.go#L341-L477, from version 1.11.1.
func ConnectorConfigToClientOptions(connectorConfig spannerdriver.ConnectorConfig) []option.ClientOption {
    var opts []option.ClientOption
    if connectorConfig.Host != "" {
        opts = append(opts, option.WithEndpoint(connectorConfig.Host))
    }
    if strval, ok := connectorConfig.Params["credentials"]; ok {
        opts = append(opts, option.WithCredentialsFile(strval))
    }
    if strval, ok := connectorConfig.Params["credentialsjson"]; ok {
        opts = append(opts, option.WithCredentialsJSON([]byte(strval)))
    }
    if UsePlainText(connectorConfig) {
        opts = append(opts,
            option.WithGRPCDialOption(grpc.WithTransportCredentials(insecure.NewCredentials())),
            option.WithoutAuthentication())
    }
    return opts
}
@@ -1,399 +0,0 @@
//go:build enterprise || pro

package xorm

import (
    "database/sql"
    "fmt"
    "strconv"
    "strings"

    spannerclient "cloud.google.com/go/spanner"
    _ "github.com/googleapis/go-sql-spanner"
    spannerdriver "github.com/googleapis/go-sql-spanner"
    "github.com/grafana/grafana/pkg/util/xorm/core"
    "google.golang.org/grpc/codes"
)

func init() {
    core.RegisterDriver("spanner", &spannerDriver{})
    core.RegisterDialect("spanner", func() core.Dialect { return &spanner{} })
}

// https://cloud.google.com/spanner/docs/reference/standard-sql/lexical#reserved_keywords
var spannerReservedKeywords = map[string]struct{}{
    "ALL": {},
    "AND": {},
    "ANY": {},
    "ARRAY": {},
    "AS": {},
    "ASC": {},
    "ASSERT_ROWS_MODIFIED": {},
    "AT": {},
    "BETWEEN": {},
    "BY": {},
    "CASE": {},
    "CAST": {},
    "COLLATE": {},
    "CONTAINS": {},
    "CREATE": {},
    "CROSS": {},
    "CUBE": {},
    "CURRENT": {},
    "DEFAULT": {},
    "DEFINE": {},
    "DESC": {},
    "DISTINCT": {},
    "ELSE": {},
    "END": {},
    "ENUM": {},
    "ESCAPE": {},
    "EXCEPT": {},
    "EXCLUDE": {},
    "EXISTS": {},
    "EXTRACT": {},
    "FALSE": {},
    "FETCH": {},
    "FOLLOWING": {},
    "FOR": {},
    "FROM": {},
    "FULL": {},
    "GROUP": {},
    "GROUPING": {},
    "GROUPS": {},
    "HASH": {},
    "HAVING": {},
    "IF": {},
    "IGNORE": {},
    "IN": {},
    "INNER": {},
    "INTERSECT": {},
    "INTERVAL": {},
    "INTO": {},
    "IS": {},
    "JOIN": {},
    "LATERAL": {},
    "LEFT": {},
    "LIKE": {},
    "LIMIT": {},
    "LOOKUP": {},
    "MERGE": {},
    "NATURAL": {},
    "NEW": {},
    "NO": {},
    "NOT": {},
    "NULL": {},
    "NULLS": {},
    "OF": {},
    "ON": {},
    "OR": {},
    "ORDER": {},
    "OUTER": {},
    "OVER": {},
    "PARTITION": {},
    "PRECEDING": {},
    "PROTO": {},
    "RANGE": {},
    "RECURSIVE": {},
    "RESPECT": {},
    "RIGHT": {},
    "ROLLUP": {},
    "ROWS": {},
    "SELECT": {},
    "SET": {},
    "SOME": {},
    "STRUCT": {},
    "TABLESAMPLE": {},
    "THEN": {},
    "TO": {},
    "TREAT": {},
    "TRUE": {},
    "UNBOUNDED": {},
    "UNION": {},
    "UNNEST": {},
    "USING": {},
    "WHEN": {},
    "WHERE": {},
    "WINDOW": {},
    "WITH": {},
    "WITHIN": {},
}

type spannerDriver struct{}

func (d *spannerDriver) Parse(_driverName, datasourceName string) (*core.Uri, error) {
    return &core.Uri{DbType: "spanner", DbName: datasourceName}, nil
}

type spanner struct {
    core.Base
}

func (s *spanner) Init(db *core.DB, uri *core.Uri, driverName string, datasourceName string) error {
    return s.Base.Init(db, s, uri, driverName, datasourceName)
}
func (s *spanner) Filters() []core.Filter { return []core.Filter{&core.IdFilter{}} }
func (s *spanner) IsReserved(name string) bool {
    _, exists := spannerReservedKeywords[name]
    return exists
}
func (s *spanner) AndStr() string { return "AND" }
func (s *spanner) OrStr() string { return "OR" }
func (s *spanner) EqStr() string { return "=" }
func (s *spanner) RollBackStr() string { return "ROLL BACK" }
func (s *spanner) AutoIncrStr() string {
    // Spanner does not support auto-increment, but supports unique generated IDs (not sequential!).
    return "GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE)"
}
func (s *spanner) SupportInsertMany() bool { return false } // Needs manual transaction batching
func (s *spanner) SupportEngine() bool { return false } // No support for engine selection
func (s *spanner) SupportCharset() bool { return false } // ...or charsets
func (s *spanner) SupportDropIfExists() bool { return false } // Drop should be handled differently
func (s *spanner) IndexOnTable() bool { return false }
func (s *spanner) ShowCreateNull() bool { return false }
func (s *spanner) Quote(name string) string { return "`" + name + "`" }
func (s *spanner) SqlType(col *core.Column) string {
    switch col.SQLType.Name {
    case core.Int, core.SmallInt, core.BigInt:
        return "INT64"
    case core.Varchar, core.Text, core.MediumText, core.LongText, core.Char, core.NVarchar, core.NChar, core.NText:
        l := col.Length
        if l == 0 {
            l = col.SQLType.DefaultLength
        }
        if l > 0 {
            return fmt.Sprintf("STRING(%d)", l)
        }
        return "STRING(MAX)"
    case core.Jsonb:
        return "STRING(MAX)"
    case core.Bool, core.TinyInt:
        return "BOOL"
    case core.Float, core.Double:
        return "FLOAT64"
    case core.Bytea, core.Blob, core.MediumBlob, core.LongBlob:
        l := col.Length
        if l == 0 {
            l = col.SQLType.DefaultLength
        }
        if l > 0 {
            return fmt.Sprintf("BYTES(%d)", l)
        }
        return "BYTES(MAX)"
    case core.DateTime, core.TimeStamp:
        return "TIMESTAMP"
    default:
        panic("unknown column type: " + col.SQLType.Name)
    //default:
    //	return "STRING(MAX)" // XXX: more types to add
    }
}

func (s *spanner) GetColumns(tableName string) ([]string, map[string]*core.Column, error) {
    query := `SELECT COLUMN_NAME, SPANNER_TYPE, IS_NULLABLE, IS_IDENTITY, IDENTITY_GENERATION, IDENTITY_KIND, COLUMN_DEFAULT
        FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = ? AND TABLE_SCHEMA="" ORDER BY ORDINAL_POSITION`
    rows, err := s.DB().Query(query, tableName)
    if err != nil {
        return nil, nil, err
    }
    defer rows.Close()

    columns := make(map[string]*core.Column)
    var colNames []string

    var name, sqlType, isNullable string
    var isIdentity, identityGeneration, identityKind, columnDefault sql.NullString
    for rows.Next() {
        if err := rows.Scan(&name, &sqlType, &isNullable, &isIdentity, &identityGeneration, &identityKind, &columnDefault); err != nil {
            return nil, nil, err
        }

        var length int
        switch {
        case sqlType == "INT64":
            sqlType = core.Int
        case sqlType == "FLOAT32" || sqlType == "FLOAT64":
            sqlType = core.Float
        case sqlType == "BOOL":
            sqlType = core.Bool
        case sqlType == "BYTES(MAX)":
            sqlType = core.Blob
        case sqlType == "STRING(MAX)":
            sqlType = core.NVarchar
        case sqlType == "TIMESTAMP":
            sqlType = core.DateTime
        case strings.HasPrefix(sqlType, "BYTES("):
            // 6 == len(`BYTES(`), we also remove ")" from the end.
            if l, err := strconv.Atoi(sqlType[6 : len(sqlType)-1]); err == nil {
                length = l
            }
            sqlType = core.Blob
        case strings.HasPrefix(sqlType, "STRING("):
            // 7 == len(`STRING(`), we also remove ")" from the end.
            if l, err := strconv.Atoi(sqlType[7 : len(sqlType)-1]); err == nil {
                length = l
            }
            sqlType = core.Varchar
        default:
            panic("unknown column type: " + sqlType)
        }

        autoincrement := isIdentity.Valid && isIdentity.String == "YES" &&
            identityGeneration.Valid && identityGeneration.String == "BY DEFAULT" &&
            identityKind.Valid && identityKind.String == "BIT_REVERSED_POSITIVE_SEQUENCE"

        defValue := ""
        defEmpty := true
        if columnDefault.Valid {
            defValue = columnDefault.String
            defEmpty = false
        }

        col := &core.Column{
            Name: name,
            SQLType: core.SQLType{Name: sqlType},
            Length: length,
            Nullable: isNullable == "YES",
            IsAutoIncrement: autoincrement,
            Indexes: map[string]int{},
            Default: defValue,
            DefaultIsEmpty: defEmpty,
        }
        columns[name] = col
        colNames = append(colNames, name)
    }

    return colNames, columns, rows.Err()
}

func (s *spanner) CreateTableSql(table *core.Table, tableName, _, charset string) string {
    sql := "CREATE TABLE " + s.Quote(tableName) + " ("

    for i, col := range table.Columns() {
        if i > 0 {
            sql += ", "
        }

        sql += s.Quote(col.Name) + " " + s.SqlType(col)
        if !col.Nullable {
            sql += " NOT NULL"
        }
        if col.Default != "" {
            sql += " DEFAULT (" + col.Default + ")"
        }
        if col.IsAutoIncrement {
            sql += " GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE)"
        }
    }

    sql += ") PRIMARY KEY (" + strings.Join(table.PrimaryKeys, ",") + ")"
    return sql
}

func (s *spanner) CreateIndexSql(tableName string, index *core.Index) string {
    sql := "CREATE "
    if index.Type == core.UniqueType {
        sql += "UNIQUE NULL_FILTERED "
    }
    sql += "INDEX " + s.Quote(index.XName(tableName)) + " ON " + s.Quote(tableName) + " (" + strings.Join(index.Cols, ", ") + ")"
    return sql
}

func (s *spanner) IndexCheckSql(tableName, indexName string) (string, []any) {
    return `SELECT index_name FROM information_schema.indexes
        WHERE table_name = ? AND table_schema = "" AND index_name = ?`,
        []any{tableName, indexName}
}

func (s *spanner) TableCheckSql(tableName string) (string, []any) {
    return `SELECT table_name FROM information_schema.tables
        WHERE table_name = ? AND table_schema = ""`,
        []any{tableName}
}

func (s *spanner) GetTables() ([]*core.Table, error) {
    res, err := s.DB().Query(`SELECT table_name FROM information_schema.tables WHERE table_schema = ""`)
    if err != nil {
        return nil, err
    }
    defer res.Close()

    tables := []*core.Table{}
    for res.Next() {
        var name string
        if err := res.Scan(&name); err != nil {
            return nil, err
        }
        t := core.NewEmptyTable()
        t.Name = name
        tables = append(tables, t)
    }
    return tables, res.Err()
}

func (s *spanner) GetIndexes(tableName string) (map[string]*core.Index, error) {
    res, err := s.DB().Query(`SELECT ix.INDEX_NAME, ix.INDEX_TYPE, ix.IS_UNIQUE, c.COLUMN_NAME
        FROM INFORMATION_SCHEMA.INDEXES ix
        JOIN INFORMATION_SCHEMA.INDEX_COLUMNS c ON (ix.TABLE_NAME=c.TABLE_NAME AND ix.INDEX_NAME=c.INDEX_NAME)
        WHERE ix.TABLE_SCHEMA = "" AND ix.TABLE_NAME=?
        ORDER BY ix.INDEX_NAME, c.ORDINAL_POSITION`, tableName)
    if err != nil {
        return nil, err
    }
    defer res.Close()

    indexes := map[string]*core.Index{}
    var ixName, ixType, colName string
    var isUnique bool
    for res.Next() {
        err := res.Scan(&ixName, &ixType, &isUnique, &colName)
        if err != nil {
            return nil, err
        }

        isRegular := false
        if strings.HasPrefix(ixName, "IDX_"+tableName) || strings.HasPrefix(ixName, "UQE_"+tableName) {
            ixName = ixName[5+len(tableName):]
            isRegular = true
        }

        var index *core.Index
        var ok bool
        if index, ok = indexes[ixName]; !ok {
            t := core.IndexType // ixType == "INDEX" && !isUnique
            if ixType == "PRIMARY KEY" || isUnique {
                t = core.UniqueType
            }

            index = &core.Index{}
            index.IsRegular = isRegular
            index.Type = t
            index.Name = ixName
            indexes[ixName] = index
        }
        index.AddColumn(colName)
    }
    return indexes, res.Err()
}

func (s *spanner) CreateSequenceGenerator(db *sql.DB) (SequenceGenerator, error) {
    dsn := s.DataSourceName()
    connectorConfig, err := spannerdriver.ExtractConnectorConfig(dsn)
    if err != nil {
        return nil, err
    }

    if connectorConfig.Params[strings.ToLower("inMemSequenceGenerator")] == "true" {
        // Switch to using in-memory sequence number generator.
        // Using database-based sequence generator doesn't work with emulator, as emulator
        // only supports single transaction. If there is already another transaction started
        // generating new ID via database-based sequence generator would always fail.
        return newInMemSequenceGenerator(), nil
    }

    return newSequenceGenerator(db), nil
}

func (s *spanner) RetryOnError(err error) bool {
    return err != nil && spannerclient.ErrCode(spannerclient.ToSpannerError(err)) == codes.Aborted
}
@@ -15,8 +15,9 @@ import (
    "sync"
    "time"

    "github.com/grafana/grafana/pkg/util/xorm/core"
    "xorm.io/builder"

    "github.com/grafana/grafana/pkg/util/xorm/core"
)

// ErrNoElementsOnSlice represents an error there is no element when insert

@@ -422,10 +423,6 @@ func (session *Session) innerInsert(bean any) (int64, error) {
        buf.WriteString(" RETURNING " + session.engine.Quote(table.AutoIncrement))
    }

    if len(table.AutoIncrement) > 0 && session.engine.dialect.DBType() == "spanner" {
        buf.WriteString(" THEN RETURN " + session.engine.Quote(table.AutoIncrement))
    }

    sqlStr := buf.String()
    args = buf.Args()
@@ -24,8 +24,6 @@ import (
const (
    // Version show the xorm's version
    Version string = "0.8.0.1015"

    Spanner = "spanner"
)

func regDrvsNDialects() bool {

@@ -102,12 +100,6 @@ func NewEngine(driverName string, dataSourceName string) (*Engine, error) {
    switch uri.DbType {
    case core.SQLITE:
        engine.DatabaseTZ = time.UTC
    case Spanner:
        engine.DatabaseTZ = time.UTC
        // We need to specify "Z" to indicate that timestamp is in UTC.
        // Otherwise Spanner uses default America/Los_Angeles timezone.
        // https://cloud.google.com/spanner/docs/reference/standard-sql/data-types#time_zones
        engine.timestampFormat = "2006-01-02 15:04:05Z"
    default:
        engine.DatabaseTZ = time.Local
    }
@@ -1,29 +0,0 @@
//go:build enterprise || pro

package xorm

import (
    "fmt"
    "testing"

    "cloud.google.com/go/spanner/spannertest"
    _ "github.com/mattn/go-sqlite3"
    "github.com/stretchr/testify/require"
)

func TestBasicOperationsWithSpanner(t *testing.T) {
    span, err := spannertest.NewServer("localhost:0")
    require.NoError(t, err)
    defer span.Close()

    eng, err := NewEngine("spanner", fmt.Sprintf("%s/projects/test/instances/test/databases/test;usePlainText=true", span.Addr))
    require.NoError(t, err)
    require.NotNil(t, eng)
    require.Equal(t, "spanner", eng.DriverName())

    _, err = eng.Exec("CREATE TABLE test_struct (id int64, comment string(max), json string(max)) primary key (id)")
    require.NoError(t, err)

    // Currently broken because simple INSERT into spannertest doesn't work: https://github.com/googleapis/go-sql-spanner/issues/392
    // testBasicOperations(t, eng)
}