mirror of
https://github.com/jackc/pgx.git
synced 2025-09-04 19:37:10 +00:00
Compare commits
No commits in common. "master" and "v4.10.1" have entirely different histories.
54
.github/ISSUE_TEMPLATE/bug_report.md
vendored
54
.github/ISSUE_TEMPLATE/bug_report.md
vendored
@ -1,54 +0,0 @@
|
||||
---
|
||||
name: Bug report
|
||||
about: Create a report to help us improve
|
||||
title: ''
|
||||
labels: bug
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Describe the bug**
|
||||
A clear and concise description of what the bug is.
|
||||
|
||||
**To Reproduce**
|
||||
Steps to reproduce the behavior:
|
||||
|
||||
If possible, please provide runnable example such as:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
)
|
||||
|
||||
func main() {
|
||||
conn, err := pgx.Connect(context.Background(), os.Getenv("DATABASE_URL"))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer conn.Close(context.Background())
|
||||
|
||||
// Your code here...
|
||||
}
|
||||
```
|
||||
|
||||
Please run your example with the race detector enabled. For example, `go run -race main.go` or `go test -race`.
|
||||
|
||||
**Expected behavior**
|
||||
A clear and concise description of what you expected to happen.
|
||||
|
||||
**Actual behavior**
|
||||
A clear and concise description of what actually happened.
|
||||
|
||||
**Version**
|
||||
- Go: `$ go version` -> [e.g. go version go1.18.3 darwin/amd64]
|
||||
- PostgreSQL: `$ psql --no-psqlrc --tuples-only -c 'select version()'` -> [e.g. PostgreSQL 14.4 on x86_64-apple-darwin21.5.0, compiled by Apple clang version 13.1.6 (clang-1316.0.21.2.5), 64-bit]
|
||||
- pgx: `$ grep 'github.com/jackc/pgx/v[0-9]' go.mod` -> [e.g. v4.16.1]
|
||||
|
||||
**Additional context**
|
||||
Add any other context about the problem here.
|
20
.github/ISSUE_TEMPLATE/feature_request.md
vendored
20
.github/ISSUE_TEMPLATE/feature_request.md
vendored
@ -1,20 +0,0 @@
|
||||
---
|
||||
name: Feature request
|
||||
about: Suggest an idea for this project
|
||||
title: ''
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Is your feature request related to a problem? Please describe.**
|
||||
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
|
||||
|
||||
**Describe the solution you'd like**
|
||||
A clear and concise description of what you want to happen.
|
||||
|
||||
**Describe alternatives you've considered**
|
||||
A clear and concise description of any alternative solutions or features you've considered.
|
||||
|
||||
**Additional context**
|
||||
Add any other context or screenshots about the feature request here.
|
10
.github/ISSUE_TEMPLATE/other-issues.md
vendored
10
.github/ISSUE_TEMPLATE/other-issues.md
vendored
@ -1,10 +0,0 @@
|
||||
---
|
||||
name: Other issues
|
||||
about: Any issue that is not a bug or a feature request
|
||||
title: ''
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
Please describe the issue in detail. If this is a question about how to use pgx please use discussions instead.
|
156
.github/workflows/ci.yml
vendored
156
.github/workflows/ci.yml
vendored
@ -1,156 +0,0 @@
|
||||
name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [master]
|
||||
pull_request:
|
||||
branches: [master]
|
||||
|
||||
jobs:
|
||||
test:
|
||||
name: Test
|
||||
runs-on: ubuntu-22.04
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
go-version: ["1.23", "1.24"]
|
||||
pg-version: [13, 14, 15, 16, 17, cockroachdb]
|
||||
include:
|
||||
- pg-version: 13
|
||||
pgx-test-database: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
|
||||
pgx-test-unix-socket-conn-string: "host=/var/run/postgresql dbname=pgx_test"
|
||||
pgx-test-tcp-conn-string: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
|
||||
pgx-test-scram-password-conn-string: "host=127.0.0.1 user=pgx_scram password=secret dbname=pgx_test"
|
||||
pgx-test-md5-password-conn-string: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
|
||||
pgx-test-plain-password-conn-string: "host=127.0.0.1 user=pgx_pw password=secret dbname=pgx_test"
|
||||
pgx-test-tls-conn-string: "host=localhost user=pgx_ssl password=secret sslmode=verify-full sslrootcert=/tmp/ca.pem dbname=pgx_test"
|
||||
pgx-ssl-password: certpw
|
||||
pgx-test-tls-client-conn-string: "host=localhost user=pgx_sslcert sslmode=verify-full sslrootcert=/tmp/ca.pem sslcert=/tmp/pgx_sslcert.crt sslkey=/tmp/pgx_sslcert.key dbname=pgx_test"
|
||||
- pg-version: 14
|
||||
pgx-test-database: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
|
||||
pgx-test-unix-socket-conn-string: "host=/var/run/postgresql dbname=pgx_test"
|
||||
pgx-test-tcp-conn-string: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
|
||||
pgx-test-scram-password-conn-string: "host=127.0.0.1 user=pgx_scram password=secret dbname=pgx_test"
|
||||
pgx-test-md5-password-conn-string: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
|
||||
pgx-test-plain-password-conn-string: "host=127.0.0.1 user=pgx_pw password=secret dbname=pgx_test"
|
||||
pgx-test-tls-conn-string: "host=localhost user=pgx_ssl password=secret sslmode=verify-full sslrootcert=/tmp/ca.pem dbname=pgx_test"
|
||||
pgx-ssl-password: certpw
|
||||
pgx-test-tls-client-conn-string: "host=localhost user=pgx_sslcert sslmode=verify-full sslrootcert=/tmp/ca.pem sslcert=/tmp/pgx_sslcert.crt sslkey=/tmp/pgx_sslcert.key dbname=pgx_test"
|
||||
- pg-version: 15
|
||||
pgx-test-database: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
|
||||
pgx-test-unix-socket-conn-string: "host=/var/run/postgresql dbname=pgx_test"
|
||||
pgx-test-tcp-conn-string: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
|
||||
pgx-test-scram-password-conn-string: "host=127.0.0.1 user=pgx_scram password=secret dbname=pgx_test"
|
||||
pgx-test-md5-password-conn-string: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
|
||||
pgx-test-plain-password-conn-string: "host=127.0.0.1 user=pgx_pw password=secret dbname=pgx_test"
|
||||
pgx-test-tls-conn-string: "host=localhost user=pgx_ssl password=secret sslmode=verify-full sslrootcert=/tmp/ca.pem dbname=pgx_test"
|
||||
pgx-ssl-password: certpw
|
||||
pgx-test-tls-client-conn-string: "host=localhost user=pgx_sslcert sslmode=verify-full sslrootcert=/tmp/ca.pem sslcert=/tmp/pgx_sslcert.crt sslkey=/tmp/pgx_sslcert.key dbname=pgx_test"
|
||||
- pg-version: 16
|
||||
pgx-test-database: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
|
||||
pgx-test-unix-socket-conn-string: "host=/var/run/postgresql dbname=pgx_test"
|
||||
pgx-test-tcp-conn-string: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
|
||||
pgx-test-scram-password-conn-string: "host=127.0.0.1 user=pgx_scram password=secret dbname=pgx_test"
|
||||
pgx-test-md5-password-conn-string: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
|
||||
pgx-test-plain-password-conn-string: "host=127.0.0.1 user=pgx_pw password=secret dbname=pgx_test"
|
||||
pgx-test-tls-conn-string: "host=localhost user=pgx_ssl password=secret sslmode=verify-full sslrootcert=/tmp/ca.pem dbname=pgx_test"
|
||||
pgx-ssl-password: certpw
|
||||
pgx-test-tls-client-conn-string: "host=localhost user=pgx_sslcert sslmode=verify-full sslrootcert=/tmp/ca.pem sslcert=/tmp/pgx_sslcert.crt sslkey=/tmp/pgx_sslcert.key dbname=pgx_test"
|
||||
- pg-version: 17
|
||||
pgx-test-database: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
|
||||
pgx-test-unix-socket-conn-string: "host=/var/run/postgresql dbname=pgx_test"
|
||||
pgx-test-tcp-conn-string: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
|
||||
pgx-test-scram-password-conn-string: "host=127.0.0.1 user=pgx_scram password=secret dbname=pgx_test"
|
||||
pgx-test-md5-password-conn-string: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
|
||||
pgx-test-plain-password-conn-string: "host=127.0.0.1 user=pgx_pw password=secret dbname=pgx_test"
|
||||
pgx-test-tls-conn-string: "host=localhost user=pgx_ssl password=secret sslmode=verify-full sslrootcert=/tmp/ca.pem dbname=pgx_test"
|
||||
pgx-ssl-password: certpw
|
||||
pgx-test-tls-client-conn-string: "host=localhost user=pgx_sslcert sslmode=verify-full sslrootcert=/tmp/ca.pem sslcert=/tmp/pgx_sslcert.crt sslkey=/tmp/pgx_sslcert.key dbname=pgx_test"
|
||||
- pg-version: cockroachdb
|
||||
pgx-test-database: "postgresql://root@127.0.0.1:26257/pgx_test?sslmode=disable&experimental_enable_temp_tables=on"
|
||||
|
||||
steps:
|
||||
- name: Check out code into the Go module directory
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Go ${{ matrix.go-version }}
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ matrix.go-version }}
|
||||
|
||||
- name: Setup database server for testing
|
||||
run: ci/setup_test.bash
|
||||
env:
|
||||
PGVERSION: ${{ matrix.pg-version }}
|
||||
|
||||
# - name: Setup upterm session
|
||||
# uses: lhotari/action-upterm@v1
|
||||
# with:
|
||||
# ## limits ssh access and adds the ssh public key for the user which triggered the workflow
|
||||
# limit-access-to-actor: true
|
||||
# env:
|
||||
# PGX_TEST_DATABASE: ${{ matrix.pgx-test-database }}
|
||||
# PGX_TEST_UNIX_SOCKET_CONN_STRING: ${{ matrix.pgx-test-unix-socket-conn-string }}
|
||||
# PGX_TEST_TCP_CONN_STRING: ${{ matrix.pgx-test-tcp-conn-string }}
|
||||
# PGX_TEST_SCRAM_PASSWORD_CONN_STRING: ${{ matrix.pgx-test-scram-password-conn-string }}
|
||||
# PGX_TEST_MD5_PASSWORD_CONN_STRING: ${{ matrix.pgx-test-md5-password-conn-string }}
|
||||
# PGX_TEST_PLAIN_PASSWORD_CONN_STRING: ${{ matrix.pgx-test-plain-password-conn-string }}
|
||||
# PGX_TEST_TLS_CONN_STRING: ${{ matrix.pgx-test-tls-conn-string }}
|
||||
# PGX_SSL_PASSWORD: ${{ matrix.pgx-ssl-password }}
|
||||
# PGX_TEST_TLS_CLIENT_CONN_STRING: ${{ matrix.pgx-test-tls-client-conn-string }}
|
||||
|
||||
- name: Check formatting
|
||||
run: |
|
||||
gofmt -l -s -w .
|
||||
git status
|
||||
git diff --exit-code
|
||||
|
||||
- name: Test
|
||||
# parallel testing is disabled because somehow parallel testing causes Github Actions to kill the runner.
|
||||
run: go test -parallel=1 -race ./...
|
||||
env:
|
||||
PGX_TEST_DATABASE: ${{ matrix.pgx-test-database }}
|
||||
PGX_TEST_UNIX_SOCKET_CONN_STRING: ${{ matrix.pgx-test-unix-socket-conn-string }}
|
||||
PGX_TEST_TCP_CONN_STRING: ${{ matrix.pgx-test-tcp-conn-string }}
|
||||
PGX_TEST_SCRAM_PASSWORD_CONN_STRING: ${{ matrix.pgx-test-scram-password-conn-string }}
|
||||
PGX_TEST_MD5_PASSWORD_CONN_STRING: ${{ matrix.pgx-test-md5-password-conn-string }}
|
||||
PGX_TEST_PLAIN_PASSWORD_CONN_STRING: ${{ matrix.pgx-test-plain-password-conn-string }}
|
||||
# TestConnectTLS fails. However, it succeeds if I connect to the CI server with upterm and run it. Give up on that test for now.
|
||||
# PGX_TEST_TLS_CONN_STRING: ${{ matrix.pgx-test-tls-conn-string }}
|
||||
PGX_SSL_PASSWORD: ${{ matrix.pgx-ssl-password }}
|
||||
PGX_TEST_TLS_CLIENT_CONN_STRING: ${{ matrix.pgx-test-tls-client-conn-string }}
|
||||
|
||||
test-windows:
|
||||
name: Test Windows
|
||||
runs-on: windows-latest
|
||||
strategy:
|
||||
matrix:
|
||||
go-version: ["1.23", "1.24"]
|
||||
|
||||
steps:
|
||||
- name: Setup PostgreSQL
|
||||
id: postgres
|
||||
uses: ikalnytskyi/action-setup-postgres@v4
|
||||
with:
|
||||
database: pgx_test
|
||||
|
||||
- name: Check out code into the Go module directory
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Go ${{ matrix.go-version }}
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ matrix.go-version }}
|
||||
|
||||
- name: Initialize test database
|
||||
run: |
|
||||
psql -f testsetup/postgresql_setup.sql pgx_test
|
||||
env:
|
||||
PGSERVICE: ${{ steps.postgres.outputs.service-name }}
|
||||
shell: bash
|
||||
|
||||
- name: Test
|
||||
# parallel testing is disabled because somehow parallel testing causes Github Actions to kill the runner.
|
||||
run: go test -parallel=1 -race ./...
|
||||
env:
|
||||
PGX_TEST_DATABASE: ${{ steps.postgres.outputs.connection-uri }}
|
3
.gitignore
vendored
3
.gitignore
vendored
@ -22,6 +22,3 @@ _testmain.go
|
||||
*.exe
|
||||
|
||||
.envrc
|
||||
/.testdb
|
||||
|
||||
.DS_Store
|
||||
|
@ -1,21 +0,0 @@
|
||||
# See for configurations: https://golangci-lint.run/usage/configuration/
|
||||
version: 2
|
||||
|
||||
# See: https://golangci-lint.run/usage/formatters/
|
||||
formatters:
|
||||
default: none
|
||||
enable:
|
||||
- gofmt # https://pkg.go.dev/cmd/gofmt
|
||||
- gofumpt # https://github.com/mvdan/gofumpt
|
||||
|
||||
settings:
|
||||
gofmt:
|
||||
simplify: true # Simplify code: gofmt with `-s` option.
|
||||
|
||||
gofumpt:
|
||||
# Module path which contains the source code being formatted.
|
||||
# Default: ""
|
||||
module-path: github.com/jackc/pgx/v5 # Should match with module in go.mod
|
||||
# Choose whether to use the extra rules.
|
||||
# Default: false
|
||||
extra-rules: true
|
33
.travis.yml
Normal file
33
.travis.yml
Normal file
@ -0,0 +1,33 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.15.x
|
||||
- 1.14.x
|
||||
- tip
|
||||
|
||||
# Derived from https://github.com/lib/pq/blob/master/.travis.yml
|
||||
before_install:
|
||||
- ./travis/before_install.bash
|
||||
|
||||
env:
|
||||
global:
|
||||
- GO111MODULE=on
|
||||
- PGX_TEST_DATABASE=postgres://pgx_md5:secret@127.0.0.1/pgx_test
|
||||
|
||||
matrix:
|
||||
- CRATEVERSION=2.1 PGX_TEST_CRATEDB_CONN_STRING="host=127.0.0.1 port=6543 user=pgx database=pgx_test"
|
||||
- PGVERSION=12
|
||||
- PGVERSION=11
|
||||
- PGVERSION=10
|
||||
- PGVERSION=9.6
|
||||
- PGVERSION=9.5
|
||||
|
||||
before_script:
|
||||
- ./travis/before_script.bash
|
||||
|
||||
script:
|
||||
- ./travis/script.bash
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
541
CHANGELOG.md
541
CHANGELOG.md
@ -1,462 +1,177 @@
|
||||
# 5.7.5 (May 17, 2025)
|
||||
|
||||
* Support sslnegotiation connection option (divyam234)
|
||||
* Update golang.org/x/crypto to v0.37.0. This placates security scanners that were unable to see that pgx did not use the behavior affected by https://pkg.go.dev/vuln/GO-2025-3487.
|
||||
* TraceLog now logs Acquire and Release at the debug level (dave sinclair)
|
||||
* Add support for PGTZ environment variable
|
||||
* Add support for PGOPTIONS environment variable
|
||||
* Unpin memory used by Rows quicker
|
||||
* Remove PlanScan memoization. This resolves a rare issue where scanning could be broken for one type by first scanning another. The problem was in the memoization system and benchmarking revealed that memoization was not providing any meaningful benefit.
|
||||
|
||||
# 5.7.4 (March 24, 2025)
|
||||
|
||||
* Fix / revert change to scanning JSON `null` (Felix Röhrich)
|
||||
|
||||
# 5.7.3 (March 21, 2025)
|
||||
|
||||
* Expose EmptyAcquireWaitTime in pgxpool.Stat (vamshiaruru32)
|
||||
* Improve SQL sanitizer performance (ninedraft)
|
||||
* Fix Scan confusion with json(b), sql.Scanner, and automatic dereferencing (moukoublen, felix-roehrich)
|
||||
* Fix Values() for xml type always returning nil instead of []byte
|
||||
* Add ability to send Flush message in pipeline mode (zenkovev)
|
||||
* Fix pgtype.Timestamp's JSON behavior to match PostgreSQL (pconstantinou)
|
||||
* Better error messages when scanning structs (logicbomb)
|
||||
* Fix handling of error on batch write (bonnefoa)
|
||||
* Match libpq's connection fallback behavior more closely (felix-roehrich)
|
||||
* Add MinIdleConns to pgxpool (djahandarie)
|
||||
|
||||
# 5.7.2 (December 21, 2024)
|
||||
|
||||
* Fix prepared statement already exists on batch prepare failure
|
||||
* Add commit query to tx options (Lucas Hild)
|
||||
* Fix pgtype.Timestamp json unmarshal (Shean de Montigny-Desautels)
|
||||
* Add message body size limits in frontend and backend (zene)
|
||||
* Add xid8 type
|
||||
* Ensure planning encodes and scans cannot infinitely recurse
|
||||
* Implement pgtype.UUID.String() (Konstantin Grachev)
|
||||
* Switch from ExecParams to Exec in ValidateConnectTargetSessionAttrs functions (Alexander Rumyantsev)
|
||||
* Update golang.org/x/crypto
|
||||
* Fix json(b) columns prefer sql.Scanner interface like database/sql (Ludovico Russo)
|
||||
|
||||
# 5.7.1 (September 10, 2024)
|
||||
|
||||
* Fix data race in tracelog.TraceLog
|
||||
* Update puddle to v2.2.2. This removes the import of nanotime via linkname.
|
||||
* Update golang.org/x/crypto and golang.org/x/text
|
||||
|
||||
# 5.7.0 (September 7, 2024)
|
||||
|
||||
* Add support for sslrootcert=system (Yann Soubeyrand)
|
||||
* Add LoadTypes to load multiple types in a single SQL query (Nick Farrell)
|
||||
* Add XMLCodec supports encoding + scanning XML column type like json (nickcruess-soda)
|
||||
* Add MultiTrace (Stepan Rabotkin)
|
||||
* Add TraceLogConfig with customizable TimeKey (stringintech)
|
||||
* pgx.ErrNoRows wraps sql.ErrNoRows to aid in database/sql compatibility with native pgx functions (merlin)
|
||||
* Support scanning binary formatted uint32 into string / TextScanner (jennifersp)
|
||||
* Fix interval encoding to allow 0s and avoid extra spaces (Carlos Pérez-Aradros Herce)
|
||||
* Update pgservicefile - fixes panic when parsing invalid file
|
||||
* Better error message when reading past end of batch
|
||||
* Don't print url when url.Parse returns an error (Kevin Biju)
|
||||
* Fix snake case name normalization collision in RowToStructByName with db tag (nolandseigler)
|
||||
* Fix: Scan and encode types with underlying types of arrays
|
||||
|
||||
# 5.6.0 (May 25, 2024)
|
||||
|
||||
* Add StrictNamedArgs (Tomas Zahradnicek)
|
||||
* Add support for macaddr8 type (Carlos Pérez-Aradros Herce)
|
||||
* Add SeverityUnlocalized field to PgError / Notice
|
||||
* Performance optimization of RowToStructByPos/Name (Zach Olstein)
|
||||
* Allow customizing context canceled behavior for pgconn
|
||||
* Add ScanLocation to pgtype.Timestamp[tz]Codec
|
||||
* Add custom data to pgconn.PgConn
|
||||
* Fix ResultReader.Read() to handle nil values
|
||||
* Do not encode interval microseconds when they are 0 (Carlos Pérez-Aradros Herce)
|
||||
* pgconn.SafeToRetry checks for wrapped errors (tjasko)
|
||||
* Failed connection attempts include all errors
|
||||
* Optimize LargeObject.Read (Mitar)
|
||||
* Add tracing for connection acquire and release from pool (ngavinsir)
|
||||
* Fix encode driver.Valuer not called when nil
|
||||
* Add support for custom JSON marshal and unmarshal (Mitar)
|
||||
* Use Go default keepalive for TCP connections (Hans-Joachim Kliemeck)
|
||||
|
||||
# 5.5.5 (March 9, 2024)
|
||||
|
||||
Use spaces instead of parentheses for SQL sanitization.
|
||||
|
||||
This still solves the problem of negative numbers creating a line comment, but this avoids breaking edge cases such as
|
||||
`set foo to $1` where the substitution is taking place in a location where an arbitrary expression is not allowed.
|
||||
|
||||
# 5.5.4 (March 4, 2024)
|
||||
|
||||
Fix CVE-2024-27304
|
||||
|
||||
SQL injection can occur if an attacker can cause a single query or bind message to exceed 4 GB in size. An integer
|
||||
overflow in the calculated message size can cause the one large message to be sent as multiple messages under the
|
||||
attacker's control.
|
||||
|
||||
Thanks to Paul Gerste for reporting this issue.
|
||||
|
||||
* Fix behavior of CollectRows to return empty slice if Rows are empty (Felix)
|
||||
* Fix simple protocol encoding of json.RawMessage
|
||||
* Fix *Pipeline.getResults should close pipeline on error
|
||||
* Fix panic in TryFindUnderlyingTypeScanPlan (David Kurman)
|
||||
* Fix deallocation of invalidated cached statements in a transaction
|
||||
* Handle invalid sslkey file
|
||||
* Fix scan float4 into sql.Scanner
|
||||
* Fix pgtype.Bits not making copy of data from read buffer. This would cause the data to be corrupted by future reads.
|
||||
|
||||
# 5.5.3 (February 3, 2024)
|
||||
|
||||
* Fix: prepared statement already exists
|
||||
* Improve CopyFrom auto-conversion of text-ish values
|
||||
* Add ltree type support (Florent Viel)
|
||||
* Make some properties of Batch and QueuedQuery public (Pavlo Golub)
|
||||
* Add AppendRows function (Edoardo Spadolini)
|
||||
* Optimize convert UUID [16]byte to string (Kirill Malikov)
|
||||
* Fix: LargeObject Read and Write of more than ~1GB at a time (Mitar)
|
||||
|
||||
# 5.5.2 (January 13, 2024)
|
||||
|
||||
* Allow NamedArgs to start with underscore
|
||||
* pgproto3: Maximum message body length support (jeremy.spriet)
|
||||
* Upgrade golang.org/x/crypto to v0.17.0
|
||||
* Add snake_case support to RowToStructByName (Tikhon Fedulov)
|
||||
* Fix: update description cache after exec prepare (James Hartig)
|
||||
* Fix: pipeline checks if it is closed (James Hartig and Ryan Fowler)
|
||||
* Fix: normalize timeout / context errors during TLS startup (Samuel Stauffer)
|
||||
* Add OnPgError for easier centralized error handling (James Hartig)
|
||||
|
||||
# 5.5.1 (December 9, 2023)
|
||||
|
||||
* Add CopyFromFunc helper function. (robford)
|
||||
* Add PgConn.Deallocate method that uses PostgreSQL protocol Close message.
|
||||
* pgx uses new PgConn.Deallocate method. This allows deallocating statements to work in a failed transaction. This fixes a case where the prepared statement map could become invalid.
|
||||
* Fix: Prefer driver.Valuer over json.Marshaler for json fields. (Jacopo)
|
||||
* Fix: simple protocol SQL sanitizer previously panicked if an invalid $0 placeholder was used. This now returns an error instead. (maksymnevajdev)
|
||||
* Add pgtype.Numeric.ScanScientific (Eshton Robateau)
|
||||
|
||||
# 5.5.0 (November 4, 2023)
|
||||
|
||||
* Add CollectExactlyOneRow. (Julien GOTTELAND)
|
||||
* Add OpenDBFromPool to create *database/sql.DB from *pgxpool.Pool. (Lev Zakharov)
|
||||
* Prepare can automatically choose statement name based on sql. This makes it easier to explicitly manage prepared statements.
|
||||
* Statement cache now uses deterministic, stable statement names.
|
||||
* database/sql prepared statement names are deterministically generated.
|
||||
* Fix: SendBatch wasn't respecting context cancellation.
|
||||
* Fix: Timeout error from pipeline is now normalized.
|
||||
* Fix: database/sql encoding json.RawMessage to []byte.
|
||||
* CancelRequest: Wait for the cancel request to be acknowledged by the server. This should improve PgBouncer compatibility. (Anton Levakin)
|
||||
* stdlib: Use Ping instead of CheckConn in ResetSession
|
||||
* Add json.Marshaler and json.Unmarshaler for Float4, Float8 (Kirill Mironov)
|
||||
|
||||
# 5.4.3 (August 5, 2023)
|
||||
|
||||
* Fix: QCharArrayOID was defined with the wrong OID (Christoph Engelbert)
|
||||
* Fix: connect_timeout for sslmode=allow|prefer (smaher-edb)
|
||||
* Fix: pgxpool: background health check cannot overflow pool
|
||||
* Fix: Check for nil in defer when sending batch (recover properly from panic)
|
||||
* Fix: json scan of non-string pointer to pointer
|
||||
* Fix: zeronull.Timestamptz should use pgtype.Timestamptz
|
||||
* Fix: NewConnsCount was not correctly counting connections created by Acquire directly. (James Hartig)
|
||||
* RowTo(AddrOf)StructByPos ignores fields with "-" db tag
|
||||
* Optimization: improve text format numeric parsing (horpto)
|
||||
|
||||
# 5.4.2 (July 11, 2023)
|
||||
|
||||
* Fix: RowScanner errors are fatal to Rows
|
||||
* Fix: Enable failover efforts when pg_hba.conf disallows non-ssl connections (Brandon Kauffman)
|
||||
* Hstore text codec internal improvements (Evan Jones)
|
||||
* Fix: Stop timers for background reader when not in use. Fixes memory leak when closing connections (Adrian-Stefan Mares)
|
||||
* Fix: Stop background reader as soon as possible.
|
||||
* Add PgConn.SyncConn(). This combined with the above fix makes it safe to directly use the underlying net.Conn.
|
||||
|
||||
# 5.4.1 (June 18, 2023)
|
||||
|
||||
* Fix: concurrency bug with pgtypeDefaultMap and simple protocol (Lev Zakharov)
|
||||
* Add TxOptions.BeginQuery to allow overriding the default BEGIN query
|
||||
|
||||
# 5.4.0 (June 14, 2023)
|
||||
|
||||
* Replace platform specific syscalls for non-blocking IO with more traditional goroutines and deadlines. This returns to the v4 approach with some additional improvements and fixes. This restores the ability to use a pgx.Conn over an ssh.Conn as well as other non-TCP or Unix socket connections. In addition, it is a significantly simpler implementation that is less likely to have cross platform issues.
|
||||
* Optimization: The default type registrations are now shared among all connections. This saves about 100KB of memory per connection. `pgtype.Type` and `pgtype.Codec` values are now required to be immutable after registration. This was already necessary in most cases but wasn't documented until now. (Lev Zakharov)
|
||||
* Fix: Ensure pgxpool.Pool.QueryRow.Scan releases connection on panic
|
||||
* CancelRequest: don't try to read the reply (Nicola Murino)
|
||||
* Fix: correctly handle bool type aliases (Wichert Akkerman)
|
||||
* Fix: pgconn.CancelRequest: Fix unix sockets: don't use RemoteAddr()
|
||||
* Fix: pgx.Conn memory leak with prepared statement caching (Evan Jones)
|
||||
* Add BeforeClose to pgxpool.Pool (Evan Cordell)
|
||||
* Fix: various hstore fixes and optimizations (Evan Jones)
|
||||
* Fix: RowToStructByPos with embedded unexported struct
|
||||
* Support different bool string representations (Lev Zakharov)
|
||||
* Fix: error when using BatchResults.Exec on a select that returns an error after some rows.
|
||||
* Fix: pipelineBatchResults.Exec() not returning error from ResultReader
|
||||
* Fix: pipeline batch results not closing pipeline when error occurs while reading directly from results instead of using
|
||||
a callback.
|
||||
* Fix: scanning a table type into a struct
|
||||
* Fix: scan array of record to pointer to slice of struct
|
||||
* Fix: handle null for json (Cemre Mengu)
|
||||
* Batch Query callback is called even when there is an error
|
||||
* Add RowTo(AddrOf)StructByNameLax (Audi P. Risa P)
|
||||
|
||||
# 5.3.1 (February 27, 2023)
|
||||
|
||||
* Fix: Support v4 and v5 stdlib in same program (Tomáš Procházka)
|
||||
* Fix: sql.Scanner not being used in certain cases
|
||||
* Add text format jsonpath support
|
||||
* Fix: fake non-blocking read adaptive wait time
|
||||
|
||||
# 5.3.0 (February 11, 2023)
|
||||
|
||||
* Fix: json values work with sql.Scanner
|
||||
* Fixed / improved error messages (Mark Chambers and Yevgeny Pats)
|
||||
* Fix: support scan into single dimensional arrays
|
||||
* Fix: MaxConnLifetimeJitter setting actually jitter (Ben Weintraub)
|
||||
* Fix: driver.Value representation of bytea should be []byte not string
|
||||
* Fix: better handling of unregistered OIDs
|
||||
* CopyFrom can use query cache to avoid extra round trip to get OIDs (Alejandro Do Nascimento Mora)
|
||||
* Fix: encode to json ignoring driver.Valuer
|
||||
* Support sql.Scanner on renamed base type
|
||||
* Fix: pgtype.Numeric text encoding of negative numbers (Mark Chambers)
|
||||
* Fix: connect with multiple hostnames when one can't be resolved
|
||||
* Upgrade puddle to remove dependency on uber/atomic and fix alignment issue on 32-bit platform
|
||||
* Fix: scanning json column into **string
|
||||
* Multiple reductions in memory allocations
|
||||
* Fake non-blocking read adapts its max wait time
|
||||
* Improve CopyFrom performance and reduce memory usage
|
||||
* Fix: encode []any to array
|
||||
* Fix: LoadType for composite with dropped attributes (Felix Röhrich)
|
||||
* Support v4 and v5 stdlib in same program
|
||||
* Fix: text format array decoding with string of "NULL"
|
||||
* Prefer binary format for arrays
|
||||
|
||||
# 5.2.0 (December 5, 2022)
|
||||
|
||||
* `tracelog.TraceLog` implements the pgx.PrepareTracer interface. (Vitalii Solodilov)
|
||||
* Optimize creating begin transaction SQL string (Petr Evdokimov and ksco)
|
||||
* `Conn.LoadType` supports range and multirange types (Vitalii Solodilov)
|
||||
* Fix scan `uint` and `uint64` `ScanNumeric`. This resolves a PostgreSQL `numeric` being incorrectly scanned into `uint` and `uint64`.
|
||||
# 4.10.1 (December 19, 2020)
|
||||
|
||||
# 5.1.1 (November 17, 2022)
|
||||
* Fix panic on Query error with nil stmtcache.
|
||||
|
||||
* Fix simple query sanitizer where query text contains a Unicode replacement character.
|
||||
* Remove erroneous `name` argument from `DeallocateAll()`. Technically, this is a breaking change, but given that method was only added 5 days ago this change was accepted. (Bodo Kaiser)
|
||||
# 4.10.0 (December 3, 2020)
|
||||
|
||||
# 5.1.0 (November 12, 2022)
|
||||
* Add CopyFromSlice to simplify CopyFrom usage (Egon Elbre)
|
||||
* Remove broken prepared statements from stmtcache (Ethan Pailes)
|
||||
* stdlib: consider any Ping error as fatal
|
||||
* Update puddle to v1.1.3 - this fixes an issue where concurrent Acquires can hang when a connection cannot be established
|
||||
* Update pgtype to v1.6.2
|
||||
|
||||
* Update puddle to v2.1.2. This resolves a race condition and a deadlock in pgxpool.
|
||||
* `QueryRewriter.RewriteQuery` now returns an error. Technically, this is a breaking change for any external implementers, but given the minimal likelihood that there are actually any external implementers this change was accepted.
|
||||
* Expose `GetSSLPassword` support to pgx.
|
||||
* Fix encode `ErrorResponse` unknown field handling. This would only affect pgproto3 being used directly as a proxy with a non-PostgreSQL server that included additional error fields.
|
||||
* Fix date text format encoding with 5 digit years.
|
||||
* Fix date values passed to a `sql.Scanner` as `string` instead of `time.Time`.
|
||||
* DateCodec.DecodeValue can return `pgtype.InfinityModifier` instead of `string` for infinite values. This now matches the behavior of the timestamp types.
|
||||
* Add domain type support to `Conn.LoadType()`.
|
||||
* Add `RowToStructByName` and `RowToAddrOfStructByName`. (Pavlo Golub)
|
||||
* Add `Conn.DeallocateAll()` to clear all prepared statements including the statement cache. (Bodo Kaiser)
|
||||
# 4.9.2 (November 3, 2020)
|
||||
|
||||
# 5.0.4 (October 24, 2022)
|
||||
The underlying library updates fix an issue where appending to a scanned slice could corrupt other data.
|
||||
|
||||
* Fix: CollectOneRow prefers PostgreSQL error over pgx.ErrorNoRows
|
||||
* Fix: some reflect Kind checks to first check for nil
|
||||
* Bump golang.org/x/text dependency to placate snyk
|
||||
* Fix: RowToStructByPos on structs with multiple anonymous sub-structs (Baptiste Fontaine)
|
||||
* Fix: Exec checks if tx is closed
|
||||
* Update pgconn to v1.7.2
|
||||
* Update pgproto3 to v2.0.6
|
||||
|
||||
# 5.0.3 (October 14, 2022)
|
||||
# 4.9.1 (October 31, 2020)
|
||||
|
||||
* Fix `driver.Valuer` handling edge cases that could cause infinite loop or crash
|
||||
* Update pgconn to v1.7.1
|
||||
* Update pgtype to v1.6.1
|
||||
* Fix SendBatch of all prepared statements with statement cache disabled
|
||||
|
||||
# v5.0.2 (October 8, 2022)
|
||||
# 4.9.0 (September 26, 2020)
|
||||
|
||||
* Fix date encoding in text format to always use 2 digits for month and day
|
||||
* Prefer driver.Valuer over wrap plans when encoding
|
||||
* Fix scan to pointer to pointer to renamed type
|
||||
* Allow scanning NULL even if PG and Go types are incompatible
|
||||
* pgxpool now waits for connection cleanup to finish before making room in pool for another connection. This prevents temporarily exceeding max pool size.
|
||||
* Fix when scanning a column to nil to skip it on the first row but scanning it to a real value on a subsequent row.
|
||||
* Fix prefer simple protocol with prepared statements. (Jinzhu)
|
||||
* Fix FieldDescriptions not being available on Rows before calling Next the first time.
|
||||
* Various minor fixes in updated versions of pgconn, pgtype, and puddle.
|
||||
|
||||
# v5.0.1 (September 24, 2022)
|
||||
# 4.8.1 (July 29, 2020)
|
||||
|
||||
* Fix 32-bit atomic usage
|
||||
* Add MarshalJSON for Float8 (yogipristiawan)
|
||||
* Add `[` and `]` to text encoding of `Lseg`
|
||||
* Fix sqlScannerWrapper NULL handling
|
||||
* Update pgconn to v1.6.4
|
||||
* Fix deadlock on error after CommandComplete but before ReadyForQuery
|
||||
* Fix panic on parsing DSN with trailing '='
|
||||
|
||||
# v5.0.0 (September 17, 2022)
|
||||
# 4.8.0 (July 22, 2020)
|
||||
|
||||
## Merged Packages
|
||||
* All argument types supported by native pgx should now also work through database/sql
|
||||
* Update pgconn to v1.6.3
|
||||
* Update pgtype to v1.4.2
|
||||
|
||||
`github.com/jackc/pgtype`, `github.com/jackc/pgconn`, and `github.com/jackc/pgproto3` are now included in the main
|
||||
`github.com/jackc/pgx` repository. Previously there was confusion as to where issues should be reported, additional
|
||||
release work due to releasing multiple packages, and less clear changelogs.
|
||||
# 4.7.2 (July 14, 2020)
|
||||
|
||||
## pgconn
|
||||
* Improve performance of Columns() (zikaeroh)
|
||||
* Fix fatal Commit() failure not being considered fatal
|
||||
* Update pgconn to v1.6.2
|
||||
* Update pgtype to v1.4.1
|
||||
|
||||
`CommandTag` is now an opaque type instead of directly exposing an underlying `[]byte`.
|
||||
# 4.7.1 (June 29, 2020)
|
||||
|
||||
The return value `ResultReader.Values()` is no longer safe to retain a reference to after a subsequent call to `NextRow()` or `Close()`.
|
||||
* Fix stdlib decoding error with certain order and combination of fields
|
||||
|
||||
`Trace()` method adds low level message tracing similar to the `PQtrace` function in `libpq`.
|
||||
# 4.7.0 (June 27, 2020)
|
||||
|
||||
pgconn now uses non-blocking IO. This is a significant internal restructuring, but it should not cause any visible changes on its own. However, it is important in implementing other new features.
|
||||
* Update pgtype to v1.4.0
|
||||
* Update pgconn to v1.6.1
|
||||
* Update puddle to v1.1.1
|
||||
* Fix context propagation with Tx commit and Rollback (georgysavva)
|
||||
* Add lazy connect option to pgxpool (georgysavva)
|
||||
* Fix connection leak if pgxpool.BeginTx() fail (Jean-Baptiste Bronisz)
|
||||
* Add native Go slice support for strings and numbers to simple protocol
|
||||
* stdlib add default timeouts for Conn.Close() and Stmt.Close() (georgysavva)
|
||||
* Assorted performance improvements especially with large result sets
|
||||
* Fix close pool on not lazy connect failure (Yegor Myskin)
|
||||
* Add Config copy (georgysavva)
|
||||
* Support SendBatch with Simple Protocol (Jordan Lewis)
|
||||
* Better error logging on rows close (Igor V. Kozinov)
|
||||
* Expose stdlib.Conn.Conn() to enable database/sql.Conn.Raw()
|
||||
* Improve unknown type support for database/sql
|
||||
* Fix transaction commit failure closing connection
|
||||
|
||||
`CheckConn()` checks a connection's liveness by doing a non-blocking read. This can be used to detect database restarts or network interruptions without executing a query or a ping.
|
||||
# 4.6.0 (March 30, 2020)
|
||||
|
||||
pgconn now supports pipeline mode.
|
||||
* stdlib: Bail early if preloading rows.Next() results in rows.Err() (Bas van Beek)
|
||||
* Sanitize time to microsecond accuracy (Andrew Nicoll)
|
||||
* Update pgtype to v1.3.0
|
||||
* Update pgconn to v1.5.0
|
||||
* Update golang.org/x/crypto for security fix
|
||||
* Implement "verify-ca" SSL mode
|
||||
|
||||
`*PgConn.ReceiveResults` removed. Use pipeline mode instead.
|
||||
# 4.5.0 (March 7, 2020)
|
||||
|
||||
`Timeout()` no longer considers `context.Canceled` as a timeout error. `context.DeadlineExceeded` still is considered a timeout error.
|
||||
* Update to pgconn v1.4.0
|
||||
* Fixes QueryRow with empty SQL
|
||||
* Adds PostgreSQL service file support
|
||||
* Add Len() to *pgx.Batch (WGH)
|
||||
* Better logging for individual batch items (Ben Bader)
|
||||
|
||||
## pgxpool
|
||||
# 4.4.1 (February 14, 2020)
|
||||
|
||||
`Connect` and `ConnectConfig` have been renamed to `New` and `NewWithConfig` respectively. The `LazyConnect` option has been removed. Pools always lazily connect.
|
||||
* Update pgconn to v1.3.2 - better default read buffer size
|
||||
* Fix race in CopyFrom
|
||||
|
||||
## pgtype
|
||||
# 4.4.0 (February 5, 2020)
|
||||
|
||||
The `pgtype` package has been significantly changed.
|
||||
* Update puddle to v1.1.0 - fixes possible deadlock when acquire is cancelled
|
||||
* Update pgconn to v1.3.1 - fixes CopyFrom deadlock when multiple NoticeResponse received during copy
|
||||
* Update pgtype to v1.2.0
|
||||
* Add MaxConnIdleTime to pgxpool (Patrick Ellul)
|
||||
* Add MinConns to pgxpool (Patrick Ellul)
|
||||
* Fix: stdlib.ReleaseConn closes connections left in invalid state
|
||||
|
||||
### NULL Representation
|
||||
# 4.3.0 (January 23, 2020)
|
||||
|
||||
Previously, types had a `Status` field that could be `Undefined`, `Null`, or `Present`. This has been changed to a
|
||||
`Valid` `bool` field to harmonize with how `database/sql` represents `NULL` and to make the zero value useable.
|
||||
* Fix Rows.Values panic when unable to decode
|
||||
* Add Rows.Values support for unknown types
|
||||
* Add DriverContext support for stdlib (Alex Gaynor)
|
||||
* Update pgproto3 to v2.0.1 to never return an io.EOF as it would be misinterpreted by database/sql. Instead return io.UnexpectedEOF.
|
||||
|
||||
Previously, a type that implemented `driver.Valuer` would have the `Value` method called even on a nil pointer. All nils
|
||||
whether typed or untyped now represent `NULL`.
|
||||
# 4.2.1 (January 13, 2020)
|
||||
|
||||
### Codec and Value Split
|
||||
* Update pgconn to v1.2.1 (fixes context cancellation data race introduced in v1.2.0)
|
||||
|
||||
Previously, the type system combined decoding and encoding values with the value types. e.g. Type `Int8` both handled
|
||||
encoding and decoding the PostgreSQL representation and acted as a value object. This caused some difficulties when
|
||||
there was not an exact 1 to 1 relationship between the Go types and the PostgreSQL types. For example, scanning a
|
||||
PostgreSQL binary `numeric` into a Go `float64` was awkward (see https://github.com/jackc/pgtype/issues/147). These
|
||||
concepts have been separated. A `Codec` only has responsibility for encoding and decoding values. Value types are
|
||||
generally defined by implementing an interface that a particular `Codec` understands (e.g. `PointScanner` and
|
||||
`PointValuer` for the PostgreSQL `point` type).
|
||||
# 4.2.0 (January 11, 2020)
|
||||
|
||||
### Array Types
|
||||
* Update pgconn to v1.2.0.
|
||||
* Update pgtype to v1.1.0.
|
||||
* Return error instead of panic when wrong number of arguments passed to Exec. (malstoun)
|
||||
* Fix large objects functionality when PreferSimpleProtocol = true.
|
||||
* Restore GetDefaultDriver which existed in v3. (Johan Brandhorst)
|
||||
* Add RegisterConnConfig to stdlib which replaces the removed RegisterDriverConfig from v3.
|
||||
|
||||
All array types are now handled by `ArrayCodec` instead of using code generation for each new array type. This also
|
||||
means that less common array types such as `point[]` are now supported. `Array[T]` supports PostgreSQL multi-dimensional
|
||||
arrays.
|
||||
# 4.1.2 (October 22, 2019)
|
||||
|
||||
### Composite Types
|
||||
* Fix dbSavepoint.Begin recursive self call
|
||||
* Upgrade pgtype to v1.0.2 - fix scan pointer to pointer
|
||||
|
||||
Composite types must be registered before use. `CompositeFields` may still be used to construct and destruct composite
|
||||
values, but any type may now implement `CompositeIndexGetter` and `CompositeIndexScanner` to be used as a composite.
|
||||
# 4.1.1 (October 21, 2019)
|
||||
|
||||
### Range Types
|
||||
* Fix pgxpool Rows.CommandTag() infinite loop / typo
|
||||
|
||||
Range types are now handled with types `RangeCodec` and `Range[T]`. This allows additional user defined range types to
|
||||
easily be handled. Multirange types are handled similarly with `MultirangeCodec` and `Multirange[T]`.
|
||||
# 4.1.0 (October 12, 2019)
|
||||
|
||||
### pgxtype
|
||||
## Potentially Breaking Changes
|
||||
|
||||
`LoadDataType` moved to `*Conn` as `LoadType`.
|
||||
Technically, two changes are breaking changes, but in practice these are extremely unlikely to break existing code.
|
||||
|
||||
### Bytea
|
||||
* Conn.Begin and Conn.BeginTx return a Tx interface instead of the internal dbTx struct. This is necessary for the Conn.Begin method to have the same signature as other methods that begin a transaction.
|
||||
* Add Conn() to Tx interface. This is necessary to allow code using a Tx to access the *Conn (and pgconn.PgConn) on which the Tx is executing.
|
||||
|
||||
The `Bytea` and `GenericBinary` types have been replaced. Use the following instead:
|
||||
## Fixes
|
||||
|
||||
* `[]byte` - For normal usage directly use `[]byte`.
|
||||
* `DriverBytes` - Uses driver memory only available until next database method call. Avoids a copy and an allocation.
|
||||
* `PreallocBytes` - Uses preallocated byte slice to avoid an allocation.
|
||||
* `UndecodedBytes` - Avoids any decoding. Allows working with raw bytes.
|
||||
* Releasing a busy connection closes the connection instead of returning an unusable connection to the pool
|
||||
* Do not mutate config.Config.OnNotification in connect
|
||||
|
||||
### Dropped lib/pq Support
|
||||
# 4.0.1 (September 19, 2019)
|
||||
|
||||
`pgtype` previously supported and was tested against [lib/pq](https://github.com/lib/pq). While it will continue to work
|
||||
in most cases this is no longer supported.
|
||||
* Fix statement cache cleanup.
|
||||
* Corrected daterange OID.
|
||||
* Fix Tx when committing or rolling back multiple times in certain cases.
|
||||
* Improve documentation.
|
||||
|
||||
### database/sql Scan
|
||||
# 4.0.0 (September 14, 2019)
|
||||
|
||||
Previously, most `Scan` implementations would convert `[]byte` to `string` automatically to decode a text value. Now
|
||||
only `string` is handled. This is to allow the possibility of future binary support in `database/sql` mode by
|
||||
considering `[]byte` to be binary format and `string` text format. This change should have no effect for any use with
|
||||
`pgx`. The previous behavior was only necessary for `lib/pq` compatibility.
|
||||
v4 is a major release with many significant changes some of which are breaking changes. The most significant are
|
||||
included below.
|
||||
|
||||
Added `*Map.SQLScanner` to create a `sql.Scanner` for types such as `[]int32` and `Range[T]` that do not implement
|
||||
`sql.Scanner` directly.
|
||||
|
||||
### Number Type Fields Include Bit size
|
||||
|
||||
`Int2`, `Int4`, `Int8`, `Float4`, `Float8`, and `Uint32` fields now include bit size. e.g. `Int` is renamed to `Int64`.
|
||||
This matches the convention set by `database/sql`. In addition, for comparable types like `pgtype.Int8` and
|
||||
`sql.NullInt64` the structures are identical. This means they can be directly converted one to another.
|
||||
|
||||
### 3rd Party Type Integrations
|
||||
|
||||
* Extracted integrations with https://github.com/shopspring/decimal and https://github.com/gofrs/uuid to
|
||||
https://github.com/jackc/pgx-shopspring-decimal and https://github.com/jackc/pgx-gofrs-uuid respectively. This trims
|
||||
the pgx dependency tree.
|
||||
|
||||
### Other Changes
|
||||
|
||||
* `Bit` and `Varbit` are both replaced by the `Bits` type.
|
||||
* `CID`, `OID`, `OIDValue`, and `XID` are replaced by the `Uint32` type.
|
||||
* `Hstore` is now defined as `map[string]*string`.
|
||||
* `JSON` and `JSONB` types removed. Use `[]byte` or `string` directly.
|
||||
* `QChar` type removed. Use `rune` or `byte` directly.
|
||||
* `Inet` and `Cidr` types removed. Use `netip.Addr` and `netip.Prefix` directly. These types are more memory efficient than the previous `net.IPNet`.
|
||||
* `Macaddr` type removed. Use `net.HardwareAddr` directly.
|
||||
* Renamed `pgtype.ConnInfo` to `pgtype.Map`.
|
||||
* Renamed `pgtype.DataType` to `pgtype.Type`.
|
||||
* Renamed `pgtype.None` to `pgtype.Finite`.
|
||||
* `RegisterType` now accepts a `*Type` instead of `Type`.
|
||||
* Assorted array helper methods and types made private.
|
||||
|
||||
## stdlib
|
||||
|
||||
* Removed `AcquireConn` and `ReleaseConn` as that functionality has been built in since Go 1.13.
|
||||
|
||||
## Reduced Memory Usage by Reusing Read Buffers
|
||||
|
||||
Previously, the connection read buffer would allocate large chunks of memory and never reuse them. This allowed
|
||||
transferring ownership to anything such as scanned values without incurring an additional allocation and memory copy.
|
||||
However, this came at the cost of overall increased memory allocation size. But worse it was also possible to pin large
|
||||
chunks of memory by retaining a reference to a small value that originally came directly from the read buffer. Now
|
||||
ownership remains with the read buffer and anything needing to retain a value must make a copy.
|
||||
|
||||
## Query Execution Modes
|
||||
|
||||
Control over automatic prepared statement caching and simple protocol use are now combined into query execution mode.
|
||||
See documentation for `QueryExecMode`.
|
||||
|
||||
## QueryRewriter Interface and NamedArgs
|
||||
|
||||
pgx now supports named arguments with the `NamedArgs` type. This is implemented via the new `QueryRewriter` interface which
|
||||
allows arbitrary rewriting of query SQL and arguments.
|
||||
|
||||
## RowScanner Interface
|
||||
|
||||
The `RowScanner` interface allows a single argument to Rows.Scan to scan the entire row.
|
||||
|
||||
## Rows Result Helpers
|
||||
|
||||
* `CollectRows` and `RowTo*` functions simplify collecting results into a slice.
|
||||
* `CollectOneRow` collects one row using `RowTo*` functions.
|
||||
* `ForEachRow` simplifies scanning each row and executing code using the scanned values. `ForEachRow` replaces `QueryFunc`.
|
||||
|
||||
## Tx Helpers
|
||||
|
||||
Rather than every type that implemented `Begin` or `BeginTx` methods also needing to implement `BeginFunc` and
|
||||
`BeginTxFunc` these methods have been converted to functions that take a db that implements `Begin` or `BeginTx`.
|
||||
|
||||
## Improved Batch Query Ergonomics
|
||||
|
||||
Previously, the code for building a batch went in one place before the call to `SendBatch`, and the code for reading the
|
||||
results went in one place after the call to `SendBatch`. This could make it difficult to match up the query and the code
|
||||
to handle the results. Now `Queue` returns a `QueuedQuery` which has methods `Query`, `QueryRow`, and `Exec` which can
|
||||
be used to register a callback function that will handle the result. Callback functions are called automatically when
|
||||
`BatchResults.Close` is called.
|
||||
|
||||
## SendBatch Uses Pipeline Mode When Appropriate
|
||||
|
||||
Previously, a batch with 10 unique parameterized statements executed 100 times would entail 11 network round trips. 1
|
||||
for each prepare / describe and 1 for executing them all. Now pipeline mode is used to prepare / describe all statements
|
||||
in a single network round trip. So it would only take 2 round trips.
|
||||
|
||||
## Tracing and Logging
|
||||
|
||||
Internal logging support has been replaced with tracing hooks. This allows custom tracing integration with tools like OpenTelemetry. Package tracelog provides an adapter for pgx v4 loggers to act as a tracer.
|
||||
|
||||
All integrations with 3rd party loggers have been extracted to separate repositories. This trims the pgx dependency
|
||||
tree.
|
||||
* Simplified establishing a connection with a connection string.
|
||||
* All potentially blocking operations now require a context.Context. The non-context aware functions have been removed.
|
||||
* OIDs are hard-coded for known types. This saves the query on connection.
|
||||
* Context cancellations while network activity is in progress is now always fatal. Previously, it was sometimes recoverable. This led to increased complexity in pgx itself and in application code.
|
||||
* Go modules are required.
|
||||
* Errors are now implemented in the Go 1.13 style.
|
||||
* `Rows` and `Tx` are now interfaces.
|
||||
* The connection pool has been decoupled from pgx and is now a separate, included package (github.com/jackc/pgx/v4/pgxpool).
|
||||
* pgtype has been spun off to a separate package (github.com/jackc/pgtype).
|
||||
* pgproto3 has been spun off to a separate package (github.com/jackc/pgproto3/v2).
|
||||
* Logical replication support has been spun off to a separate package (github.com/jackc/pglogrepl).
|
||||
* Lower level PostgreSQL functionality is now implemented in a separate package (github.com/jackc/pgconn).
|
||||
* Tests are now configured with environment variables.
|
||||
* Conn has an automatic statement cache by default.
|
||||
* Batch interface has been simplified.
|
||||
* QueryArgs has been removed.
|
||||
|
121
CONTRIBUTING.md
121
CONTRIBUTING.md
@ -1,121 +0,0 @@
|
||||
# Contributing
|
||||
|
||||
## Discuss Significant Changes
|
||||
|
||||
Before you invest a significant amount of time on a change, please create a discussion or issue describing your
|
||||
proposal. This will help to ensure your proposed change has a reasonable chance of being merged.
|
||||
|
||||
## Avoid Dependencies
|
||||
|
||||
Adding a dependency is a big deal. While on occasion a new dependency may be accepted, the default answer to any change
|
||||
that adds a dependency is no.
|
||||
|
||||
## Development Environment Setup
|
||||
|
||||
pgx tests naturally require a PostgreSQL database. It will connect to the database specified in the `PGX_TEST_DATABASE`
|
||||
environment variable. The `PGX_TEST_DATABASE` environment variable can either be a URL or key-value pairs. In addition,
|
||||
the standard `PG*` environment variables will be respected. Consider using [direnv](https://github.com/direnv/direnv) to
|
||||
simplify environment variable handling.
|
||||
|
||||
### Using an Existing PostgreSQL Cluster
|
||||
|
||||
If you already have a PostgreSQL development server this is the quickest way to start and run the majority of the pgx
|
||||
test suite. Some tests will be skipped that require server configuration changes (e.g. those testing different
|
||||
authentication methods).
|
||||
|
||||
Create and setup a test database:
|
||||
|
||||
```
|
||||
export PGDATABASE=pgx_test
|
||||
createdb
|
||||
psql -c 'create extension hstore;'
|
||||
psql -c 'create extension ltree;'
|
||||
psql -c 'create domain uint64 as numeric(20,0);'
|
||||
```
|
||||
|
||||
Ensure a `postgres` user exists. This happens by default in normal PostgreSQL installs, but some installation methods
|
||||
such as Homebrew do not.
|
||||
|
||||
```
|
||||
createuser -s postgres
|
||||
```
|
||||
|
||||
Ensure your `PGX_TEST_DATABASE` environment variable points to the database you just created and run the tests.
|
||||
|
||||
```
|
||||
export PGX_TEST_DATABASE="host=/private/tmp database=pgx_test"
|
||||
go test ./...
|
||||
```
|
||||
|
||||
This will run the vast majority of the tests, but some tests will be skipped (e.g. those testing different connection methods).
|
||||
|
||||
### Creating a New PostgreSQL Cluster Exclusively for Testing
|
||||
|
||||
The following environment variables need to be set both for initial setup and whenever the tests are run. (direnv is
|
||||
highly recommended). Depending on your platform, you may need to change the host for `PGX_TEST_UNIX_SOCKET_CONN_STRING`.
|
||||
|
||||
```
|
||||
export PGPORT=5015
|
||||
export PGUSER=postgres
|
||||
export PGDATABASE=pgx_test
|
||||
export POSTGRESQL_DATA_DIR=postgresql
|
||||
|
||||
export PGX_TEST_DATABASE="host=127.0.0.1 database=pgx_test user=pgx_md5 password=secret"
|
||||
export PGX_TEST_UNIX_SOCKET_CONN_STRING="host=/private/tmp database=pgx_test"
|
||||
export PGX_TEST_TCP_CONN_STRING="host=127.0.0.1 database=pgx_test user=pgx_md5 password=secret"
|
||||
export PGX_TEST_SCRAM_PASSWORD_CONN_STRING="host=127.0.0.1 user=pgx_scram password=secret database=pgx_test"
|
||||
export PGX_TEST_MD5_PASSWORD_CONN_STRING="host=127.0.0.1 database=pgx_test user=pgx_md5 password=secret"
|
||||
export PGX_TEST_PLAIN_PASSWORD_CONN_STRING="host=127.0.0.1 user=pgx_pw password=secret"
|
||||
export PGX_TEST_TLS_CONN_STRING="host=localhost user=pgx_ssl password=secret sslmode=verify-full sslrootcert=`pwd`/.testdb/ca.pem"
|
||||
export PGX_SSL_PASSWORD=certpw
|
||||
export PGX_TEST_TLS_CLIENT_CONN_STRING="host=localhost user=pgx_sslcert sslmode=verify-full sslrootcert=`pwd`/.testdb/ca.pem database=pgx_test sslcert=`pwd`/.testdb/pgx_sslcert.crt sslkey=`pwd`/.testdb/pgx_sslcert.key"
|
||||
```
|
||||
|
||||
Create a new database cluster.
|
||||
|
||||
```
|
||||
initdb --locale=en_US -E UTF-8 --username=postgres .testdb/$POSTGRESQL_DATA_DIR
|
||||
|
||||
echo "listen_addresses = '127.0.0.1'" >> .testdb/$POSTGRESQL_DATA_DIR/postgresql.conf
|
||||
echo "port = $PGPORT" >> .testdb/$POSTGRESQL_DATA_DIR/postgresql.conf
|
||||
cat testsetup/postgresql_ssl.conf >> .testdb/$POSTGRESQL_DATA_DIR/postgresql.conf
|
||||
cp testsetup/pg_hba.conf .testdb/$POSTGRESQL_DATA_DIR/pg_hba.conf
|
||||
|
||||
cd .testdb
|
||||
|
||||
# Generate CA, server, and encrypted client certificates.
|
||||
go run ../testsetup/generate_certs.go
|
||||
|
||||
# Copy certificates to server directory and set permissions.
|
||||
cp ca.pem $POSTGRESQL_DATA_DIR/root.crt
|
||||
cp localhost.key $POSTGRESQL_DATA_DIR/server.key
|
||||
chmod 600 $POSTGRESQL_DATA_DIR/server.key
|
||||
cp localhost.crt $POSTGRESQL_DATA_DIR/server.crt
|
||||
|
||||
cd ..
|
||||
```
|
||||
|
||||
|
||||
Start the new cluster. This will be necessary whenever you are running pgx tests.
|
||||
|
||||
```
|
||||
postgres -D .testdb/$POSTGRESQL_DATA_DIR
|
||||
```
|
||||
|
||||
Setup the test database in the new cluster.
|
||||
|
||||
```
|
||||
createdb
|
||||
psql --no-psqlrc -f testsetup/postgresql_setup.sql
|
||||
```
|
||||
|
||||
### PgBouncer
|
||||
|
||||
There are tests specific for PgBouncer that will be executed if `PGX_TEST_PGBOUNCER_CONN_STRING` is set.
|
||||
|
||||
### Optional Tests
|
||||
|
||||
pgx supports multiple connection types and means of authentication. These tests are optional. They will only run if the
|
||||
appropriate environment variables are set. In addition, there may be tests specific to particular PostgreSQL versions,
|
||||
non-PostgreSQL servers (e.g. CockroachDB), or connection poolers (e.g. PgBouncer). `go test ./... -v | grep SKIP` to see
|
||||
if any tests are being skipped.
|
4
LICENSE
4
LICENSE
@ -1,4 +1,4 @@
|
||||
Copyright (c) 2013-2021 Jack Christensen
|
||||
Copyright (c) 2013 Jack Christensen
|
||||
|
||||
MIT License
|
||||
|
||||
@ -19,4 +19,4 @@ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
197
README.md
197
README.md
@ -1,17 +1,20 @@
|
||||
[](https://pkg.go.dev/github.com/jackc/pgx/v5)
|
||||
[](https://github.com/jackc/pgx/actions/workflows/ci.yml)
|
||||
[](https://pkg.go.dev/github.com/jackc/pgx/v4)
|
||||
[](https://travis-ci.org/jackc/pgx)
|
||||
|
||||
# pgx - PostgreSQL Driver and Toolkit
|
||||
|
||||
pgx is a pure Go driver and toolkit for PostgreSQL.
|
||||
|
||||
The pgx driver is a low-level, high performance interface that exposes PostgreSQL-specific features such as `LISTEN` /
|
||||
`NOTIFY` and `COPY`. It also includes an adapter for the standard `database/sql` interface.
|
||||
pgx aims to be low-level, fast, and performant, while also enabling PostgreSQL-specific features that the standard `database/sql` package does not allow for.
|
||||
|
||||
The driver component of pgx can be used alongside the standard `database/sql` package.
|
||||
|
||||
The toolkit component is a related set of packages that implement PostgreSQL functionality such as parsing the wire protocol
|
||||
and type mapping between PostgreSQL and Go. These underlying packages can be used to implement alternative drivers,
|
||||
proxies, load balancers, logical replication clients, etc.
|
||||
|
||||
The current release of `pgx v4` requires Go modules. To use the previous version, checkout and vendor the `v3` branch.
|
||||
|
||||
## Example Usage
|
||||
|
||||
```go
|
||||
@ -22,11 +25,10 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v4"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// urlExample := "postgres://username:password@localhost:5432/database_name"
|
||||
conn, err := pgx.Connect(context.Background(), os.Getenv("DATABASE_URL"))
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err)
|
||||
@ -48,58 +50,135 @@ func main() {
|
||||
|
||||
See the [getting started guide](https://github.com/jackc/pgx/wiki/Getting-started-with-pgx) for more information.
|
||||
|
||||
## Choosing Between the pgx and database/sql Interfaces
|
||||
|
||||
It is recommended to use the pgx interface if:
|
||||
1. The application only targets PostgreSQL.
|
||||
2. No other libraries that require `database/sql` are in use.
|
||||
|
||||
The pgx interface is faster and exposes more features.
|
||||
|
||||
The `database/sql` interface only allows the underlying driver to return or receive the following types: `int64`,
|
||||
`float64`, `bool`, `[]byte`, `string`, `time.Time`, or `nil`. Handling other types requires implementing the
|
||||
`database/sql.Scanner` and the `database/sql/driver/driver.Valuer` interfaces which require transmission of values in text format. The binary format can be substantially faster, which is what the pgx interface uses.
|
||||
|
||||
## Features
|
||||
|
||||
pgx supports many features beyond what is available through `database/sql`:
|
||||
|
||||
* Support for approximately 70 different PostgreSQL types
|
||||
* Automatic statement preparation and caching
|
||||
* Batch queries
|
||||
* Single-round trip query mode
|
||||
* Full TLS connection control
|
||||
* Binary format support for custom types (allows for much quicker encoding/decoding)
|
||||
* `COPY` protocol support for faster bulk data loads
|
||||
* Tracing and logging support
|
||||
* Copy protocol support for faster bulk data loads
|
||||
* Extendable logging support including built-in support for `log15adapter`, [`logrus`](https://github.com/sirupsen/logrus), [`zap`](https://github.com/uber-go/zap), and [`zerolog`](https://github.com/rs/zerolog)
|
||||
* Connection pool with after-connect hook for arbitrary connection setup
|
||||
* `LISTEN` / `NOTIFY`
|
||||
* Listen / notify
|
||||
* Conversion of PostgreSQL arrays to Go slice mappings for integers, floats, and strings
|
||||
* `hstore` support
|
||||
* `json` and `jsonb` support
|
||||
* Maps `inet` and `cidr` PostgreSQL types to `netip.Addr` and `netip.Prefix`
|
||||
* Hstore support
|
||||
* JSON and JSONB support
|
||||
* Maps `inet` and `cidr` PostgreSQL types to `net.IPNet` and `net.IP`
|
||||
* Large object support
|
||||
* NULL mapping to pointer to pointer
|
||||
* NULL mapping to Null* struct or pointer to pointer
|
||||
* Supports `database/sql.Scanner` and `database/sql/driver.Valuer` interfaces for custom types
|
||||
* Notice response handling
|
||||
* Simulated nested transactions with savepoints
|
||||
|
||||
## Choosing Between the pgx and database/sql Interfaces
|
||||
## Performance
|
||||
|
||||
The pgx interface is faster. Many PostgreSQL specific features such as `LISTEN` / `NOTIFY` and `COPY` are not available
|
||||
through the `database/sql` interface.
|
||||
There are three areas in particular where pgx can provide a significant performance advantage over the standard
|
||||
`database/sql` interface and other drivers:
|
||||
|
||||
The pgx interface is recommended when:
|
||||
1. PostgreSQL specific types - Types such as arrays can be parsed much quicker because pgx uses the binary format.
|
||||
2. Automatic statement preparation and caching - pgx will prepare and cache statements by default. This can provide a
|
||||
significant free improvement to code that does not explicitly use prepared statements. Under certain workloads, it can
|
||||
perform nearly 3x the number of queries per second.
|
||||
3. Batched queries - Multiple queries can be batched together to minimize network round trips.
|
||||
|
||||
1. The application only targets PostgreSQL.
|
||||
2. No other libraries that require `database/sql` are in use.
|
||||
## Comparison with Alternatives
|
||||
|
||||
It is also possible to use the `database/sql` interface and convert a connection to the lower-level pgx interface as needed.
|
||||
* [pq](http://godoc.org/github.com/lib/pq)
|
||||
* [go-pg](https://github.com/go-pg/pg)
|
||||
|
||||
For prepared queries with small sets of simple data types, all drivers will have similar performance. However, if prepared statements aren't being explicitly used, pgx can have a significant performance advantage due to automatic statement preparation.
|
||||
pgx also can perform better when using PostgreSQL-specific data types or query batching. See
|
||||
[go_db_bench](https://github.com/jackc/go_db_bench) for some database driver benchmarks.
|
||||
|
||||
### Compatibility with `database/sql`
|
||||
|
||||
pq is exclusively used with `database/sql`. go-pg does not use `database/sql` at all. pgx supports `database/sql` as well as
|
||||
its own interface.
|
||||
|
||||
### Level of access, ORM
|
||||
|
||||
go-pg is a PostgreSQL client and ORM. It includes many features that traditionally sit above the database driver, such as ORM, struct mapping, soft deletes, schema migrations, and sharding support.
|
||||
|
||||
pgx is "closer to the metal" and such abstractions are beyond the scope of the pgx project, which first and foremost, aims to be a performant driver and toolkit.
|
||||
|
||||
## Testing
|
||||
|
||||
See [CONTRIBUTING.md](./CONTRIBUTING.md) for setup instructions.
|
||||
pgx tests naturally require a PostgreSQL database. It will connect to the database specified in the `PGX_TEST_DATABASE` environment
|
||||
variable. The `PGX_TEST_DATABASE` environment variable can either be a URL or DSN. In addition, the standard `PG*` environment
|
||||
variables will be respected. Consider using [direnv](https://github.com/direnv/direnv) to simplify environment variable
|
||||
handling.
|
||||
|
||||
## Architecture
|
||||
### Example Test Environment
|
||||
|
||||
See the presentation at Golang Estonia, [PGX Top to Bottom](https://www.youtube.com/watch?v=sXMSWhcHCf8) for a description of pgx architecture.
|
||||
Connect to your PostgreSQL server and run:
|
||||
|
||||
```
|
||||
create database pgx_test;
|
||||
```
|
||||
|
||||
Connect to the newly-created database and run:
|
||||
|
||||
```
|
||||
create domain uint64 as numeric(20,0);
|
||||
```
|
||||
|
||||
Now, you can run the tests:
|
||||
|
||||
```
|
||||
PGX_TEST_DATABASE="host=/var/run/postgresql database=pgx_test" go test ./...
|
||||
```
|
||||
|
||||
In addition, there are tests specific for PgBouncer that will be executed if `PGX_TEST_PGBOUNCER_CONN_STRING` is set.
|
||||
|
||||
## Supported Go and PostgreSQL Versions
|
||||
|
||||
pgx supports the same versions of Go and PostgreSQL that are supported by their respective teams. For [Go](https://golang.org/doc/devel/release.html#policy) that is the two most recent major releases and for [PostgreSQL](https://www.postgresql.org/support/versioning/) the major releases in the last 5 years. This means pgx supports Go 1.23 and higher and PostgreSQL 13 and higher. pgx also is tested against the latest version of [CockroachDB](https://www.cockroachlabs.com/product/).
|
||||
pgx supports the same versions of Go and PostgreSQL that are supported by their respective teams. For [Go](https://golang.org/doc/devel/release.html#policy) that is the two most recent major releases and for [PostgreSQL](https://www.postgresql.org/support/versioning/) the major releases in the last 5 years. This means pgx supports Go 1.13 and higher and PostgreSQL 9.5 and higher.
|
||||
|
||||
## Version Policy
|
||||
|
||||
pgx follows semantic versioning for the documented public API on stable releases. `v5` is the latest stable major version.
|
||||
pgx follows semantic versioning for the documented public API on stable releases. `v4` is the latest stable major version.
|
||||
|
||||
## PGX Family Libraries
|
||||
|
||||
pgx is the head of a family of PostgreSQL libraries. Many of these can be used independently. Many can also be accessed
|
||||
from pgx for lower-level control.
|
||||
|
||||
### [github.com/jackc/pgconn](https://github.com/jackc/pgconn)
|
||||
|
||||
`pgconn` is a lower-level PostgreSQL database driver that operates at nearly the same level as the C library `libpq`.
|
||||
|
||||
### [github.com/jackc/pgx/v4/pgxpool](https://github.com/jackc/pgx/tree/master/pgxpool)
|
||||
|
||||
`pgxpool` is a connection pool for pgx. pgx is entirely decoupled from its default pool implementation. This means that pgx can be used with a different pool or without any pool at all.
|
||||
|
||||
### [github.com/jackc/pgx/v4/stdlib](https://github.com/jackc/pgx/tree/master/stdlib)
|
||||
|
||||
This is a `database/sql` compatibility layer for pgx. pgx can be used as a normal `database/sql` driver, but at any time, the native interface can be acquired for more performance or PostgreSQL specific functionality.
|
||||
|
||||
### [github.com/jackc/pgtype](https://github.com/jackc/pgtype)
|
||||
|
||||
Over 70 PostgreSQL types are supported including `uuid`, `hstore`, `json`, `bytea`, `numeric`, `interval`, `inet`, and arrays. These types support `database/sql` interfaces and are usable outside of pgx. They are fully tested in pgx and pq. They also support a higher performance interface when used with the pgx driver.
|
||||
|
||||
### [github.com/jackc/pgproto3](https://github.com/jackc/pgproto3)
|
||||
|
||||
pgproto3 provides standalone encoding and decoding of the PostgreSQL v3 wire protocol. This is useful for implementing very low level PostgreSQL tooling.
|
||||
|
||||
### [github.com/jackc/pglogrepl](https://github.com/jackc/pglogrepl)
|
||||
|
||||
pglogrepl provides functionality to act as a client for PostgreSQL logical replication.
|
||||
@ -116,76 +195,8 @@ tern is a stand-alone SQL migration system.
|
||||
|
||||
pgerrcode contains constants for the PostgreSQL error codes.
|
||||
|
||||
## Adapters for 3rd Party Types
|
||||
|
||||
* [github.com/jackc/pgx-gofrs-uuid](https://github.com/jackc/pgx-gofrs-uuid)
|
||||
* [github.com/jackc/pgx-shopspring-decimal](https://github.com/jackc/pgx-shopspring-decimal)
|
||||
* [github.com/twpayne/pgx-geos](https://github.com/twpayne/pgx-geos) ([PostGIS](https://postgis.net/) and [GEOS](https://libgeos.org/) via [go-geos](https://github.com/twpayne/go-geos))
|
||||
* [github.com/vgarvardt/pgx-google-uuid](https://github.com/vgarvardt/pgx-google-uuid)
|
||||
|
||||
|
||||
## Adapters for 3rd Party Tracers
|
||||
|
||||
* [github.com/jackhopner/pgx-xray-tracer](https://github.com/jackhopner/pgx-xray-tracer)
|
||||
* [github.com/exaring/otelpgx](https://github.com/exaring/otelpgx)
|
||||
|
||||
## Adapters for 3rd Party Loggers
|
||||
|
||||
These adapters can be used with the tracelog package.
|
||||
|
||||
* [github.com/jackc/pgx-go-kit-log](https://github.com/jackc/pgx-go-kit-log)
|
||||
* [github.com/jackc/pgx-log15](https://github.com/jackc/pgx-log15)
|
||||
* [github.com/jackc/pgx-logrus](https://github.com/jackc/pgx-logrus)
|
||||
* [github.com/jackc/pgx-zap](https://github.com/jackc/pgx-zap)
|
||||
* [github.com/jackc/pgx-zerolog](https://github.com/jackc/pgx-zerolog)
|
||||
* [github.com/mcosta74/pgx-slog](https://github.com/mcosta74/pgx-slog)
|
||||
* [github.com/kataras/pgx-golog](https://github.com/kataras/pgx-golog)
|
||||
|
||||
## 3rd Party Libraries with PGX Support
|
||||
|
||||
### [github.com/pashagolub/pgxmock](https://github.com/pashagolub/pgxmock)
|
||||
|
||||
pgxmock is a mock library implementing pgx interfaces.
|
||||
pgxmock has one and only purpose - to simulate pgx behavior in tests, without needing a real database connection.
|
||||
|
||||
### [github.com/georgysavva/scany](https://github.com/georgysavva/scany)
|
||||
|
||||
Library for scanning data from a database into Go structs and more.
|
||||
|
||||
### [github.com/vingarcia/ksql](https://github.com/vingarcia/ksql)
|
||||
|
||||
A carefully designed SQL client for making using SQL easier,
|
||||
more productive, and less error-prone on Golang.
|
||||
|
||||
### [github.com/otan/gopgkrb5](https://github.com/otan/gopgkrb5)
|
||||
|
||||
Adds GSSAPI / Kerberos authentication support.
|
||||
|
||||
### [github.com/wcamarao/pmx](https://github.com/wcamarao/pmx)
|
||||
|
||||
Explicit data mapping and scanning library for Go structs and slices.
|
||||
|
||||
### [github.com/stephenafamo/scan](https://github.com/stephenafamo/scan)
|
||||
|
||||
Type safe and flexible package for scanning database data into Go types.
|
||||
Supports, structs, maps, slices and custom mapping functions.
|
||||
|
||||
### [github.com/z0ne-dev/mgx](https://github.com/z0ne-dev/mgx)
|
||||
|
||||
Code first migration library for native pgx (no database/sql abstraction).
|
||||
|
||||
### [github.com/amirsalarsafaei/sqlc-pgx-monitoring](https://github.com/amirsalarsafaei/sqlc-pgx-monitoring)
|
||||
|
||||
A database monitoring/metrics library for pgx and sqlc. Trace, log and monitor your sqlc query performance using OpenTelemetry.
|
||||
|
||||
### [https://github.com/nikolayk812/pgx-outbox](https://github.com/nikolayk812/pgx-outbox)
|
||||
|
||||
Simple Golang implementation for transactional outbox pattern for PostgreSQL using jackc/pgx driver.
|
||||
|
||||
### [https://github.com/Arlandaren/pgxWrappy](https://github.com/Arlandaren/pgxWrappy)
|
||||
|
||||
Simplifies working with the pgx library, providing convenient scanning of nested structures.
|
||||
|
||||
## [https://github.com/KoNekoD/pgx-colon-query-rewriter](https://github.com/KoNekoD/pgx-colon-query-rewriter)
|
||||
|
||||
Implementation of the pgx query rewriter to use ':' instead of '@' in named query parameters.
|
||||
|
18
Rakefile
18
Rakefile
@ -1,18 +0,0 @@
|
||||
require "erb"
|
||||
|
||||
rule '.go' => '.go.erb' do |task|
|
||||
erb = ERB.new(File.read(task.source))
|
||||
File.write(task.name, "// Code generated from #{task.source}. DO NOT EDIT.\n\n" + erb.result(binding))
|
||||
sh "goimports", "-w", task.name
|
||||
end
|
||||
|
||||
generated_code_files = [
|
||||
"pgtype/int.go",
|
||||
"pgtype/int_test.go",
|
||||
"pgtype/integration_benchmark_test.go",
|
||||
"pgtype/zeronull/int.go",
|
||||
"pgtype/zeronull/int_test.go"
|
||||
]
|
||||
|
||||
desc "Generate code"
|
||||
task generate: generated_code_files
|
446
batch.go
446
batch.go
@ -2,136 +2,64 @@ package pgx
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
"github.com/jackc/pgconn"
|
||||
errors "golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// QueuedQuery is a query that has been queued for execution via a Batch.
|
||||
type QueuedQuery struct {
|
||||
SQL string
|
||||
Arguments []any
|
||||
Fn batchItemFunc
|
||||
sd *pgconn.StatementDescription
|
||||
}
|
||||
|
||||
type batchItemFunc func(br BatchResults) error
|
||||
|
||||
// Query sets fn to be called when the response to qq is received.
|
||||
func (qq *QueuedQuery) Query(fn func(rows Rows) error) {
|
||||
qq.Fn = func(br BatchResults) error {
|
||||
rows, _ := br.Query()
|
||||
defer rows.Close()
|
||||
|
||||
err := fn(rows)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rows.Close()
|
||||
|
||||
return rows.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// Query sets fn to be called when the response to qq is received.
|
||||
func (qq *QueuedQuery) QueryRow(fn func(row Row) error) {
|
||||
qq.Fn = func(br BatchResults) error {
|
||||
row := br.QueryRow()
|
||||
return fn(row)
|
||||
}
|
||||
}
|
||||
|
||||
// Exec sets fn to be called when the response to qq is received.
|
||||
//
|
||||
// Note: for simple batch insert uses where it is not required to handle
|
||||
// each potential error individually, it's sufficient to not set any callbacks,
|
||||
// and just handle the return value of BatchResults.Close.
|
||||
func (qq *QueuedQuery) Exec(fn func(ct pgconn.CommandTag) error) {
|
||||
qq.Fn = func(br BatchResults) error {
|
||||
ct, err := br.Exec()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return fn(ct)
|
||||
}
|
||||
type batchItem struct {
|
||||
query string
|
||||
arguments []interface{}
|
||||
}
|
||||
|
||||
// Batch queries are a way of bundling multiple queries together to avoid
|
||||
// unnecessary network round trips. A Batch must only be sent once.
|
||||
// unnecessary network round trips.
|
||||
type Batch struct {
|
||||
QueuedQueries []*QueuedQuery
|
||||
items []*batchItem
|
||||
}
|
||||
|
||||
// Queue queues a query to batch b. query can be an SQL query or the name of a prepared statement. The only pgx option
|
||||
// argument that is supported is QueryRewriter. Queries are executed using the connection's DefaultQueryExecMode.
|
||||
//
|
||||
// While query can contain multiple statements if the connection's DefaultQueryExecMode is QueryModeSimple, this should
|
||||
// be avoided. QueuedQuery.Fn must not be set as it will only be called for the first query. That is, QueuedQuery.Query,
|
||||
// QueuedQuery.QueryRow, and QueuedQuery.Exec must not be called. In addition, any error messages or tracing that
|
||||
// include the current query may reference the wrong query.
|
||||
func (b *Batch) Queue(query string, arguments ...any) *QueuedQuery {
|
||||
qq := &QueuedQuery{
|
||||
SQL: query,
|
||||
Arguments: arguments,
|
||||
}
|
||||
b.QueuedQueries = append(b.QueuedQueries, qq)
|
||||
return qq
|
||||
// Queue queues a query to batch b. query can be an SQL query or the name of a prepared statement.
|
||||
func (b *Batch) Queue(query string, arguments ...interface{}) {
|
||||
b.items = append(b.items, &batchItem{
|
||||
query: query,
|
||||
arguments: arguments,
|
||||
})
|
||||
}
|
||||
|
||||
// Len returns number of queries that have been queued so far.
|
||||
func (b *Batch) Len() int {
|
||||
return len(b.QueuedQueries)
|
||||
return len(b.items)
|
||||
}
|
||||
|
||||
type BatchResults interface {
|
||||
// Exec reads the results from the next query in the batch as if the query has been sent with Conn.Exec. Prefer
|
||||
// calling Exec on the QueuedQuery, or just calling Close.
|
||||
// Exec reads the results from the next query in the batch as if the query has been sent with Conn.Exec.
|
||||
Exec() (pgconn.CommandTag, error)
|
||||
|
||||
// Query reads the results from the next query in the batch as if the query has been sent with Conn.Query. Prefer
|
||||
// calling Query on the QueuedQuery.
|
||||
// Query reads the results from the next query in the batch as if the query has been sent with Conn.Query.
|
||||
Query() (Rows, error)
|
||||
|
||||
// QueryRow reads the results from the next query in the batch as if the query has been sent with Conn.QueryRow.
|
||||
// Prefer calling QueryRow on the QueuedQuery.
|
||||
QueryRow() Row
|
||||
|
||||
// Close closes the batch operation. All unread results are read and any callback functions registered with
|
||||
// QueuedQuery.Query, QueuedQuery.QueryRow, or QueuedQuery.Exec will be called. If a callback function returns an
|
||||
// error or the batch encounters an error subsequent callback functions will not be called.
|
||||
//
|
||||
// For simple batch inserts inside a transaction or similar queries, it's sufficient to not set any callbacks,
|
||||
// and just handle the return value of Close.
|
||||
//
|
||||
// Close must be called before the underlying connection can be used again. Any error that occurred during a batch
|
||||
// operation may have made it impossible to resyncronize the connection with the server. In this case the underlying
|
||||
// connection will have been closed.
|
||||
//
|
||||
// Close is safe to call multiple times. If it returns an error subsequent calls will return the same error. Callback
|
||||
// functions will not be rerun.
|
||||
// Close closes the batch operation. This must be called before the underlying connection can be used again. Any error
|
||||
// that occurred during a batch operation may have made it impossible to resyncronize the connection with the server.
|
||||
// In this case the underlying connection will have been closed.
|
||||
Close() error
|
||||
}
|
||||
|
||||
type batchResults struct {
|
||||
ctx context.Context
|
||||
conn *Conn
|
||||
mrr *pgconn.MultiResultReader
|
||||
err error
|
||||
b *Batch
|
||||
qqIdx int
|
||||
closed bool
|
||||
endTraced bool
|
||||
ctx context.Context
|
||||
conn *Conn
|
||||
mrr *pgconn.MultiResultReader
|
||||
err error
|
||||
b *Batch
|
||||
ix int
|
||||
}
|
||||
|
||||
// Exec reads the results from the next query in the batch as if the query has been sent with Exec.
|
||||
func (br *batchResults) Exec() (pgconn.CommandTag, error) {
|
||||
if br.err != nil {
|
||||
return pgconn.CommandTag{}, br.err
|
||||
}
|
||||
if br.closed {
|
||||
return pgconn.CommandTag{}, fmt.Errorf("batch already closed")
|
||||
return nil, br.err
|
||||
}
|
||||
|
||||
query, arguments, _ := br.nextQueryAndArgs()
|
||||
@ -139,34 +67,37 @@ func (br *batchResults) Exec() (pgconn.CommandTag, error) {
|
||||
if !br.mrr.NextResult() {
|
||||
err := br.mrr.Close()
|
||||
if err == nil {
|
||||
err = errors.New("no more results in batch")
|
||||
err = errors.New("no result")
|
||||
}
|
||||
if br.conn.batchTracer != nil {
|
||||
br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
|
||||
SQL: query,
|
||||
Args: arguments,
|
||||
Err: err,
|
||||
if br.conn.shouldLog(LogLevelError) {
|
||||
br.conn.log(br.ctx, LogLevelError, "BatchResult.Exec", map[string]interface{} {
|
||||
"sql": query,
|
||||
"args": logQueryArgs(arguments),
|
||||
"err": err,
|
||||
})
|
||||
}
|
||||
return pgconn.CommandTag{}, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
commandTag, err := br.mrr.ResultReader().Close()
|
||||
if err != nil {
|
||||
br.err = err
|
||||
br.mrr.Close()
|
||||
}
|
||||
|
||||
if br.conn.batchTracer != nil {
|
||||
br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
|
||||
SQL: query,
|
||||
Args: arguments,
|
||||
CommandTag: commandTag,
|
||||
Err: br.err,
|
||||
if err != nil {
|
||||
if br.conn.shouldLog(LogLevelError) {
|
||||
br.conn.log(br.ctx, LogLevelError, "BatchResult.Exec", map[string]interface{}{
|
||||
"sql": query,
|
||||
"args": logQueryArgs(arguments),
|
||||
"err": err,
|
||||
})
|
||||
}
|
||||
} else if br.conn.shouldLog(LogLevelInfo) {
|
||||
br.conn.log(br.ctx, LogLevelInfo, "BatchResult.Exec", map[string]interface{} {
|
||||
"sql": query,
|
||||
"args": logQueryArgs(arguments),
|
||||
"commandTag": commandTag,
|
||||
})
|
||||
}
|
||||
|
||||
return commandTag, br.err
|
||||
return commandTag, err
|
||||
}
|
||||
|
||||
// Query reads the results from the next query in the batch as if the query has been sent with Query.
|
||||
@ -176,30 +107,26 @@ func (br *batchResults) Query() (Rows, error) {
|
||||
query = "batch query"
|
||||
}
|
||||
|
||||
if br.err != nil {
|
||||
return &baseRows{err: br.err, closed: true}, br.err
|
||||
}
|
||||
|
||||
if br.closed {
|
||||
alreadyClosedErr := fmt.Errorf("batch already closed")
|
||||
return &baseRows{err: alreadyClosedErr, closed: true}, alreadyClosedErr
|
||||
}
|
||||
|
||||
rows := br.conn.getRows(br.ctx, query, arguments)
|
||||
rows.batchTracer = br.conn.batchTracer
|
||||
|
||||
if br.err != nil {
|
||||
rows.err = br.err
|
||||
rows.closed = true
|
||||
return rows, br.err
|
||||
}
|
||||
|
||||
if !br.mrr.NextResult() {
|
||||
rows.err = br.mrr.Close()
|
||||
if rows.err == nil {
|
||||
rows.err = errors.New("no more results in batch")
|
||||
rows.err = errors.New("no result")
|
||||
}
|
||||
rows.closed = true
|
||||
|
||||
if br.conn.batchTracer != nil {
|
||||
br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
|
||||
SQL: query,
|
||||
Args: arguments,
|
||||
Err: rows.err,
|
||||
if br.conn.shouldLog(LogLevelError) {
|
||||
br.conn.log(br.ctx, LogLevelError, "BatchResult.Query", map[string]interface{} {
|
||||
"sql": query,
|
||||
"args": logQueryArgs(arguments),
|
||||
"err": rows.err,
|
||||
})
|
||||
}
|
||||
|
||||
@ -213,257 +140,42 @@ func (br *batchResults) Query() (Rows, error) {
|
||||
// QueryRow reads the results from the next query in the batch as if the query has been sent with QueryRow.
|
||||
func (br *batchResults) QueryRow() Row {
|
||||
rows, _ := br.Query()
|
||||
return (*connRow)(rows.(*baseRows))
|
||||
return (*connRow)(rows.(*connRows))
|
||||
|
||||
}
|
||||
|
||||
// Close closes the batch operation. Any error that occurred during a batch operation may have made it impossible to
|
||||
// resyncronize the connection with the server. In this case the underlying connection will have been closed.
|
||||
func (br *batchResults) Close() error {
|
||||
defer func() {
|
||||
if !br.endTraced {
|
||||
if br.conn != nil && br.conn.batchTracer != nil {
|
||||
br.conn.batchTracer.TraceBatchEnd(br.ctx, br.conn, TraceBatchEndData{Err: br.err})
|
||||
}
|
||||
br.endTraced = true
|
||||
}
|
||||
|
||||
invalidateCachesOnBatchResultsError(br.conn, br.b, br.err)
|
||||
}()
|
||||
|
||||
if br.err != nil {
|
||||
return br.err
|
||||
}
|
||||
|
||||
if br.closed {
|
||||
return nil
|
||||
}
|
||||
// log any queries that haven't yet been logged by Exec or Query
|
||||
for {
|
||||
query, args, ok := br.nextQueryAndArgs()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
|
||||
// Read and run fn for all remaining items
|
||||
for br.err == nil && !br.closed && br.b != nil && br.qqIdx < len(br.b.QueuedQueries) {
|
||||
if br.b.QueuedQueries[br.qqIdx].Fn != nil {
|
||||
err := br.b.QueuedQueries[br.qqIdx].Fn(br)
|
||||
if err != nil {
|
||||
br.err = err
|
||||
}
|
||||
} else {
|
||||
br.Exec()
|
||||
if br.conn.shouldLog(LogLevelInfo) {
|
||||
br.conn.log(br.ctx, LogLevelInfo, "BatchResult.Close", map[string]interface{} {
|
||||
"sql": query,
|
||||
"args": logQueryArgs(args),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
br.closed = true
|
||||
|
||||
err := br.mrr.Close()
|
||||
if br.err == nil {
|
||||
br.err = err
|
||||
}
|
||||
|
||||
return br.err
|
||||
return br.mrr.Close()
|
||||
}
|
||||
|
||||
func (br *batchResults) earlyError() error {
|
||||
return br.err
|
||||
}
|
||||
|
||||
func (br *batchResults) nextQueryAndArgs() (query string, args []any, ok bool) {
|
||||
if br.b != nil && br.qqIdx < len(br.b.QueuedQueries) {
|
||||
bi := br.b.QueuedQueries[br.qqIdx]
|
||||
query = bi.SQL
|
||||
args = bi.Arguments
|
||||
func (br *batchResults) nextQueryAndArgs() (query string, args []interface{}, ok bool) {
|
||||
if br.b != nil && br.ix < len(br.b.items) {
|
||||
bi := br.b.items[br.ix]
|
||||
query = bi.query
|
||||
args = bi.arguments
|
||||
ok = true
|
||||
br.qqIdx++
|
||||
br.ix++
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type pipelineBatchResults struct {
|
||||
ctx context.Context
|
||||
conn *Conn
|
||||
pipeline *pgconn.Pipeline
|
||||
lastRows *baseRows
|
||||
err error
|
||||
b *Batch
|
||||
qqIdx int
|
||||
closed bool
|
||||
endTraced bool
|
||||
}
|
||||
|
||||
// Exec reads the results from the next query in the batch as if the query has been sent with Exec.
|
||||
func (br *pipelineBatchResults) Exec() (pgconn.CommandTag, error) {
|
||||
if br.err != nil {
|
||||
return pgconn.CommandTag{}, br.err
|
||||
}
|
||||
if br.closed {
|
||||
return pgconn.CommandTag{}, fmt.Errorf("batch already closed")
|
||||
}
|
||||
if br.lastRows != nil && br.lastRows.err != nil {
|
||||
return pgconn.CommandTag{}, br.err
|
||||
}
|
||||
|
||||
query, arguments, err := br.nextQueryAndArgs()
|
||||
if err != nil {
|
||||
return pgconn.CommandTag{}, err
|
||||
}
|
||||
|
||||
results, err := br.pipeline.GetResults()
|
||||
if err != nil {
|
||||
br.err = err
|
||||
return pgconn.CommandTag{}, br.err
|
||||
}
|
||||
var commandTag pgconn.CommandTag
|
||||
switch results := results.(type) {
|
||||
case *pgconn.ResultReader:
|
||||
commandTag, br.err = results.Close()
|
||||
default:
|
||||
return pgconn.CommandTag{}, fmt.Errorf("unexpected pipeline result: %T", results)
|
||||
}
|
||||
|
||||
if br.conn.batchTracer != nil {
|
||||
br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
|
||||
SQL: query,
|
||||
Args: arguments,
|
||||
CommandTag: commandTag,
|
||||
Err: br.err,
|
||||
})
|
||||
}
|
||||
|
||||
return commandTag, br.err
|
||||
}
|
||||
|
||||
// Query reads the results from the next query in the batch as if the query has been sent with Query.
|
||||
func (br *pipelineBatchResults) Query() (Rows, error) {
|
||||
if br.err != nil {
|
||||
return &baseRows{err: br.err, closed: true}, br.err
|
||||
}
|
||||
|
||||
if br.closed {
|
||||
alreadyClosedErr := fmt.Errorf("batch already closed")
|
||||
return &baseRows{err: alreadyClosedErr, closed: true}, alreadyClosedErr
|
||||
}
|
||||
|
||||
if br.lastRows != nil && br.lastRows.err != nil {
|
||||
br.err = br.lastRows.err
|
||||
return &baseRows{err: br.err, closed: true}, br.err
|
||||
}
|
||||
|
||||
query, arguments, err := br.nextQueryAndArgs()
|
||||
if err != nil {
|
||||
return &baseRows{err: err, closed: true}, err
|
||||
}
|
||||
|
||||
rows := br.conn.getRows(br.ctx, query, arguments)
|
||||
rows.batchTracer = br.conn.batchTracer
|
||||
br.lastRows = rows
|
||||
|
||||
results, err := br.pipeline.GetResults()
|
||||
if err != nil {
|
||||
br.err = err
|
||||
rows.err = err
|
||||
rows.closed = true
|
||||
|
||||
if br.conn.batchTracer != nil {
|
||||
br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
|
||||
SQL: query,
|
||||
Args: arguments,
|
||||
Err: err,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
switch results := results.(type) {
|
||||
case *pgconn.ResultReader:
|
||||
rows.resultReader = results
|
||||
default:
|
||||
err = fmt.Errorf("unexpected pipeline result: %T", results)
|
||||
br.err = err
|
||||
rows.err = err
|
||||
rows.closed = true
|
||||
}
|
||||
}
|
||||
|
||||
return rows, rows.err
|
||||
}
|
||||
|
||||
// QueryRow reads the results from the next query in the batch as if the query has been sent with QueryRow.
|
||||
func (br *pipelineBatchResults) QueryRow() Row {
|
||||
rows, _ := br.Query()
|
||||
return (*connRow)(rows.(*baseRows))
|
||||
}
|
||||
|
||||
// Close closes the batch operation. Any error that occurred during a batch operation may have made it impossible to
|
||||
// resyncronize the connection with the server. In this case the underlying connection will have been closed.
|
||||
func (br *pipelineBatchResults) Close() error {
|
||||
defer func() {
|
||||
if !br.endTraced {
|
||||
if br.conn.batchTracer != nil {
|
||||
br.conn.batchTracer.TraceBatchEnd(br.ctx, br.conn, TraceBatchEndData{Err: br.err})
|
||||
}
|
||||
br.endTraced = true
|
||||
}
|
||||
|
||||
invalidateCachesOnBatchResultsError(br.conn, br.b, br.err)
|
||||
}()
|
||||
|
||||
if br.err == nil && br.lastRows != nil && br.lastRows.err != nil {
|
||||
br.err = br.lastRows.err
|
||||
return br.err
|
||||
}
|
||||
|
||||
if br.closed {
|
||||
return br.err
|
||||
}
|
||||
|
||||
// Read and run fn for all remaining items
|
||||
for br.err == nil && !br.closed && br.b != nil && br.qqIdx < len(br.b.QueuedQueries) {
|
||||
if br.b.QueuedQueries[br.qqIdx].Fn != nil {
|
||||
err := br.b.QueuedQueries[br.qqIdx].Fn(br)
|
||||
if err != nil {
|
||||
br.err = err
|
||||
}
|
||||
} else {
|
||||
br.Exec()
|
||||
}
|
||||
}
|
||||
|
||||
br.closed = true
|
||||
|
||||
err := br.pipeline.Close()
|
||||
if br.err == nil {
|
||||
br.err = err
|
||||
}
|
||||
|
||||
return br.err
|
||||
}
|
||||
|
||||
func (br *pipelineBatchResults) earlyError() error {
|
||||
return br.err
|
||||
}
|
||||
|
||||
func (br *pipelineBatchResults) nextQueryAndArgs() (query string, args []any, err error) {
|
||||
if br.b == nil {
|
||||
return "", nil, errors.New("no reference to batch")
|
||||
}
|
||||
|
||||
if br.qqIdx >= len(br.b.QueuedQueries) {
|
||||
return "", nil, errors.New("no more results in batch")
|
||||
}
|
||||
|
||||
bi := br.b.QueuedQueries[br.qqIdx]
|
||||
br.qqIdx++
|
||||
return bi.SQL, bi.Arguments, nil
|
||||
}
|
||||
|
||||
// invalidates statement and description caches on batch results error
|
||||
func invalidateCachesOnBatchResultsError(conn *Conn, b *Batch, err error) {
|
||||
if err != nil && conn != nil && b != nil {
|
||||
if sc := conn.statementCache; sc != nil {
|
||||
for _, bi := range b.QueuedQueries {
|
||||
sc.Invalidate(bi.SQL)
|
||||
}
|
||||
}
|
||||
|
||||
if sc := conn.descriptionCache; sc != nil {
|
||||
for _, bi := range b.QueuedQueries {
|
||||
sc.Invalidate(bi.SQL)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
1287
batch_test.go
1287
batch_test.go
File diff suppressed because it is too large
Load Diff
480
bench_test.go
480
bench_test.go
@ -12,31 +12,16 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
"github.com/jackc/pgx/v5/pgtype"
|
||||
"github.com/jackc/pgconn"
|
||||
"github.com/jackc/pgconn/stmtcache"
|
||||
"github.com/jackc/pgtype"
|
||||
"github.com/jackc/pgx/v4"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func BenchmarkConnectClose(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
conn, err := pgx.Connect(context.Background(), os.Getenv("PGX_TEST_DATABASE"))
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
err = conn.Close(context.Background())
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMinimalUnpreparedSelectWithoutStatementCache(b *testing.B) {
|
||||
config := mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE"))
|
||||
config.DefaultQueryExecMode = pgx.QueryExecModeDescribeExec
|
||||
config.StatementCacheCapacity = 0
|
||||
config.DescriptionCacheCapacity = 0
|
||||
config.BuildStatementCache = nil
|
||||
|
||||
conn := mustConnect(b, config)
|
||||
defer closeConn(b, conn)
|
||||
@ -58,9 +43,9 @@ func BenchmarkMinimalUnpreparedSelectWithoutStatementCache(b *testing.B) {
|
||||
|
||||
func BenchmarkMinimalUnpreparedSelectWithStatementCacheModeDescribe(b *testing.B) {
|
||||
config := mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE"))
|
||||
config.DefaultQueryExecMode = pgx.QueryExecModeCacheDescribe
|
||||
config.StatementCacheCapacity = 0
|
||||
config.DescriptionCacheCapacity = 32
|
||||
config.BuildStatementCache = func(conn *pgconn.PgConn) stmtcache.Cache {
|
||||
return stmtcache.New(conn, stmtcache.ModeDescribe, 32)
|
||||
}
|
||||
|
||||
conn := mustConnect(b, config)
|
||||
defer closeConn(b, conn)
|
||||
@ -82,9 +67,9 @@ func BenchmarkMinimalUnpreparedSelectWithStatementCacheModeDescribe(b *testing.B
|
||||
|
||||
func BenchmarkMinimalUnpreparedSelectWithStatementCacheModePrepare(b *testing.B) {
|
||||
config := mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE"))
|
||||
config.DefaultQueryExecMode = pgx.QueryExecModeCacheStatement
|
||||
config.StatementCacheCapacity = 32
|
||||
config.DescriptionCacheCapacity = 0
|
||||
config.BuildStatementCache = func(conn *pgconn.PgConn) stmtcache.Cache {
|
||||
return stmtcache.New(conn, stmtcache.ModePrepare, 32)
|
||||
}
|
||||
|
||||
conn := mustConnect(b, config)
|
||||
defer closeConn(b, conn)
|
||||
@ -151,7 +136,7 @@ func BenchmarkMinimalPgConnPreparedSelect(b *testing.B) {
|
||||
|
||||
for rr.NextRow() {
|
||||
for i := range rr.Values() {
|
||||
if !bytes.Equal(rr.Values()[0], encodedBytes) {
|
||||
if bytes.Compare(rr.Values()[0], encodedBytes) != 0 {
|
||||
b.Fatalf("unexpected values: %s %s", rr.Values()[i], encodedBytes)
|
||||
}
|
||||
}
|
||||
@ -283,6 +268,123 @@ func BenchmarkPointerPointerWithPresentValues(b *testing.B) {
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSelectWithoutLogging(b *testing.B) {
|
||||
conn := mustConnect(b, mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE")))
|
||||
defer closeConn(b, conn)
|
||||
|
||||
benchmarkSelectWithLog(b, conn)
|
||||
}
|
||||
|
||||
type discardLogger struct{}
|
||||
|
||||
func (dl discardLogger) Log(ctx context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) {
|
||||
}
|
||||
|
||||
func BenchmarkSelectWithLoggingTraceDiscard(b *testing.B) {
|
||||
var logger discardLogger
|
||||
config := mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE"))
|
||||
config.Logger = logger
|
||||
config.LogLevel = pgx.LogLevelTrace
|
||||
|
||||
conn := mustConnect(b, config)
|
||||
defer closeConn(b, conn)
|
||||
|
||||
benchmarkSelectWithLog(b, conn)
|
||||
}
|
||||
|
||||
func BenchmarkSelectWithLoggingDebugWithDiscard(b *testing.B) {
|
||||
var logger discardLogger
|
||||
config := mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE"))
|
||||
config.Logger = logger
|
||||
config.LogLevel = pgx.LogLevelDebug
|
||||
|
||||
conn := mustConnect(b, config)
|
||||
defer closeConn(b, conn)
|
||||
|
||||
benchmarkSelectWithLog(b, conn)
|
||||
}
|
||||
|
||||
func BenchmarkSelectWithLoggingInfoWithDiscard(b *testing.B) {
|
||||
var logger discardLogger
|
||||
config := mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE"))
|
||||
config.Logger = logger
|
||||
config.LogLevel = pgx.LogLevelInfo
|
||||
|
||||
conn := mustConnect(b, config)
|
||||
defer closeConn(b, conn)
|
||||
|
||||
benchmarkSelectWithLog(b, conn)
|
||||
}
|
||||
|
||||
func BenchmarkSelectWithLoggingErrorWithDiscard(b *testing.B) {
|
||||
var logger discardLogger
|
||||
config := mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE"))
|
||||
config.Logger = logger
|
||||
config.LogLevel = pgx.LogLevelError
|
||||
|
||||
conn := mustConnect(b, config)
|
||||
defer closeConn(b, conn)
|
||||
|
||||
benchmarkSelectWithLog(b, conn)
|
||||
}
|
||||
|
||||
func benchmarkSelectWithLog(b *testing.B, conn *pgx.Conn) {
|
||||
_, err := conn.Prepare(context.Background(), "test", "select 1::int4, 'johnsmith', 'johnsmith@example.com', 'John Smith', 'male', '1970-01-01'::date, '2015-01-01 00:00:00'::timestamptz")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
var record struct {
|
||||
id int32
|
||||
userName string
|
||||
email string
|
||||
name string
|
||||
sex string
|
||||
birthDate time.Time
|
||||
lastLoginTime time.Time
|
||||
}
|
||||
|
||||
err = conn.QueryRow(context.Background(), "test").Scan(
|
||||
&record.id,
|
||||
&record.userName,
|
||||
&record.email,
|
||||
&record.name,
|
||||
&record.sex,
|
||||
&record.birthDate,
|
||||
&record.lastLoginTime,
|
||||
)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
// These checks both ensure that the correct data was returned
|
||||
// and provide a benchmark of accessing the returned values.
|
||||
if record.id != 1 {
|
||||
b.Fatalf("bad value for id: %v", record.id)
|
||||
}
|
||||
if record.userName != "johnsmith" {
|
||||
b.Fatalf("bad value for userName: %v", record.userName)
|
||||
}
|
||||
if record.email != "johnsmith@example.com" {
|
||||
b.Fatalf("bad value for email: %v", record.email)
|
||||
}
|
||||
if record.name != "John Smith" {
|
||||
b.Fatalf("bad value for name: %v", record.name)
|
||||
}
|
||||
if record.sex != "male" {
|
||||
b.Fatalf("bad value for sex: %v", record.sex)
|
||||
}
|
||||
if record.birthDate != time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC) {
|
||||
b.Fatalf("bad value for birthDate: %v", record.birthDate)
|
||||
}
|
||||
if record.lastLoginTime != time.Date(2015, 1, 1, 0, 0, 0, 0, time.Local) {
|
||||
b.Fatalf("bad value for lastLoginTime: %v", record.lastLoginTime)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const benchmarkWriteTableCreateSQL = `drop table if exists t;
|
||||
|
||||
create table t(
|
||||
@ -335,16 +437,15 @@ const benchmarkWriteTableInsertSQL = `insert into t(
|
||||
type benchmarkWriteTableCopyFromSrc struct {
|
||||
count int
|
||||
idx int
|
||||
row []any
|
||||
row []interface{}
|
||||
}
|
||||
|
||||
func (s *benchmarkWriteTableCopyFromSrc) Next() bool {
|
||||
next := s.idx < s.count
|
||||
s.idx++
|
||||
return next
|
||||
return s.idx < s.count
|
||||
}
|
||||
|
||||
func (s *benchmarkWriteTableCopyFromSrc) Values() ([]any, error) {
|
||||
func (s *benchmarkWriteTableCopyFromSrc) Values() ([]interface{}, error) {
|
||||
return s.row, nil
|
||||
}
|
||||
|
||||
@ -355,15 +456,15 @@ func (s *benchmarkWriteTableCopyFromSrc) Err() error {
|
||||
func newBenchmarkWriteTableCopyFromSrc(count int) pgx.CopyFromSource {
|
||||
return &benchmarkWriteTableCopyFromSrc{
|
||||
count: count,
|
||||
row: []any{
|
||||
row: []interface{}{
|
||||
"varchar_1",
|
||||
"varchar_2",
|
||||
&pgtype.Text{},
|
||||
&pgtype.Text{Status: pgtype.Null},
|
||||
time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local),
|
||||
&pgtype.Date{},
|
||||
&pgtype.Date{Status: pgtype.Null},
|
||||
1,
|
||||
2,
|
||||
&pgtype.Int4{},
|
||||
&pgtype.Int4{Status: pgtype.Null},
|
||||
time.Date(2001, 1, 1, 0, 0, 0, 0, time.Local),
|
||||
time.Date(2002, 1, 1, 0, 0, 0, 0, time.Local),
|
||||
true,
|
||||
@ -407,37 +508,9 @@ func benchmarkWriteNRowsViaInsert(b *testing.B, n int) {
|
||||
}
|
||||
}
|
||||
|
||||
func benchmarkWriteNRowsViaBatchInsert(b *testing.B, n int) {
|
||||
conn := mustConnect(b, mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE")))
|
||||
defer closeConn(b, conn)
|
||||
type queryArgs []interface{}
|
||||
|
||||
mustExec(b, conn, benchmarkWriteTableCreateSQL)
|
||||
_, err := conn.Prepare(context.Background(), "insert_t", benchmarkWriteTableInsertSQL)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
src := newBenchmarkWriteTableCopyFromSrc(n)
|
||||
|
||||
batch := &pgx.Batch{}
|
||||
for src.Next() {
|
||||
values, _ := src.Values()
|
||||
batch.Queue("insert_t", values...)
|
||||
}
|
||||
|
||||
err = conn.SendBatch(context.Background(), batch).Close()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type queryArgs []any
|
||||
|
||||
func (qa *queryArgs) Append(v any) string {
|
||||
func (qa *queryArgs) Append(v interface{}) string {
|
||||
*qa = append(*qa, v)
|
||||
return "$" + strconv.Itoa(len(*qa))
|
||||
}
|
||||
@ -512,10 +585,11 @@ func multiInsert(conn *pgx.Conn, tableName string, columnNames []string, rowSrc
|
||||
}
|
||||
|
||||
if err := tx.Commit(context.Background()); err != nil {
|
||||
return 0, err
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
return rowCount, nil
|
||||
|
||||
}
|
||||
|
||||
func benchmarkWriteNRowsViaMultiInsert(b *testing.B, n int) {
|
||||
@ -534,8 +608,7 @@ func benchmarkWriteNRowsViaMultiInsert(b *testing.B, n int) {
|
||||
src := newBenchmarkWriteTableCopyFromSrc(n)
|
||||
|
||||
_, err := multiInsert(conn, "t",
|
||||
[]string{
|
||||
"varchar_1",
|
||||
[]string{"varchar_1",
|
||||
"varchar_2",
|
||||
"varchar_null_1",
|
||||
"date_1",
|
||||
@ -547,8 +620,7 @@ func benchmarkWriteNRowsViaMultiInsert(b *testing.B, n int) {
|
||||
"tstz_2",
|
||||
"bool_1",
|
||||
"bool_2",
|
||||
"bool_3",
|
||||
},
|
||||
"bool_3"},
|
||||
src)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
@ -569,8 +641,7 @@ func benchmarkWriteNRowsViaCopy(b *testing.B, n int) {
|
||||
|
||||
_, err := conn.CopyFrom(context.Background(),
|
||||
pgx.Identifier{"t"},
|
||||
[]string{
|
||||
"varchar_1",
|
||||
[]string{"varchar_1",
|
||||
"varchar_2",
|
||||
"varchar_null_1",
|
||||
"date_1",
|
||||
@ -582,8 +653,7 @@ func benchmarkWriteNRowsViaCopy(b *testing.B, n int) {
|
||||
"tstz_2",
|
||||
"bool_1",
|
||||
"bool_2",
|
||||
"bool_3",
|
||||
},
|
||||
"bool_3"},
|
||||
src)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
@ -591,22 +661,6 @@ func benchmarkWriteNRowsViaCopy(b *testing.B, n int) {
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkWrite2RowsViaInsert(b *testing.B) {
|
||||
benchmarkWriteNRowsViaInsert(b, 2)
|
||||
}
|
||||
|
||||
func BenchmarkWrite2RowsViaMultiInsert(b *testing.B) {
|
||||
benchmarkWriteNRowsViaMultiInsert(b, 2)
|
||||
}
|
||||
|
||||
func BenchmarkWrite2RowsViaBatchInsert(b *testing.B) {
|
||||
benchmarkWriteNRowsViaBatchInsert(b, 2)
|
||||
}
|
||||
|
||||
func BenchmarkWrite2RowsViaCopy(b *testing.B) {
|
||||
benchmarkWriteNRowsViaCopy(b, 2)
|
||||
}
|
||||
|
||||
func BenchmarkWrite5RowsViaInsert(b *testing.B) {
|
||||
benchmarkWriteNRowsViaInsert(b, 5)
|
||||
}
|
||||
@ -615,10 +669,6 @@ func BenchmarkWrite5RowsViaMultiInsert(b *testing.B) {
|
||||
benchmarkWriteNRowsViaMultiInsert(b, 5)
|
||||
}
|
||||
|
||||
func BenchmarkWrite5RowsViaBatchInsert(b *testing.B) {
|
||||
benchmarkWriteNRowsViaBatchInsert(b, 5)
|
||||
}
|
||||
|
||||
func BenchmarkWrite5RowsViaCopy(b *testing.B) {
|
||||
benchmarkWriteNRowsViaCopy(b, 5)
|
||||
}
|
||||
@ -631,10 +681,6 @@ func BenchmarkWrite10RowsViaMultiInsert(b *testing.B) {
|
||||
benchmarkWriteNRowsViaMultiInsert(b, 10)
|
||||
}
|
||||
|
||||
func BenchmarkWrite10RowsViaBatchInsert(b *testing.B) {
|
||||
benchmarkWriteNRowsViaBatchInsert(b, 10)
|
||||
}
|
||||
|
||||
func BenchmarkWrite10RowsViaCopy(b *testing.B) {
|
||||
benchmarkWriteNRowsViaCopy(b, 10)
|
||||
}
|
||||
@ -647,10 +693,6 @@ func BenchmarkWrite100RowsViaMultiInsert(b *testing.B) {
|
||||
benchmarkWriteNRowsViaMultiInsert(b, 100)
|
||||
}
|
||||
|
||||
func BenchmarkWrite100RowsViaBatchInsert(b *testing.B) {
|
||||
benchmarkWriteNRowsViaBatchInsert(b, 100)
|
||||
}
|
||||
|
||||
func BenchmarkWrite100RowsViaCopy(b *testing.B) {
|
||||
benchmarkWriteNRowsViaCopy(b, 100)
|
||||
}
|
||||
@ -663,10 +705,6 @@ func BenchmarkWrite1000RowsViaMultiInsert(b *testing.B) {
|
||||
benchmarkWriteNRowsViaMultiInsert(b, 1000)
|
||||
}
|
||||
|
||||
func BenchmarkWrite1000RowsViaBatchInsert(b *testing.B) {
|
||||
benchmarkWriteNRowsViaBatchInsert(b, 1000)
|
||||
}
|
||||
|
||||
func BenchmarkWrite1000RowsViaCopy(b *testing.B) {
|
||||
benchmarkWriteNRowsViaCopy(b, 1000)
|
||||
}
|
||||
@ -679,19 +717,13 @@ func BenchmarkWrite10000RowsViaMultiInsert(b *testing.B) {
|
||||
benchmarkWriteNRowsViaMultiInsert(b, 10000)
|
||||
}
|
||||
|
||||
func BenchmarkWrite10000RowsViaBatchInsert(b *testing.B) {
|
||||
benchmarkWriteNRowsViaBatchInsert(b, 10000)
|
||||
}
|
||||
|
||||
func BenchmarkWrite10000RowsViaCopy(b *testing.B) {
|
||||
benchmarkWriteNRowsViaCopy(b, 10000)
|
||||
}
|
||||
|
||||
func BenchmarkMultipleQueriesNonBatchNoStatementCache(b *testing.B) {
|
||||
config := mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE"))
|
||||
config.DefaultQueryExecMode = pgx.QueryExecModeDescribeExec
|
||||
config.StatementCacheCapacity = 0
|
||||
config.DescriptionCacheCapacity = 0
|
||||
config.BuildStatementCache = nil
|
||||
|
||||
conn := mustConnect(b, config)
|
||||
defer closeConn(b, conn)
|
||||
@ -701,9 +733,9 @@ func BenchmarkMultipleQueriesNonBatchNoStatementCache(b *testing.B) {
|
||||
|
||||
func BenchmarkMultipleQueriesNonBatchPrepareStatementCache(b *testing.B) {
|
||||
config := mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE"))
|
||||
config.DefaultQueryExecMode = pgx.QueryExecModeCacheStatement
|
||||
config.StatementCacheCapacity = 32
|
||||
config.DescriptionCacheCapacity = 0
|
||||
config.BuildStatementCache = func(conn *pgconn.PgConn) stmtcache.Cache {
|
||||
return stmtcache.New(conn, stmtcache.ModePrepare, 32)
|
||||
}
|
||||
|
||||
conn := mustConnect(b, config)
|
||||
defer closeConn(b, conn)
|
||||
@ -713,9 +745,9 @@ func BenchmarkMultipleQueriesNonBatchPrepareStatementCache(b *testing.B) {
|
||||
|
||||
func BenchmarkMultipleQueriesNonBatchDescribeStatementCache(b *testing.B) {
|
||||
config := mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE"))
|
||||
config.DefaultQueryExecMode = pgx.QueryExecModeCacheDescribe
|
||||
config.StatementCacheCapacity = 0
|
||||
config.DescriptionCacheCapacity = 32
|
||||
config.BuildStatementCache = func(conn *pgconn.PgConn) stmtcache.Cache {
|
||||
return stmtcache.New(conn, stmtcache.ModeDescribe, 32)
|
||||
}
|
||||
|
||||
conn := mustConnect(b, config)
|
||||
defer closeConn(b, conn)
|
||||
@ -751,9 +783,7 @@ func benchmarkMultipleQueriesNonBatch(b *testing.B, conn *pgx.Conn, queryCount i
|
||||
|
||||
func BenchmarkMultipleQueriesBatchNoStatementCache(b *testing.B) {
|
||||
config := mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE"))
|
||||
config.DefaultQueryExecMode = pgx.QueryExecModeDescribeExec
|
||||
config.StatementCacheCapacity = 0
|
||||
config.DescriptionCacheCapacity = 0
|
||||
config.BuildStatementCache = nil
|
||||
|
||||
conn := mustConnect(b, config)
|
||||
defer closeConn(b, conn)
|
||||
@ -763,9 +793,9 @@ func BenchmarkMultipleQueriesBatchNoStatementCache(b *testing.B) {
|
||||
|
||||
func BenchmarkMultipleQueriesBatchPrepareStatementCache(b *testing.B) {
|
||||
config := mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE"))
|
||||
config.DefaultQueryExecMode = pgx.QueryExecModeCacheStatement
|
||||
config.StatementCacheCapacity = 32
|
||||
config.DescriptionCacheCapacity = 0
|
||||
config.BuildStatementCache = func(conn *pgconn.PgConn) stmtcache.Cache {
|
||||
return stmtcache.New(conn, stmtcache.ModePrepare, 32)
|
||||
}
|
||||
|
||||
conn := mustConnect(b, config)
|
||||
defer closeConn(b, conn)
|
||||
@ -775,9 +805,9 @@ func BenchmarkMultipleQueriesBatchPrepareStatementCache(b *testing.B) {
|
||||
|
||||
func BenchmarkMultipleQueriesBatchDescribeStatementCache(b *testing.B) {
|
||||
config := mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE"))
|
||||
config.DefaultQueryExecMode = pgx.QueryExecModeCacheDescribe
|
||||
config.StatementCacheCapacity = 0
|
||||
config.DescriptionCacheCapacity = 32
|
||||
config.BuildStatementCache = func(conn *pgconn.PgConn) stmtcache.Cache {
|
||||
return stmtcache.New(conn, stmtcache.ModeDescribe, 32)
|
||||
}
|
||||
|
||||
conn := mustConnect(b, config)
|
||||
defer closeConn(b, conn)
|
||||
@ -888,7 +918,8 @@ func BenchmarkSelectManyRegisteredEnum(b *testing.B) {
|
||||
err = conn.QueryRow(context.Background(), "select oid from pg_type where typname=$1;", "color").Scan(&oid)
|
||||
require.NoError(b, err)
|
||||
|
||||
conn.TypeMap().RegisterType(&pgtype.Type{Name: "color", OID: oid, Codec: &pgtype.EnumCodec{}})
|
||||
et := pgtype.NewEnumType("color", []string{"blue", "green", "orange"})
|
||||
conn.ConnInfo().RegisterDataType(pgtype.DataType{Value: et, Name: "color", OID: oid})
|
||||
|
||||
b.ResetTimer()
|
||||
var x, y, z string
|
||||
@ -951,7 +982,6 @@ type BenchRowSimple struct {
|
||||
BirthDate time.Time
|
||||
Weight int32
|
||||
Height int32
|
||||
Tags []string
|
||||
UpdateTime time.Time
|
||||
}
|
||||
|
||||
@ -965,13 +995,13 @@ func BenchmarkSelectRowsScanSimple(b *testing.B) {
|
||||
b.Run(fmt.Sprintf("%d rows", rowCount), func(b *testing.B) {
|
||||
br := &BenchRowSimple{}
|
||||
for i := 0; i < b.N; i++ {
|
||||
rows, err := conn.Query(context.Background(), "select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '{foo,bar,baz}'::text[], '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n", rowCount)
|
||||
rows, err := conn.Query(context.Background(), "select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n", rowCount)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
for rows.Next() {
|
||||
rows.Scan(&br.ID, &br.FirstName, &br.LastName, &br.Sex, &br.BirthDate, &br.Weight, &br.Height, &br.Tags, &br.UpdateTime)
|
||||
rows.Scan(&br.ID, &br.FirstName, &br.LastName, &br.Sex, &br.BirthDate, &br.Weight, &br.Height, &br.UpdateTime)
|
||||
}
|
||||
|
||||
if rows.Err() != nil {
|
||||
@ -990,7 +1020,6 @@ type BenchRowStringBytes struct {
|
||||
BirthDate time.Time
|
||||
Weight int32
|
||||
Height int32
|
||||
Tags []string
|
||||
UpdateTime time.Time
|
||||
}
|
||||
|
||||
@ -1004,13 +1033,13 @@ func BenchmarkSelectRowsScanStringBytes(b *testing.B) {
|
||||
b.Run(fmt.Sprintf("%d rows", rowCount), func(b *testing.B) {
|
||||
br := &BenchRowStringBytes{}
|
||||
for i := 0; i < b.N; i++ {
|
||||
rows, err := conn.Query(context.Background(), "select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '{foo,bar,baz}'::text[], '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n", rowCount)
|
||||
rows, err := conn.Query(context.Background(), "select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n", rowCount)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
for rows.Next() {
|
||||
rows.Scan(&br.ID, &br.FirstName, &br.LastName, &br.Sex, &br.BirthDate, &br.Weight, &br.Height, &br.Tags, &br.UpdateTime)
|
||||
rows.Scan(&br.ID, &br.FirstName, &br.LastName, &br.Sex, &br.BirthDate, &br.Weight, &br.Height, &br.UpdateTime)
|
||||
}
|
||||
|
||||
if rows.Err() != nil {
|
||||
@ -1029,7 +1058,6 @@ type BenchRowDecoder struct {
|
||||
BirthDate pgtype.Date
|
||||
Weight pgtype.Int4
|
||||
Height pgtype.Int4
|
||||
Tags pgtype.FlatArray[string]
|
||||
UpdateTime pgtype.Timestamptz
|
||||
}
|
||||
|
||||
@ -1050,11 +1078,12 @@ func BenchmarkSelectRowsScanDecoder(b *testing.B) {
|
||||
}
|
||||
for _, format := range formats {
|
||||
b.Run(format.name, func(b *testing.B) {
|
||||
|
||||
br := &BenchRowDecoder{}
|
||||
for i := 0; i < b.N; i++ {
|
||||
rows, err := conn.Query(
|
||||
context.Background(),
|
||||
"select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '{foo,bar,baz}'::text[], '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n",
|
||||
"select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n",
|
||||
pgx.QueryResultFormats{format.code},
|
||||
rowCount,
|
||||
)
|
||||
@ -1063,7 +1092,7 @@ func BenchmarkSelectRowsScanDecoder(b *testing.B) {
|
||||
}
|
||||
|
||||
for rows.Next() {
|
||||
rows.Scan(&br.ID, &br.FirstName, &br.LastName, &br.Sex, &br.BirthDate, &br.Weight, &br.Height, &br.Tags, &br.UpdateTime)
|
||||
rows.Scan(&br.ID, &br.FirstName, &br.LastName, &br.Sex, &br.BirthDate, &br.Weight, &br.Height, &br.UpdateTime)
|
||||
}
|
||||
|
||||
if rows.Err() != nil {
|
||||
@ -1076,6 +1105,73 @@ func BenchmarkSelectRowsScanDecoder(b *testing.B) {
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSelectRowsExplicitDecoding(b *testing.B) {
|
||||
conn := mustConnectString(b, os.Getenv("PGX_TEST_DATABASE"))
|
||||
defer closeConn(b, conn)
|
||||
|
||||
rowCounts := getSelectRowsCounts(b)
|
||||
|
||||
for _, rowCount := range rowCounts {
|
||||
b.Run(fmt.Sprintf("%d rows", rowCount), func(b *testing.B) {
|
||||
br := &BenchRowDecoder{}
|
||||
for i := 0; i < b.N; i++ {
|
||||
rows, err := conn.Query(context.Background(), "select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n", rowCount)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
for rows.Next() {
|
||||
rawValues := rows.RawValues()
|
||||
|
||||
err = br.ID.DecodeBinary(conn.ConnInfo(), rawValues[0])
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
err = br.FirstName.DecodeText(conn.ConnInfo(), rawValues[1])
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
err = br.LastName.DecodeText(conn.ConnInfo(), rawValues[2])
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
err = br.Sex.DecodeText(conn.ConnInfo(), rawValues[3])
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
err = br.BirthDate.DecodeBinary(conn.ConnInfo(), rawValues[4])
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
err = br.Weight.DecodeBinary(conn.ConnInfo(), rawValues[5])
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
err = br.Height.DecodeBinary(conn.ConnInfo(), rawValues[6])
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
err = br.UpdateTime.DecodeBinary(conn.ConnInfo(), rawValues[7])
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if rows.Err() != nil {
|
||||
b.Fatal(rows.Err())
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSelectRowsPgConnExecText(b *testing.B) {
|
||||
conn := mustConnectString(b, os.Getenv("PGX_TEST_DATABASE"))
|
||||
defer closeConn(b, conn)
|
||||
@ -1085,7 +1181,7 @@ func BenchmarkSelectRowsPgConnExecText(b *testing.B) {
|
||||
for _, rowCount := range rowCounts {
|
||||
b.Run(fmt.Sprintf("%d rows", rowCount), func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
mrr := conn.PgConn().Exec(context.Background(), fmt.Sprintf("select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '{foo,bar,baz}'::text[], '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + %d) n", rowCount))
|
||||
mrr := conn.PgConn().Exec(context.Background(), fmt.Sprintf("select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + %d) n", rowCount))
|
||||
for mrr.NextResult() {
|
||||
rr := mrr.ResultReader()
|
||||
for rr.NextRow() {
|
||||
@ -1122,11 +1218,11 @@ func BenchmarkSelectRowsPgConnExecParams(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
rr := conn.PgConn().ExecParams(
|
||||
context.Background(),
|
||||
"select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '{foo,bar,baz}'::text[], '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n",
|
||||
"select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n",
|
||||
[][]byte{[]byte(strconv.FormatInt(rowCount, 10))},
|
||||
nil,
|
||||
nil,
|
||||
[]int16{format.code, pgx.TextFormatCode, pgx.TextFormatCode, pgx.TextFormatCode, format.code, format.code, format.code, format.code, format.code},
|
||||
[]int16{format.code, pgx.TextFormatCode, pgx.TextFormatCode, pgx.TextFormatCode, format.code, format.code, format.code, format.code},
|
||||
)
|
||||
for rr.NextRow() {
|
||||
rr.Values()
|
||||
@ -1143,107 +1239,13 @@ func BenchmarkSelectRowsPgConnExecParams(b *testing.B) {
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSelectRowsSimpleCollectRowsRowToStructByPos(b *testing.B) {
|
||||
conn := mustConnectString(b, os.Getenv("PGX_TEST_DATABASE"))
|
||||
defer closeConn(b, conn)
|
||||
|
||||
rowCounts := getSelectRowsCounts(b)
|
||||
|
||||
for _, rowCount := range rowCounts {
|
||||
b.Run(fmt.Sprintf("%d rows", rowCount), func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
rows, _ := conn.Query(context.Background(), "select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '{foo,bar,baz}'::text[], '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n", rowCount)
|
||||
benchRows, err := pgx.CollectRows(rows, pgx.RowToStructByPos[BenchRowSimple])
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
if len(benchRows) != int(rowCount) {
|
||||
b.Fatalf("Expected %d rows, got %d", rowCount, len(benchRows))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSelectRowsSimpleAppendRowsRowToStructByPos(b *testing.B) {
|
||||
conn := mustConnectString(b, os.Getenv("PGX_TEST_DATABASE"))
|
||||
defer closeConn(b, conn)
|
||||
|
||||
rowCounts := getSelectRowsCounts(b)
|
||||
|
||||
for _, rowCount := range rowCounts {
|
||||
b.Run(fmt.Sprintf("%d rows", rowCount), func(b *testing.B) {
|
||||
benchRows := make([]BenchRowSimple, 0, rowCount)
|
||||
for i := 0; i < b.N; i++ {
|
||||
benchRows = benchRows[:0]
|
||||
rows, _ := conn.Query(context.Background(), "select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '{foo,bar,baz}'::text[], '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n", rowCount)
|
||||
var err error
|
||||
benchRows, err = pgx.AppendRows(benchRows, rows, pgx.RowToStructByPos[BenchRowSimple])
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
if len(benchRows) != int(rowCount) {
|
||||
b.Fatalf("Expected %d rows, got %d", rowCount, len(benchRows))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSelectRowsSimpleCollectRowsRowToStructByName(b *testing.B) {
|
||||
conn := mustConnectString(b, os.Getenv("PGX_TEST_DATABASE"))
|
||||
defer closeConn(b, conn)
|
||||
|
||||
rowCounts := getSelectRowsCounts(b)
|
||||
|
||||
for _, rowCount := range rowCounts {
|
||||
b.Run(fmt.Sprintf("%d rows", rowCount), func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
rows, _ := conn.Query(context.Background(), "select n as id, 'Adam' as first_name, 'Smith ' || n as last_name, 'male' as sex, '1952-06-16'::date as birth_date, 258 as weight, 72 as height, '{foo,bar,baz}'::text[] as tags, '2001-01-28 01:02:03-05'::timestamptz as update_time from generate_series(100001, 100000 + $1) n", rowCount)
|
||||
benchRows, err := pgx.CollectRows(rows, pgx.RowToStructByName[BenchRowSimple])
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
if len(benchRows) != int(rowCount) {
|
||||
b.Fatalf("Expected %d rows, got %d", rowCount, len(benchRows))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSelectRowsSimpleAppendRowsRowToStructByName(b *testing.B) {
|
||||
conn := mustConnectString(b, os.Getenv("PGX_TEST_DATABASE"))
|
||||
defer closeConn(b, conn)
|
||||
|
||||
rowCounts := getSelectRowsCounts(b)
|
||||
|
||||
for _, rowCount := range rowCounts {
|
||||
b.Run(fmt.Sprintf("%d rows", rowCount), func(b *testing.B) {
|
||||
benchRows := make([]BenchRowSimple, 0, rowCount)
|
||||
for i := 0; i < b.N; i++ {
|
||||
benchRows = benchRows[:0]
|
||||
rows, _ := conn.Query(context.Background(), "select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '{foo,bar,baz}'::text[], '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n", rowCount)
|
||||
var err error
|
||||
benchRows, err = pgx.AppendRows(benchRows, rows, pgx.RowToStructByPos[BenchRowSimple])
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
if len(benchRows) != int(rowCount) {
|
||||
b.Fatalf("Expected %d rows, got %d", rowCount, len(benchRows))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSelectRowsPgConnExecPrepared(b *testing.B) {
|
||||
conn := mustConnectString(b, os.Getenv("PGX_TEST_DATABASE"))
|
||||
defer closeConn(b, conn)
|
||||
|
||||
rowCounts := getSelectRowsCounts(b)
|
||||
|
||||
_, err := conn.PgConn().Prepare(context.Background(), "ps1", "select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '{foo,bar,baz}'::text[], '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n", nil)
|
||||
_, err := conn.PgConn().Prepare(context.Background(), "ps1", "select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n", nil)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@ -1265,7 +1267,7 @@ func BenchmarkSelectRowsPgConnExecPrepared(b *testing.B) {
|
||||
"ps1",
|
||||
[][]byte{[]byte(strconv.FormatInt(rowCount, 10))},
|
||||
nil,
|
||||
[]int16{format.code, pgx.TextFormatCode, pgx.TextFormatCode, pgx.TextFormatCode, format.code, format.code, format.code, format.code, format.code},
|
||||
[]int16{format.code, pgx.TextFormatCode, pgx.TextFormatCode, pgx.TextFormatCode, format.code, format.code, format.code, format.code},
|
||||
)
|
||||
for rr.NextRow() {
|
||||
rr.Values()
|
||||
@ -1344,7 +1346,7 @@ func BenchmarkSelectRowsRawPrepared(b *testing.B) {
|
||||
conn := mustConnectString(b, os.Getenv("PGX_TEST_DATABASE")).PgConn()
|
||||
defer conn.Close(context.Background())
|
||||
|
||||
_, err := conn.Prepare(context.Background(), "ps1", "select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '{foo,bar,baz}'::text[], '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n", nil)
|
||||
_, err := conn.Prepare(context.Background(), "ps1", "select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n", nil)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@ -1367,7 +1369,7 @@ func BenchmarkSelectRowsRawPrepared(b *testing.B) {
|
||||
"ps1",
|
||||
[][]byte{[]byte(strconv.FormatInt(rowCount, 10))},
|
||||
nil,
|
||||
[]int16{format.code, pgx.TextFormatCode, pgx.TextFormatCode, pgx.TextFormatCode, format.code, format.code, format.code, format.code, format.code},
|
||||
[]int16{format.code, pgx.TextFormatCode, pgx.TextFormatCode, pgx.TextFormatCode, format.code, format.code, format.code, format.code},
|
||||
)
|
||||
_, err := rr.Close()
|
||||
require.NoError(b, err)
|
||||
|
@ -1,61 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -eux
|
||||
|
||||
if [[ "${PGVERSION-}" =~ ^[0-9.]+$ ]]
|
||||
then
|
||||
sudo apt-get remove -y --purge postgresql libpq-dev libpq5 postgresql-client-common postgresql-common
|
||||
sudo rm -rf /var/lib/postgresql
|
||||
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
|
||||
sudo sh -c "echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PGVERSION >> /etc/apt/sources.list.d/postgresql.list"
|
||||
sudo apt-get update -qq
|
||||
sudo apt-get -y -o Dpkg::Options::=--force-confdef -o Dpkg::Options::="--force-confnew" install postgresql-$PGVERSION postgresql-server-dev-$PGVERSION postgresql-contrib-$PGVERSION
|
||||
|
||||
sudo cp testsetup/pg_hba.conf /etc/postgresql/$PGVERSION/main/pg_hba.conf
|
||||
sudo sh -c "echo \"listen_addresses = '127.0.0.1'\" >> /etc/postgresql/$PGVERSION/main/postgresql.conf"
|
||||
sudo sh -c "cat testsetup/postgresql_ssl.conf >> /etc/postgresql/$PGVERSION/main/postgresql.conf"
|
||||
|
||||
cd testsetup
|
||||
|
||||
# Generate CA, server, and encrypted client certificates.
|
||||
go run generate_certs.go
|
||||
|
||||
# Copy certificates to server directory and set permissions.
|
||||
sudo cp ca.pem /var/lib/postgresql/$PGVERSION/main/root.crt
|
||||
sudo chown postgres:postgres /var/lib/postgresql/$PGVERSION/main/root.crt
|
||||
sudo cp localhost.key /var/lib/postgresql/$PGVERSION/main/server.key
|
||||
sudo chown postgres:postgres /var/lib/postgresql/$PGVERSION/main/server.key
|
||||
sudo chmod 600 /var/lib/postgresql/$PGVERSION/main/server.key
|
||||
sudo cp localhost.crt /var/lib/postgresql/$PGVERSION/main/server.crt
|
||||
sudo chown postgres:postgres /var/lib/postgresql/$PGVERSION/main/server.crt
|
||||
|
||||
cp ca.pem /tmp
|
||||
cp pgx_sslcert.key /tmp
|
||||
cp pgx_sslcert.crt /tmp
|
||||
|
||||
cd ..
|
||||
|
||||
sudo /etc/init.d/postgresql restart
|
||||
|
||||
createdb -U postgres pgx_test
|
||||
psql -U postgres -f testsetup/postgresql_setup.sql pgx_test
|
||||
fi
|
||||
|
||||
if [[ "${PGVERSION-}" =~ ^cockroach ]]
|
||||
then
|
||||
wget -qO- https://binaries.cockroachdb.com/cockroach-v24.3.3.linux-amd64.tgz | tar xvz
|
||||
sudo mv cockroach-v24.3.3.linux-amd64/cockroach /usr/local/bin/
|
||||
cockroach start-single-node --insecure --background --listen-addr=localhost
|
||||
cockroach sql --insecure -e 'create database pgx_test'
|
||||
fi
|
||||
|
||||
if [ "${CRATEVERSION-}" != "" ]
|
||||
then
|
||||
docker run \
|
||||
-p "6543:5432" \
|
||||
-d \
|
||||
crate:"$CRATEVERSION" \
|
||||
crate \
|
||||
-Cnetwork.host=0.0.0.0 \
|
||||
-Ctransport.host=localhost \
|
||||
-Clicense.enterprise=false
|
||||
fi
|
@ -1,55 +0,0 @@
|
||||
package pgx
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func mustParseConfig(t testing.TB, connString string) *ConnConfig {
|
||||
config, err := ParseConfig(connString)
|
||||
require.Nil(t, err)
|
||||
return config
|
||||
}
|
||||
|
||||
func mustConnect(t testing.TB, config *ConnConfig) *Conn {
|
||||
conn, err := ConnectConfig(context.Background(), config)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to establish connection: %v", err)
|
||||
}
|
||||
return conn
|
||||
}
|
||||
|
||||
// Ensures the connection limits the size of its cached objects.
|
||||
// This test examines the internals of *Conn so must be in the same package.
|
||||
func TestStmtCacheSizeLimit(t *testing.T) {
|
||||
const cacheLimit = 16
|
||||
|
||||
connConfig := mustParseConfig(t, os.Getenv("PGX_TEST_DATABASE"))
|
||||
connConfig.StatementCacheCapacity = cacheLimit
|
||||
conn := mustConnect(t, connConfig)
|
||||
defer func() {
|
||||
err := conn.Close(context.Background())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
|
||||
// run a set of unique queries that should overflow the cache
|
||||
ctx := context.Background()
|
||||
for i := 0; i < cacheLimit*2; i++ {
|
||||
uniqueString := fmt.Sprintf("unique %d", i)
|
||||
uniqueSQL := fmt.Sprintf("select '%s'", uniqueString)
|
||||
var output string
|
||||
err := conn.QueryRow(ctx, uniqueSQL).Scan(&output)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uniqueString, output)
|
||||
}
|
||||
// preparedStatements contains cacheLimit+1 because deallocation happens before the query
|
||||
assert.Len(t, conn.preparedStatements, cacheLimit+1)
|
||||
assert.Equal(t, cacheLimit, conn.statementCache.Len())
|
||||
}
|
1001
conn_test.go
1001
conn_test.go
File diff suppressed because it is too large
Load Diff
119
copy_from.go
119
copy_from.go
@ -6,18 +6,19 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/jackc/pgx/v5/internal/pgio"
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
"github.com/jackc/pgconn"
|
||||
"github.com/jackc/pgio"
|
||||
errors "golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// CopyFromRows returns a CopyFromSource interface over the provided rows slice
|
||||
// making it usable by *Conn.CopyFrom.
|
||||
func CopyFromRows(rows [][]any) CopyFromSource {
|
||||
func CopyFromRows(rows [][]interface{}) CopyFromSource {
|
||||
return ©FromRows{rows: rows, idx: -1}
|
||||
}
|
||||
|
||||
type copyFromRows struct {
|
||||
rows [][]any
|
||||
rows [][]interface{}
|
||||
idx int
|
||||
}
|
||||
|
||||
@ -26,7 +27,7 @@ func (ctr *copyFromRows) Next() bool {
|
||||
return ctr.idx < len(ctr.rows)
|
||||
}
|
||||
|
||||
func (ctr *copyFromRows) Values() ([]any, error) {
|
||||
func (ctr *copyFromRows) Values() ([]interface{}, error) {
|
||||
return ctr.rows[ctr.idx], nil
|
||||
}
|
||||
|
||||
@ -36,12 +37,12 @@ func (ctr *copyFromRows) Err() error {
|
||||
|
||||
// CopyFromSlice returns a CopyFromSource interface over a dynamic func
|
||||
// making it usable by *Conn.CopyFrom.
|
||||
func CopyFromSlice(length int, next func(int) ([]any, error)) CopyFromSource {
|
||||
func CopyFromSlice(length int, next func(int) ([]interface{}, error)) CopyFromSource {
|
||||
return ©FromSlice{next: next, idx: -1, len: length}
|
||||
}
|
||||
|
||||
type copyFromSlice struct {
|
||||
next func(int) ([]any, error)
|
||||
next func(int) ([]interface{}, error)
|
||||
idx int
|
||||
len int
|
||||
err error
|
||||
@ -52,7 +53,7 @@ func (cts *copyFromSlice) Next() bool {
|
||||
return cts.idx < cts.len
|
||||
}
|
||||
|
||||
func (cts *copyFromSlice) Values() ([]any, error) {
|
||||
func (cts *copyFromSlice) Values() ([]interface{}, error) {
|
||||
values, err := cts.next(cts.idx)
|
||||
if err != nil {
|
||||
cts.err = err
|
||||
@ -64,33 +65,6 @@ func (cts *copyFromSlice) Err() error {
|
||||
return cts.err
|
||||
}
|
||||
|
||||
// CopyFromFunc returns a CopyFromSource interface that relies on nxtf for values.
|
||||
// nxtf returns rows until it either signals an 'end of data' by returning row=nil and err=nil,
|
||||
// or it returns an error. If nxtf returns an error, the copy is aborted.
|
||||
func CopyFromFunc(nxtf func() (row []any, err error)) CopyFromSource {
|
||||
return ©FromFunc{next: nxtf}
|
||||
}
|
||||
|
||||
type copyFromFunc struct {
|
||||
next func() ([]any, error)
|
||||
valueRow []any
|
||||
err error
|
||||
}
|
||||
|
||||
func (g *copyFromFunc) Next() bool {
|
||||
g.valueRow, g.err = g.next()
|
||||
// only return true if valueRow exists and no error
|
||||
return g.valueRow != nil && g.err == nil
|
||||
}
|
||||
|
||||
func (g *copyFromFunc) Values() ([]any, error) {
|
||||
return g.valueRow, g.err
|
||||
}
|
||||
|
||||
func (g *copyFromFunc) Err() error {
|
||||
return g.err
|
||||
}
|
||||
|
||||
// CopyFromSource is the interface used by *Conn.CopyFrom as the source for copy data.
|
||||
type CopyFromSource interface {
|
||||
// Next returns true if there is another row and makes the next row data
|
||||
@ -99,7 +73,7 @@ type CopyFromSource interface {
|
||||
Next() bool
|
||||
|
||||
// Values returns the values for the current row.
|
||||
Values() ([]any, error)
|
||||
Values() ([]interface{}, error)
|
||||
|
||||
// Err returns any error that has been encountered by the CopyFromSource. If
|
||||
// this is not nil *Conn.CopyFrom will abort the copy.
|
||||
@ -112,17 +86,9 @@ type copyFrom struct {
|
||||
columnNames []string
|
||||
rowSrc CopyFromSource
|
||||
readerErrChan chan error
|
||||
mode QueryExecMode
|
||||
}
|
||||
|
||||
func (ct *copyFrom) run(ctx context.Context) (int64, error) {
|
||||
if ct.conn.copyFromTracer != nil {
|
||||
ctx = ct.conn.copyFromTracer.TraceCopyFromStart(ctx, ct.conn, TraceCopyFromStartData{
|
||||
TableName: ct.tableName,
|
||||
ColumnNames: ct.columnNames,
|
||||
})
|
||||
}
|
||||
|
||||
quotedTableName := ct.tableName.Sanitize()
|
||||
cbuf := &bytes.Buffer{}
|
||||
for i, cn := range ct.columnNames {
|
||||
@ -133,29 +99,9 @@ func (ct *copyFrom) run(ctx context.Context) (int64, error) {
|
||||
}
|
||||
quotedColumnNames := cbuf.String()
|
||||
|
||||
var sd *pgconn.StatementDescription
|
||||
switch ct.mode {
|
||||
case QueryExecModeExec, QueryExecModeSimpleProtocol:
|
||||
// These modes don't support the binary format. Before the inclusion of the
|
||||
// QueryExecModes, Conn.Prepare was called on every COPY operation to get
|
||||
// the OIDs. These prepared statements were not cached.
|
||||
//
|
||||
// Since that's the same behavior provided by QueryExecModeDescribeExec,
|
||||
// we'll default to that mode.
|
||||
ct.mode = QueryExecModeDescribeExec
|
||||
fallthrough
|
||||
case QueryExecModeCacheStatement, QueryExecModeCacheDescribe, QueryExecModeDescribeExec:
|
||||
var err error
|
||||
sd, err = ct.conn.getStatementDescription(
|
||||
ctx,
|
||||
ct.mode,
|
||||
fmt.Sprintf("select %s from %s", quotedColumnNames, quotedTableName),
|
||||
)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("statement description failed: %w", err)
|
||||
}
|
||||
default:
|
||||
return 0, fmt.Errorf("unknown QueryExecMode: %v", ct.mode)
|
||||
sd, err := ct.conn.Prepare(ctx, "", fmt.Sprintf("select %s from %s", quotedColumnNames, quotedTableName))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
r, w := io.Pipe()
|
||||
@ -204,49 +150,29 @@ func (ct *copyFrom) run(ctx context.Context) (int64, error) {
|
||||
r.Close()
|
||||
<-doneChan
|
||||
|
||||
if ct.conn.copyFromTracer != nil {
|
||||
ct.conn.copyFromTracer.TraceCopyFromEnd(ctx, ct.conn, TraceCopyFromEndData{
|
||||
CommandTag: commandTag,
|
||||
Err: err,
|
||||
})
|
||||
}
|
||||
|
||||
return commandTag.RowsAffected(), err
|
||||
}
|
||||
|
||||
func (ct *copyFrom) buildCopyBuf(buf []byte, sd *pgconn.StatementDescription) (bool, []byte, error) {
|
||||
const sendBufSize = 65536 - 5 // The packet has a 5-byte header
|
||||
lastBufLen := 0
|
||||
largestRowLen := 0
|
||||
|
||||
for ct.rowSrc.Next() {
|
||||
lastBufLen = len(buf)
|
||||
|
||||
values, err := ct.rowSrc.Values()
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
if len(values) != len(ct.columnNames) {
|
||||
return false, nil, fmt.Errorf("expected %d values, got %d values", len(ct.columnNames), len(values))
|
||||
return false, nil, errors.Errorf("expected %d values, got %d values", len(ct.columnNames), len(values))
|
||||
}
|
||||
|
||||
buf = pgio.AppendInt16(buf, int16(len(ct.columnNames)))
|
||||
for i, val := range values {
|
||||
buf, err = encodeCopyValue(ct.conn.typeMap, buf, sd.Fields[i].DataTypeOID, val)
|
||||
buf, err = encodePreparedStatementArgument(ct.conn.connInfo, buf, sd.Fields[i].DataTypeOID, val)
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
rowLen := len(buf) - lastBufLen
|
||||
if rowLen > largestRowLen {
|
||||
largestRowLen = rowLen
|
||||
}
|
||||
|
||||
// Try not to overflow size of the buffer PgConn.CopyFrom will be reading into. If that happens then the nature of
|
||||
// io.Pipe means that the next Read will be short. This can lead to pathological send sizes such as 65531, 13, 65531
|
||||
// 13, 65531, 13, 65531, 13.
|
||||
if len(buf) > sendBufSize-largestRowLen {
|
||||
if len(buf) > 65536 {
|
||||
return true, buf, nil
|
||||
}
|
||||
}
|
||||
@ -254,14 +180,12 @@ func (ct *copyFrom) buildCopyBuf(buf []byte, sd *pgconn.StatementDescription) (b
|
||||
return false, buf, nil
|
||||
}
|
||||
|
||||
// CopyFrom uses the PostgreSQL copy protocol to perform bulk data insertion. It returns the number of rows copied and
|
||||
// an error.
|
||||
// CopyFrom uses the PostgreSQL copy protocol to perform bulk data insertion.
|
||||
// It returns the number of rows copied and an error.
|
||||
//
|
||||
// CopyFrom requires all values use the binary format. A pgtype.Type that supports the binary format must be registered
|
||||
// for the type of each column. Almost all types implemented by pgx support the binary format.
|
||||
//
|
||||
// Even though enum types appear to be strings they still must be registered to use with CopyFrom. This can be done with
|
||||
// Conn.LoadType and pgtype.Map.RegisterType.
|
||||
// CopyFrom requires all values use the binary format. Almost all types
|
||||
// implemented by pgx use the binary format by default. Types implementing
|
||||
// Encoder can only be used if they encode to the binary format.
|
||||
func (c *Conn) CopyFrom(ctx context.Context, tableName Identifier, columnNames []string, rowSrc CopyFromSource) (int64, error) {
|
||||
ct := ©From{
|
||||
conn: c,
|
||||
@ -269,7 +193,6 @@ func (c *Conn) CopyFrom(ctx context.Context, tableName Identifier, columnNames [
|
||||
columnNames: columnNames,
|
||||
rowSrc: rowSrc,
|
||||
readerErrChan: make(chan error),
|
||||
mode: c.config.DefaultQueryExecMode,
|
||||
}
|
||||
|
||||
return ct.run(ctx)
|
||||
|
@ -2,148 +2,20 @@ package pgx_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
"github.com/jackc/pgx/v5/pgxtest"
|
||||
"github.com/jackc/pgconn"
|
||||
"github.com/jackc/pgx/v4"
|
||||
"github.com/stretchr/testify/require"
|
||||
errors "golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
func TestConnCopyWithAllQueryExecModes(t *testing.T) {
|
||||
for _, mode := range pgxtest.AllQueryExecModes {
|
||||
t.Run(mode.String(), func(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
|
||||
defer cancel()
|
||||
|
||||
cfg := mustParseConfig(t, os.Getenv("PGX_TEST_DATABASE"))
|
||||
cfg.DefaultQueryExecMode = mode
|
||||
conn := mustConnect(t, cfg)
|
||||
defer closeConn(t, conn)
|
||||
|
||||
mustExec(t, conn, `create temporary table foo(
|
||||
a int2,
|
||||
b int4,
|
||||
c int8,
|
||||
d text,
|
||||
e timestamptz
|
||||
)`)
|
||||
|
||||
tzedTime := time.Date(2010, 2, 3, 4, 5, 6, 0, time.Local)
|
||||
|
||||
inputRows := [][]any{
|
||||
{int16(0), int32(1), int64(2), "abc", tzedTime},
|
||||
{nil, nil, nil, nil, nil},
|
||||
}
|
||||
|
||||
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a", "b", "c", "d", "e"}, pgx.CopyFromRows(inputRows))
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error for CopyFrom: %v", err)
|
||||
}
|
||||
if int(copyCount) != len(inputRows) {
|
||||
t.Errorf("Expected CopyFrom to return %d copied rows, but got %d", len(inputRows), copyCount)
|
||||
}
|
||||
|
||||
rows, err := conn.Query(ctx, "select * from foo")
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error for Query: %v", err)
|
||||
}
|
||||
|
||||
var outputRows [][]any
|
||||
for rows.Next() {
|
||||
row, err := rows.Values()
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error for rows.Values(): %v", err)
|
||||
}
|
||||
outputRows = append(outputRows, row)
|
||||
}
|
||||
|
||||
if rows.Err() != nil {
|
||||
t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(inputRows, outputRows) {
|
||||
t.Errorf("Input rows and output rows do not equal: %v -> %v", inputRows, outputRows)
|
||||
}
|
||||
|
||||
ensureConnValid(t, conn)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestConnCopyWithKnownOIDQueryExecModes(t *testing.T) {
|
||||
for _, mode := range pgxtest.KnownOIDQueryExecModes {
|
||||
t.Run(mode.String(), func(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
|
||||
defer cancel()
|
||||
|
||||
cfg := mustParseConfig(t, os.Getenv("PGX_TEST_DATABASE"))
|
||||
cfg.DefaultQueryExecMode = mode
|
||||
conn := mustConnect(t, cfg)
|
||||
defer closeConn(t, conn)
|
||||
|
||||
mustExec(t, conn, `create temporary table foo(
|
||||
a int2,
|
||||
b int4,
|
||||
c int8,
|
||||
d varchar,
|
||||
e text,
|
||||
f date,
|
||||
g timestamptz
|
||||
)`)
|
||||
|
||||
tzedTime := time.Date(2010, 2, 3, 4, 5, 6, 0, time.Local)
|
||||
|
||||
inputRows := [][]any{
|
||||
{int16(0), int32(1), int64(2), "abc", "efg", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), tzedTime},
|
||||
{nil, nil, nil, nil, nil, nil, nil},
|
||||
}
|
||||
|
||||
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a", "b", "c", "d", "e", "f", "g"}, pgx.CopyFromRows(inputRows))
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error for CopyFrom: %v", err)
|
||||
}
|
||||
if int(copyCount) != len(inputRows) {
|
||||
t.Errorf("Expected CopyFrom to return %d copied rows, but got %d", len(inputRows), copyCount)
|
||||
}
|
||||
|
||||
rows, err := conn.Query(ctx, "select * from foo")
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error for Query: %v", err)
|
||||
}
|
||||
|
||||
var outputRows [][]any
|
||||
for rows.Next() {
|
||||
row, err := rows.Values()
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error for rows.Values(): %v", err)
|
||||
}
|
||||
outputRows = append(outputRows, row)
|
||||
}
|
||||
|
||||
if rows.Err() != nil {
|
||||
t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(inputRows, outputRows) {
|
||||
t.Errorf("Input rows and output rows do not equal: %v -> %v", inputRows, outputRows)
|
||||
}
|
||||
|
||||
ensureConnValid(t, conn)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestConnCopyFromSmall(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
|
||||
defer cancel()
|
||||
|
||||
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
|
||||
defer closeConn(t, conn)
|
||||
|
||||
@ -159,12 +31,12 @@ func TestConnCopyFromSmall(t *testing.T) {
|
||||
|
||||
tzedTime := time.Date(2010, 2, 3, 4, 5, 6, 0, time.Local)
|
||||
|
||||
inputRows := [][]any{
|
||||
inputRows := [][]interface{}{
|
||||
{int16(0), int32(1), int64(2), "abc", "efg", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), tzedTime},
|
||||
{nil, nil, nil, nil, nil, nil, nil},
|
||||
}
|
||||
|
||||
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a", "b", "c", "d", "e", "f", "g"}, pgx.CopyFromRows(inputRows))
|
||||
copyCount, err := conn.CopyFrom(context.Background(), pgx.Identifier{"foo"}, []string{"a", "b", "c", "d", "e", "f", "g"}, pgx.CopyFromRows(inputRows))
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error for CopyFrom: %v", err)
|
||||
}
|
||||
@ -172,12 +44,12 @@ func TestConnCopyFromSmall(t *testing.T) {
|
||||
t.Errorf("Expected CopyFrom to return %d copied rows, but got %d", len(inputRows), copyCount)
|
||||
}
|
||||
|
||||
rows, err := conn.Query(ctx, "select * from foo")
|
||||
rows, err := conn.Query(context.Background(), "select * from foo")
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error for Query: %v", err)
|
||||
}
|
||||
|
||||
var outputRows [][]any
|
||||
var outputRows [][]interface{}
|
||||
for rows.Next() {
|
||||
row, err := rows.Values()
|
||||
if err != nil {
|
||||
@ -200,9 +72,6 @@ func TestConnCopyFromSmall(t *testing.T) {
|
||||
func TestConnCopyFromSliceSmall(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
|
||||
defer cancel()
|
||||
|
||||
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
|
||||
defer closeConn(t, conn)
|
||||
|
||||
@ -218,13 +87,13 @@ func TestConnCopyFromSliceSmall(t *testing.T) {
|
||||
|
||||
tzedTime := time.Date(2010, 2, 3, 4, 5, 6, 0, time.Local)
|
||||
|
||||
inputRows := [][]any{
|
||||
inputRows := [][]interface{}{
|
||||
{int16(0), int32(1), int64(2), "abc", "efg", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), tzedTime},
|
||||
{nil, nil, nil, nil, nil, nil, nil},
|
||||
}
|
||||
|
||||
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a", "b", "c", "d", "e", "f", "g"},
|
||||
pgx.CopyFromSlice(len(inputRows), func(i int) ([]any, error) {
|
||||
copyCount, err := conn.CopyFrom(context.Background(), pgx.Identifier{"foo"}, []string{"a", "b", "c", "d", "e", "f", "g"},
|
||||
pgx.CopyFromSlice(len(inputRows), func(i int) ([]interface{}, error) {
|
||||
return inputRows[i], nil
|
||||
}))
|
||||
if err != nil {
|
||||
@ -234,12 +103,12 @@ func TestConnCopyFromSliceSmall(t *testing.T) {
|
||||
t.Errorf("Expected CopyFrom to return %d copied rows, but got %d", len(inputRows), copyCount)
|
||||
}
|
||||
|
||||
rows, err := conn.Query(ctx, "select * from foo")
|
||||
rows, err := conn.Query(context.Background(), "select * from foo")
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error for Query: %v", err)
|
||||
}
|
||||
|
||||
var outputRows [][]any
|
||||
var outputRows [][]interface{}
|
||||
for rows.Next() {
|
||||
row, err := rows.Values()
|
||||
if err != nil {
|
||||
@ -262,9 +131,6 @@ func TestConnCopyFromSliceSmall(t *testing.T) {
|
||||
func TestConnCopyFromLarge(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
|
||||
defer cancel()
|
||||
|
||||
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
|
||||
defer closeConn(t, conn)
|
||||
|
||||
@ -281,13 +147,13 @@ func TestConnCopyFromLarge(t *testing.T) {
|
||||
|
||||
tzedTime := time.Date(2010, 2, 3, 4, 5, 6, 0, time.Local)
|
||||
|
||||
inputRows := [][]any{}
|
||||
inputRows := [][]interface{}{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
inputRows = append(inputRows, []any{int16(0), int32(1), int64(2), "abc", "efg", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), tzedTime, []byte{111, 111, 111, 111}})
|
||||
inputRows = append(inputRows, []interface{}{int16(0), int32(1), int64(2), "abc", "efg", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), tzedTime, []byte{111, 111, 111, 111}})
|
||||
}
|
||||
|
||||
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a", "b", "c", "d", "e", "f", "g", "h"}, pgx.CopyFromRows(inputRows))
|
||||
copyCount, err := conn.CopyFrom(context.Background(), pgx.Identifier{"foo"}, []string{"a", "b", "c", "d", "e", "f", "g", "h"}, pgx.CopyFromRows(inputRows))
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error for CopyFrom: %v", err)
|
||||
}
|
||||
@ -295,12 +161,12 @@ func TestConnCopyFromLarge(t *testing.T) {
|
||||
t.Errorf("Expected CopyFrom to return %d copied rows, but got %d", len(inputRows), copyCount)
|
||||
}
|
||||
|
||||
rows, err := conn.Query(ctx, "select * from foo")
|
||||
rows, err := conn.Query(context.Background(), "select * from foo")
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error for Query: %v", err)
|
||||
}
|
||||
|
||||
var outputRows [][]any
|
||||
var outputRows [][]interface{}
|
||||
for rows.Next() {
|
||||
row, err := rows.Values()
|
||||
if err != nil {
|
||||
@ -323,12 +189,10 @@ func TestConnCopyFromLarge(t *testing.T) {
|
||||
func TestConnCopyFromEnum(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
|
||||
defer cancel()
|
||||
|
||||
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
|
||||
defer closeConn(t, conn)
|
||||
|
||||
ctx := context.Background()
|
||||
tx, err := conn.Begin(ctx)
|
||||
require.NoError(t, err)
|
||||
defer tx.Rollback(ctx)
|
||||
@ -345,15 +209,7 @@ func TestConnCopyFromEnum(t *testing.T) {
|
||||
_, err = tx.Exec(ctx, `create type fruit as enum ('apple', 'orange', 'grape')`)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Obviously using conn while a tx is in use and registering a type after the connection has been established are
|
||||
// really bad practices, but for the sake of convenience we do it in the test here.
|
||||
for _, name := range []string{"fruit", "color"} {
|
||||
typ, err := conn.LoadType(ctx, name)
|
||||
require.NoError(t, err)
|
||||
conn.TypeMap().RegisterType(typ)
|
||||
}
|
||||
|
||||
_, err = tx.Exec(ctx, `create temporary table foo(
|
||||
_, err = tx.Exec(ctx, `create table foo(
|
||||
a text,
|
||||
b color,
|
||||
c fruit,
|
||||
@ -363,19 +219,19 @@ func TestConnCopyFromEnum(t *testing.T) {
|
||||
)`)
|
||||
require.NoError(t, err)
|
||||
|
||||
inputRows := [][]any{
|
||||
inputRows := [][]interface{}{
|
||||
{"abc", "blue", "grape", "orange", "orange", "def"},
|
||||
{nil, nil, nil, nil, nil, nil},
|
||||
}
|
||||
|
||||
copyCount, err := tx.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a", "b", "c", "d", "e", "f"}, pgx.CopyFromRows(inputRows))
|
||||
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a", "b", "c", "d", "e", "f"}, pgx.CopyFromRows(inputRows))
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, len(inputRows), copyCount)
|
||||
|
||||
rows, err := tx.Query(ctx, "select * from foo")
|
||||
rows, err := conn.Query(ctx, "select * from foo")
|
||||
require.NoError(t, err)
|
||||
|
||||
var outputRows [][]any
|
||||
var outputRows [][]interface{}
|
||||
for rows.Next() {
|
||||
row, err := rows.Values()
|
||||
require.NoError(t, err)
|
||||
@ -388,23 +244,17 @@ func TestConnCopyFromEnum(t *testing.T) {
|
||||
t.Errorf("Input rows and output rows do not equal: %v -> %v", inputRows, outputRows)
|
||||
}
|
||||
|
||||
err = tx.Rollback(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
ensureConnValid(t, conn)
|
||||
}
|
||||
|
||||
func TestConnCopyFromJSON(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
|
||||
defer cancel()
|
||||
|
||||
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
|
||||
defer closeConn(t, conn)
|
||||
|
||||
for _, typeName := range []string{"json", "jsonb"} {
|
||||
if _, ok := conn.TypeMap().TypeForName(typeName); !ok {
|
||||
if _, ok := conn.ConnInfo().DataTypeForName(typeName); !ok {
|
||||
return // No JSON/JSONB type -- must be running against old PostgreSQL
|
||||
}
|
||||
}
|
||||
@ -414,12 +264,12 @@ func TestConnCopyFromJSON(t *testing.T) {
|
||||
b jsonb
|
||||
)`)
|
||||
|
||||
inputRows := [][]any{
|
||||
{map[string]any{"foo": "bar"}, map[string]any{"bar": "quz"}},
|
||||
inputRows := [][]interface{}{
|
||||
{map[string]interface{}{"foo": "bar"}, map[string]interface{}{"bar": "quz"}},
|
||||
{nil, nil},
|
||||
}
|
||||
|
||||
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a", "b"}, pgx.CopyFromRows(inputRows))
|
||||
copyCount, err := conn.CopyFrom(context.Background(), pgx.Identifier{"foo"}, []string{"a", "b"}, pgx.CopyFromRows(inputRows))
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error for CopyFrom: %v", err)
|
||||
}
|
||||
@ -427,12 +277,12 @@ func TestConnCopyFromJSON(t *testing.T) {
|
||||
t.Errorf("Expected CopyFrom to return %d copied rows, but got %d", len(inputRows), copyCount)
|
||||
}
|
||||
|
||||
rows, err := conn.Query(ctx, "select * from foo")
|
||||
rows, err := conn.Query(context.Background(), "select * from foo")
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error for Query: %v", err)
|
||||
}
|
||||
|
||||
var outputRows [][]any
|
||||
var outputRows [][]interface{}
|
||||
for rows.Next() {
|
||||
row, err := rows.Values()
|
||||
if err != nil {
|
||||
@ -462,12 +312,12 @@ func (cfs *clientFailSource) Next() bool {
|
||||
return cfs.count < 100
|
||||
}
|
||||
|
||||
func (cfs *clientFailSource) Values() ([]any, error) {
|
||||
func (cfs *clientFailSource) Values() ([]interface{}, error) {
|
||||
if cfs.count == 3 {
|
||||
cfs.err = fmt.Errorf("client error")
|
||||
cfs.err = errors.Errorf("client error")
|
||||
return nil, cfs.err
|
||||
}
|
||||
return []any{make([]byte, 100000)}, nil
|
||||
return []interface{}{make([]byte, 100000)}, nil
|
||||
}
|
||||
|
||||
func (cfs *clientFailSource) Err() error {
|
||||
@ -477,9 +327,6 @@ func (cfs *clientFailSource) Err() error {
|
||||
func TestConnCopyFromFailServerSideMidway(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
|
||||
defer cancel()
|
||||
|
||||
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
|
||||
defer closeConn(t, conn)
|
||||
|
||||
@ -488,13 +335,13 @@ func TestConnCopyFromFailServerSideMidway(t *testing.T) {
|
||||
b varchar not null
|
||||
)`)
|
||||
|
||||
inputRows := [][]any{
|
||||
inputRows := [][]interface{}{
|
||||
{int32(1), "abc"},
|
||||
{int32(2), nil}, // this row should trigger a failure
|
||||
{int32(3), "def"},
|
||||
}
|
||||
|
||||
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a", "b"}, pgx.CopyFromRows(inputRows))
|
||||
copyCount, err := conn.CopyFrom(context.Background(), pgx.Identifier{"foo"}, []string{"a", "b"}, pgx.CopyFromRows(inputRows))
|
||||
if err == nil {
|
||||
t.Errorf("Expected CopyFrom return error, but it did not")
|
||||
}
|
||||
@ -505,12 +352,12 @@ func TestConnCopyFromFailServerSideMidway(t *testing.T) {
|
||||
t.Errorf("Expected CopyFrom to return 0 copied rows, but got %d", copyCount)
|
||||
}
|
||||
|
||||
rows, err := conn.Query(ctx, "select * from foo")
|
||||
rows, err := conn.Query(context.Background(), "select * from foo")
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error for Query: %v", err)
|
||||
}
|
||||
|
||||
var outputRows [][]any
|
||||
var outputRows [][]interface{}
|
||||
for rows.Next() {
|
||||
row, err := rows.Values()
|
||||
if err != nil {
|
||||
@ -542,11 +389,11 @@ func (fs *failSource) Next() bool {
|
||||
return fs.count < 100
|
||||
}
|
||||
|
||||
func (fs *failSource) Values() ([]any, error) {
|
||||
func (fs *failSource) Values() ([]interface{}, error) {
|
||||
if fs.count == 3 {
|
||||
return []any{nil}, nil
|
||||
return []interface{}{nil}, nil
|
||||
}
|
||||
return []any{make([]byte, 100000)}, nil
|
||||
return []interface{}{make([]byte, 100000)}, nil
|
||||
}
|
||||
|
||||
func (fs *failSource) Err() error {
|
||||
@ -556,21 +403,16 @@ func (fs *failSource) Err() error {
|
||||
func TestConnCopyFromFailServerSideMidwayAbortsWithoutWaiting(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
|
||||
defer cancel()
|
||||
|
||||
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
|
||||
defer closeConn(t, conn)
|
||||
|
||||
pgxtest.SkipCockroachDB(t, conn, "Server copy error does not fail fast")
|
||||
|
||||
mustExec(t, conn, `create temporary table foo(
|
||||
a bytea not null
|
||||
)`)
|
||||
|
||||
startTime := time.Now()
|
||||
|
||||
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a"}, &failSource{})
|
||||
copyCount, err := conn.CopyFrom(context.Background(), pgx.Identifier{"foo"}, []string{"a"}, &failSource{})
|
||||
if err == nil {
|
||||
t.Errorf("Expected CopyFrom return error, but it did not")
|
||||
}
|
||||
@ -587,12 +429,12 @@ func TestConnCopyFromFailServerSideMidwayAbortsWithoutWaiting(t *testing.T) {
|
||||
t.Errorf("Failing CopyFrom shouldn't have taken so long: %v", copyTime)
|
||||
}
|
||||
|
||||
rows, err := conn.Query(ctx, "select * from foo")
|
||||
rows, err := conn.Query(context.Background(), "select * from foo")
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error for Query: %v", err)
|
||||
}
|
||||
|
||||
var outputRows [][]any
|
||||
var outputRows [][]interface{}
|
||||
for rows.Next() {
|
||||
row, err := rows.Values()
|
||||
if err != nil {
|
||||
@ -622,11 +464,11 @@ func (fs *slowFailRaceSource) Next() bool {
|
||||
return fs.count < 1000
|
||||
}
|
||||
|
||||
func (fs *slowFailRaceSource) Values() ([]any, error) {
|
||||
func (fs *slowFailRaceSource) Values() ([]interface{}, error) {
|
||||
if fs.count == 500 {
|
||||
return []any{nil, nil}, nil
|
||||
return []interface{}{nil, nil}, nil
|
||||
}
|
||||
return []any{1, make([]byte, 1000)}, nil
|
||||
return []interface{}{1, make([]byte, 1000)}, nil
|
||||
}
|
||||
|
||||
func (fs *slowFailRaceSource) Err() error {
|
||||
@ -636,9 +478,6 @@ func (fs *slowFailRaceSource) Err() error {
|
||||
func TestConnCopyFromSlowFailRace(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
|
||||
defer cancel()
|
||||
|
||||
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
|
||||
defer closeConn(t, conn)
|
||||
|
||||
@ -647,7 +486,7 @@ func TestConnCopyFromSlowFailRace(t *testing.T) {
|
||||
b bytea not null
|
||||
)`)
|
||||
|
||||
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a", "b"}, &slowFailRaceSource{})
|
||||
copyCount, err := conn.CopyFrom(context.Background(), pgx.Identifier{"foo"}, []string{"a", "b"}, &slowFailRaceSource{})
|
||||
if err == nil {
|
||||
t.Errorf("Expected CopyFrom return error, but it did not")
|
||||
}
|
||||
@ -664,9 +503,6 @@ func TestConnCopyFromSlowFailRace(t *testing.T) {
|
||||
func TestConnCopyFromCopyFromSourceErrorMidway(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
|
||||
defer cancel()
|
||||
|
||||
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
|
||||
defer closeConn(t, conn)
|
||||
|
||||
@ -674,7 +510,7 @@ func TestConnCopyFromCopyFromSourceErrorMidway(t *testing.T) {
|
||||
a bytea not null
|
||||
)`)
|
||||
|
||||
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a"}, &clientFailSource{})
|
||||
copyCount, err := conn.CopyFrom(context.Background(), pgx.Identifier{"foo"}, []string{"a"}, &clientFailSource{})
|
||||
if err == nil {
|
||||
t.Errorf("Expected CopyFrom return error, but it did not")
|
||||
}
|
||||
@ -682,12 +518,12 @@ func TestConnCopyFromCopyFromSourceErrorMidway(t *testing.T) {
|
||||
t.Errorf("Expected CopyFrom to return 0 copied rows, but got %d", copyCount)
|
||||
}
|
||||
|
||||
rows, err := conn.Query(ctx, "select * from foo")
|
||||
rows, err := conn.Query(context.Background(), "select * from foo")
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error for Query: %v", err)
|
||||
}
|
||||
|
||||
var outputRows [][]any
|
||||
var outputRows [][]interface{}
|
||||
for rows.Next() {
|
||||
row, err := rows.Values()
|
||||
if err != nil {
|
||||
@ -716,20 +552,17 @@ func (cfs *clientFinalErrSource) Next() bool {
|
||||
return cfs.count < 5
|
||||
}
|
||||
|
||||
func (cfs *clientFinalErrSource) Values() ([]any, error) {
|
||||
return []any{make([]byte, 100000)}, nil
|
||||
func (cfs *clientFinalErrSource) Values() ([]interface{}, error) {
|
||||
return []interface{}{make([]byte, 100000)}, nil
|
||||
}
|
||||
|
||||
func (cfs *clientFinalErrSource) Err() error {
|
||||
return fmt.Errorf("final error")
|
||||
return errors.Errorf("final error")
|
||||
}
|
||||
|
||||
func TestConnCopyFromCopyFromSourceErrorEnd(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
|
||||
defer cancel()
|
||||
|
||||
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
|
||||
defer closeConn(t, conn)
|
||||
|
||||
@ -737,7 +570,7 @@ func TestConnCopyFromCopyFromSourceErrorEnd(t *testing.T) {
|
||||
a bytea not null
|
||||
)`)
|
||||
|
||||
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a"}, &clientFinalErrSource{})
|
||||
copyCount, err := conn.CopyFrom(context.Background(), pgx.Identifier{"foo"}, []string{"a"}, &clientFinalErrSource{})
|
||||
if err == nil {
|
||||
t.Errorf("Expected CopyFrom return error, but it did not")
|
||||
}
|
||||
@ -745,12 +578,12 @@ func TestConnCopyFromCopyFromSourceErrorEnd(t *testing.T) {
|
||||
t.Errorf("Expected CopyFrom to return 0 copied rows, but got %d", copyCount)
|
||||
}
|
||||
|
||||
rows, err := conn.Query(ctx, "select * from foo")
|
||||
rows, err := conn.Query(context.Background(), "select * from foo")
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error for Query: %v", err)
|
||||
}
|
||||
|
||||
var outputRows [][]any
|
||||
var outputRows [][]interface{}
|
||||
for rows.Next() {
|
||||
row, err := rows.Values()
|
||||
if err != nil {
|
||||
@ -769,125 +602,3 @@ func TestConnCopyFromCopyFromSourceErrorEnd(t *testing.T) {
|
||||
|
||||
ensureConnValid(t, conn)
|
||||
}
|
||||
|
||||
func TestConnCopyFromAutomaticStringConversion(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
|
||||
defer cancel()
|
||||
|
||||
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
|
||||
defer closeConn(t, conn)
|
||||
|
||||
mustExec(t, conn, `create temporary table foo(
|
||||
a int8
|
||||
)`)
|
||||
|
||||
inputRows := [][]interface{}{
|
||||
{"42"},
|
||||
{"7"},
|
||||
{8},
|
||||
}
|
||||
|
||||
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a"}, pgx.CopyFromRows(inputRows))
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, len(inputRows), copyCount)
|
||||
|
||||
rows, _ := conn.Query(ctx, "select * from foo")
|
||||
nums, err := pgx.CollectRows(rows, pgx.RowTo[int64])
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, []int64{42, 7, 8}, nums)
|
||||
|
||||
ensureConnValid(t, conn)
|
||||
}
|
||||
|
||||
// https://github.com/jackc/pgx/discussions/1891
|
||||
func TestConnCopyFromAutomaticStringConversionArray(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
|
||||
defer cancel()
|
||||
|
||||
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
|
||||
defer closeConn(t, conn)
|
||||
|
||||
mustExec(t, conn, `create temporary table foo(
|
||||
a numeric[]
|
||||
)`)
|
||||
|
||||
inputRows := [][]interface{}{
|
||||
{[]string{"42"}},
|
||||
{[]string{"7"}},
|
||||
{[]string{"8", "9"}},
|
||||
{[][]string{{"10", "11"}, {"12", "13"}}},
|
||||
}
|
||||
|
||||
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a"}, pgx.CopyFromRows(inputRows))
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, len(inputRows), copyCount)
|
||||
|
||||
// Test reads as int64 and flattened array for simplicity.
|
||||
rows, _ := conn.Query(ctx, "select * from foo")
|
||||
nums, err := pgx.CollectRows(rows, pgx.RowTo[[]int64])
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [][]int64{{42}, {7}, {8, 9}, {10, 11, 12, 13}}, nums)
|
||||
|
||||
ensureConnValid(t, conn)
|
||||
}
|
||||
|
||||
func TestCopyFromFunc(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
|
||||
defer closeConn(t, conn)
|
||||
|
||||
mustExec(t, conn, `create temporary table foo(
|
||||
a int
|
||||
)`)
|
||||
|
||||
dataCh := make(chan int, 1)
|
||||
|
||||
const channelItems = 10
|
||||
go func() {
|
||||
for i := 0; i < channelItems; i++ {
|
||||
dataCh <- i
|
||||
}
|
||||
close(dataCh)
|
||||
}()
|
||||
|
||||
copyCount, err := conn.CopyFrom(context.Background(), pgx.Identifier{"foo"}, []string{"a"},
|
||||
pgx.CopyFromFunc(func() ([]any, error) {
|
||||
v, ok := <-dataCh
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
return []any{v}, nil
|
||||
}))
|
||||
|
||||
require.ErrorIs(t, err, nil)
|
||||
require.EqualValues(t, channelItems, copyCount)
|
||||
|
||||
rows, err := conn.Query(context.Background(), "select * from foo order by a")
|
||||
require.NoError(t, err)
|
||||
nums, err := pgx.CollectRows(rows, pgx.RowTo[int64])
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, nums)
|
||||
|
||||
// simulate a failure
|
||||
copyCount, err = conn.CopyFrom(context.Background(), pgx.Identifier{"foo"}, []string{"a"},
|
||||
pgx.CopyFromFunc(func() func() ([]any, error) {
|
||||
x := 9
|
||||
return func() ([]any, error) {
|
||||
x++
|
||||
if x > 100 {
|
||||
return nil, fmt.Errorf("simulated error")
|
||||
}
|
||||
return []any{x}, nil
|
||||
}
|
||||
}()))
|
||||
require.NotErrorIs(t, err, nil)
|
||||
require.EqualValues(t, 0, copyCount) // no change, due to error
|
||||
|
||||
ensureConnValid(t, conn)
|
||||
}
|
||||
|
256
derived_types.go
256
derived_types.go
@ -1,256 +0,0 @@
|
||||
package pgx
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgtype"
|
||||
)
|
||||
|
||||
/*
|
||||
buildLoadDerivedTypesSQL generates the correct query for retrieving type information.
|
||||
|
||||
pgVersion: the major version of the PostgreSQL server
|
||||
typeNames: the names of the types to load. If nil, load all types.
|
||||
*/
|
||||
func buildLoadDerivedTypesSQL(pgVersion int64, typeNames []string) string {
|
||||
supportsMultirange := (pgVersion >= 14)
|
||||
var typeNamesClause string
|
||||
|
||||
if typeNames == nil {
|
||||
// This should not occur; this will not return any types
|
||||
typeNamesClause = "= ''"
|
||||
} else {
|
||||
typeNamesClause = "= ANY($1)"
|
||||
}
|
||||
parts := make([]string, 0, 10)
|
||||
|
||||
// Each of the type names provided might be found in pg_class or pg_type.
|
||||
// Additionally, it may or may not include a schema portion.
|
||||
parts = append(parts, `
|
||||
WITH RECURSIVE
|
||||
-- find the OIDs in pg_class which match one of the provided type names
|
||||
selected_classes(oid,reltype) AS (
|
||||
-- this query uses the namespace search path, so will match type names without a schema prefix
|
||||
SELECT pg_class.oid, pg_class.reltype
|
||||
FROM pg_catalog.pg_class
|
||||
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = pg_class.relnamespace
|
||||
WHERE pg_catalog.pg_table_is_visible(pg_class.oid)
|
||||
AND relname `, typeNamesClause, `
|
||||
UNION ALL
|
||||
-- this query will only match type names which include the schema prefix
|
||||
SELECT pg_class.oid, pg_class.reltype
|
||||
FROM pg_class
|
||||
INNER JOIN pg_namespace ON (pg_class.relnamespace = pg_namespace.oid)
|
||||
WHERE nspname || '.' || relname `, typeNamesClause, `
|
||||
),
|
||||
selected_types(oid) AS (
|
||||
-- collect the OIDs from pg_types which correspond to the selected classes
|
||||
SELECT reltype AS oid
|
||||
FROM selected_classes
|
||||
UNION ALL
|
||||
-- as well as any other type names which match our criteria
|
||||
SELECT pg_type.oid
|
||||
FROM pg_type
|
||||
LEFT OUTER JOIN pg_namespace ON (pg_type.typnamespace = pg_namespace.oid)
|
||||
WHERE typname `, typeNamesClause, `
|
||||
OR nspname || '.' || typname `, typeNamesClause, `
|
||||
),
|
||||
-- this builds a parent/child mapping of objects, allowing us to know
|
||||
-- all the child (ie: dependent) types that a parent (type) requires
|
||||
-- As can be seen, there are 3 ways this can occur (the last of which
|
||||
-- is due to being a composite class, where the composite fields are children)
|
||||
pc(parent, child) AS (
|
||||
SELECT parent.oid, parent.typelem
|
||||
FROM pg_type parent
|
||||
WHERE parent.typtype = 'b' AND parent.typelem != 0
|
||||
UNION ALL
|
||||
SELECT parent.oid, parent.typbasetype
|
||||
FROM pg_type parent
|
||||
WHERE parent.typtypmod = -1 AND parent.typbasetype != 0
|
||||
UNION ALL
|
||||
SELECT pg_type.oid, atttypid
|
||||
FROM pg_attribute
|
||||
INNER JOIN pg_class ON (pg_class.oid = pg_attribute.attrelid)
|
||||
INNER JOIN pg_type ON (pg_type.oid = pg_class.reltype)
|
||||
WHERE NOT attisdropped
|
||||
AND attnum > 0
|
||||
),
|
||||
-- Now construct a recursive query which includes a 'depth' element.
|
||||
-- This is used to ensure that the "youngest" children are registered before
|
||||
-- their parents.
|
||||
relationships(parent, child, depth) AS (
|
||||
SELECT DISTINCT 0::OID, selected_types.oid, 0
|
||||
FROM selected_types
|
||||
UNION ALL
|
||||
SELECT pg_type.oid AS parent, pg_attribute.atttypid AS child, 1
|
||||
FROM selected_classes c
|
||||
inner join pg_type ON (c.reltype = pg_type.oid)
|
||||
inner join pg_attribute on (c.oid = pg_attribute.attrelid)
|
||||
UNION ALL
|
||||
SELECT pc.parent, pc.child, relationships.depth + 1
|
||||
FROM pc
|
||||
INNER JOIN relationships ON (pc.parent = relationships.child)
|
||||
),
|
||||
-- composite fields need to be encapsulated as a couple of arrays to provide the required information for registration
|
||||
composite AS (
|
||||
SELECT pg_type.oid, ARRAY_AGG(attname ORDER BY attnum) AS attnames, ARRAY_AGG(atttypid ORDER BY ATTNUM) AS atttypids
|
||||
FROM pg_attribute
|
||||
INNER JOIN pg_class ON (pg_class.oid = pg_attribute.attrelid)
|
||||
INNER JOIN pg_type ON (pg_type.oid = pg_class.reltype)
|
||||
WHERE NOT attisdropped
|
||||
AND attnum > 0
|
||||
GROUP BY pg_type.oid
|
||||
)
|
||||
-- Bring together this information, showing all the information which might possibly be required
|
||||
-- to complete the registration, applying filters to only show the items which relate to the selected
|
||||
-- types/classes.
|
||||
SELECT typname,
|
||||
pg_namespace.nspname,
|
||||
typtype,
|
||||
typbasetype,
|
||||
typelem,
|
||||
pg_type.oid,`)
|
||||
if supportsMultirange {
|
||||
parts = append(parts, `
|
||||
COALESCE(multirange.rngtypid, 0) AS rngtypid,`)
|
||||
} else {
|
||||
parts = append(parts, `
|
||||
0 AS rngtypid,`)
|
||||
}
|
||||
parts = append(parts, `
|
||||
COALESCE(pg_range.rngsubtype, 0) AS rngsubtype,
|
||||
attnames, atttypids
|
||||
FROM relationships
|
||||
INNER JOIN pg_type ON (pg_type.oid = relationships.child)
|
||||
LEFT OUTER JOIN pg_range ON (pg_type.oid = pg_range.rngtypid)`)
|
||||
if supportsMultirange {
|
||||
parts = append(parts, `
|
||||
LEFT OUTER JOIN pg_range multirange ON (pg_type.oid = multirange.rngmultitypid)`)
|
||||
}
|
||||
|
||||
parts = append(parts, `
|
||||
LEFT OUTER JOIN composite USING (oid)
|
||||
LEFT OUTER JOIN pg_namespace ON (pg_type.typnamespace = pg_namespace.oid)
|
||||
WHERE NOT (typtype = 'b' AND typelem = 0)`)
|
||||
parts = append(parts, `
|
||||
GROUP BY typname, pg_namespace.nspname, typtype, typbasetype, typelem, pg_type.oid, pg_range.rngsubtype,`)
|
||||
if supportsMultirange {
|
||||
parts = append(parts, `
|
||||
multirange.rngtypid,`)
|
||||
}
|
||||
parts = append(parts, `
|
||||
attnames, atttypids
|
||||
ORDER BY MAX(depth) desc, typname;`)
|
||||
return strings.Join(parts, "")
|
||||
}
|
||||
|
||||
type derivedTypeInfo struct {
|
||||
Oid, Typbasetype, Typelem, Rngsubtype, Rngtypid uint32
|
||||
TypeName, Typtype, NspName string
|
||||
Attnames []string
|
||||
Atttypids []uint32
|
||||
}
|
||||
|
||||
// LoadTypes performs a single (complex) query, returning all the required
|
||||
// information to register the named types, as well as any other types directly
|
||||
// or indirectly required to complete the registration.
|
||||
// The result of this call can be passed into RegisterTypes to complete the process.
|
||||
func (c *Conn) LoadTypes(ctx context.Context, typeNames []string) ([]*pgtype.Type, error) {
|
||||
m := c.TypeMap()
|
||||
if len(typeNames) == 0 {
|
||||
return nil, fmt.Errorf("No type names were supplied.")
|
||||
}
|
||||
|
||||
// Disregard server version errors. This will result in
|
||||
// the SQL not support recent structures such as multirange
|
||||
serverVersion, _ := serverVersion(c)
|
||||
sql := buildLoadDerivedTypesSQL(serverVersion, typeNames)
|
||||
rows, err := c.Query(ctx, sql, QueryExecModeSimpleProtocol, typeNames)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("While generating load types query: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
result := make([]*pgtype.Type, 0, 100)
|
||||
for rows.Next() {
|
||||
ti := derivedTypeInfo{}
|
||||
err = rows.Scan(&ti.TypeName, &ti.NspName, &ti.Typtype, &ti.Typbasetype, &ti.Typelem, &ti.Oid, &ti.Rngtypid, &ti.Rngsubtype, &ti.Attnames, &ti.Atttypids)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("While scanning type information: %w", err)
|
||||
}
|
||||
var type_ *pgtype.Type
|
||||
switch ti.Typtype {
|
||||
case "b": // array
|
||||
dt, ok := m.TypeForOID(ti.Typelem)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Array element OID %v not registered while loading pgtype %q", ti.Typelem, ti.TypeName)
|
||||
}
|
||||
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.ArrayCodec{ElementType: dt}}
|
||||
case "c": // composite
|
||||
var fields []pgtype.CompositeCodecField
|
||||
for i, fieldName := range ti.Attnames {
|
||||
dt, ok := m.TypeForOID(ti.Atttypids[i])
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Unknown field for composite type %q: field %q (OID %v) is not already registered.", ti.TypeName, fieldName, ti.Atttypids[i])
|
||||
}
|
||||
fields = append(fields, pgtype.CompositeCodecField{Name: fieldName, Type: dt})
|
||||
}
|
||||
|
||||
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.CompositeCodec{Fields: fields}}
|
||||
case "d": // domain
|
||||
dt, ok := m.TypeForOID(ti.Typbasetype)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Domain base type OID %v was not already registered, needed for %q", ti.Typbasetype, ti.TypeName)
|
||||
}
|
||||
|
||||
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: dt.Codec}
|
||||
case "e": // enum
|
||||
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.EnumCodec{}}
|
||||
case "r": // range
|
||||
dt, ok := m.TypeForOID(ti.Rngsubtype)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Range element OID %v was not already registered, needed for %q", ti.Rngsubtype, ti.TypeName)
|
||||
}
|
||||
|
||||
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.RangeCodec{ElementType: dt}}
|
||||
case "m": // multirange
|
||||
dt, ok := m.TypeForOID(ti.Rngtypid)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Multirange element OID %v was not already registered, needed for %q", ti.Rngtypid, ti.TypeName)
|
||||
}
|
||||
|
||||
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.MultirangeCodec{ElementType: dt}}
|
||||
default:
|
||||
return nil, fmt.Errorf("Unknown typtype %q was found while registering %q", ti.Typtype, ti.TypeName)
|
||||
}
|
||||
|
||||
// the type_ is imposible to be null
|
||||
m.RegisterType(type_)
|
||||
if ti.NspName != "" {
|
||||
nspType := &pgtype.Type{Name: ti.NspName + "." + type_.Name, OID: type_.OID, Codec: type_.Codec}
|
||||
m.RegisterType(nspType)
|
||||
result = append(result, nspType)
|
||||
}
|
||||
result = append(result, type_)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// serverVersion returns the postgresql server version.
|
||||
func serverVersion(c *Conn) (int64, error) {
|
||||
serverVersionStr := c.PgConn().ParameterStatus("server_version")
|
||||
serverVersionStr = regexp.MustCompile(`^[0-9]+`).FindString(serverVersionStr)
|
||||
// if not PostgreSQL do nothing
|
||||
if serverVersionStr == "" {
|
||||
return 0, fmt.Errorf("Cannot identify server version in %q", serverVersionStr)
|
||||
}
|
||||
|
||||
version, err := strconv.ParseInt(serverVersionStr, 10, 64)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("postgres version parsing failed: %w", err)
|
||||
}
|
||||
return version, nil
|
||||
}
|
@ -1,40 +0,0 @@
|
||||
package pgx_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestCompositeCodecTranscodeWithLoadTypes(t *testing.T) {
|
||||
skipCockroachDB(t, "Server does not support composite types (see https://github.com/cockroachdb/cockroach/issues/27792)")
|
||||
|
||||
defaultConnTestRunner.RunTest(context.Background(), t, func(ctx context.Context, t testing.TB, conn *pgx.Conn) {
|
||||
_, err := conn.Exec(ctx, `
|
||||
drop type if exists dtype_test;
|
||||
drop domain if exists anotheruint64;
|
||||
|
||||
create domain anotheruint64 as numeric(20,0);
|
||||
create type dtype_test as (
|
||||
a text,
|
||||
b int4,
|
||||
c anotheruint64,
|
||||
d anotheruint64[]
|
||||
);`)
|
||||
require.NoError(t, err)
|
||||
defer conn.Exec(ctx, "drop type dtype_test")
|
||||
defer conn.Exec(ctx, "drop domain anotheruint64")
|
||||
|
||||
types, err := conn.LoadTypes(ctx, []string{"dtype_test"})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, types, 6)
|
||||
require.Equal(t, types[0].Name, "public.anotheruint64")
|
||||
require.Equal(t, types[1].Name, "anotheruint64")
|
||||
require.Equal(t, types[2].Name, "public._anotheruint64")
|
||||
require.Equal(t, types[3].Name, "_anotheruint64")
|
||||
require.Equal(t, types[4].Name, "public.dtype_test")
|
||||
require.Equal(t, types[5].Name, "dtype_test")
|
||||
})
|
||||
}
|
269
doc.go
269
doc.go
@ -1,54 +1,68 @@
|
||||
// Package pgx is a PostgreSQL database driver.
|
||||
/*
|
||||
pgx provides a native PostgreSQL driver and can act as a database/sql driver. The native PostgreSQL interface is similar
|
||||
to the database/sql interface while providing better speed and access to PostgreSQL specific features. Use
|
||||
github.com/jackc/pgx/v5/stdlib to use pgx as a database/sql compatible driver. See that package's documentation for
|
||||
details.
|
||||
pgx provides lower level access to PostgreSQL than the standard database/sql. It remains as similar to the database/sql
|
||||
interface as possible while providing better speed and access to PostgreSQL specific features. Import
|
||||
github.com/jackc/pgx/v4/stdlib to use pgx as a database/sql compatible driver.
|
||||
|
||||
Establishing a Connection
|
||||
|
||||
The primary way of establishing a connection is with [pgx.Connect]:
|
||||
The primary way of establishing a connection is with `pgx.Connect`.
|
||||
|
||||
conn, err := pgx.Connect(context.Background(), os.Getenv("DATABASE_URL"))
|
||||
|
||||
The database connection string can be in URL or key/value format. Both PostgreSQL settings and pgx settings can be
|
||||
specified here. In addition, a config struct can be created by [ParseConfig] and modified before establishing the
|
||||
connection with [ConnectConfig] to configure settings such as tracing that cannot be configured with a connection
|
||||
string.
|
||||
The database connection string can be in URL or DSN format. Both PostgreSQL settings and pgx settings can be specified
|
||||
here. In addition, a config struct can be created by `ParseConfig` and modified before establishing the connection with
|
||||
`ConnectConfig`.
|
||||
|
||||
config, err := pgx.ParseConfig(os.Getenv("DATABASE_URL"))
|
||||
if err != nil {
|
||||
// ...
|
||||
}
|
||||
config.Logger = log15adapter.NewLogger(log.New("module", "pgx"))
|
||||
|
||||
conn, err := pgx.ConnectConfig(context.Background(), config)
|
||||
|
||||
Connection Pool
|
||||
|
||||
[*pgx.Conn] represents a single connection to the database and is not concurrency safe. Use package
|
||||
github.com/jackc/pgx/v5/pgxpool for a concurrency safe connection pool.
|
||||
`*pgx.Conn` represents a single connection to the database and is not concurrency safe. Use sub-package pgxpool for a
|
||||
concurrency safe connection pool.
|
||||
|
||||
Query Interface
|
||||
|
||||
pgx implements Query in the familiar database/sql style. However, pgx provides generic functions such as CollectRows and
|
||||
ForEachRow that are a simpler and safer way of processing rows than manually calling defer rows.Close(), rows.Next(),
|
||||
rows.Scan, and rows.Err().
|
||||
pgx implements Query and Scan in the familiar database/sql style.
|
||||
|
||||
CollectRows can be used collect all returned rows into a slice.
|
||||
var sum int32
|
||||
|
||||
rows, _ := conn.Query(context.Background(), "select generate_series(1,$1)", 5)
|
||||
numbers, err := pgx.CollectRows(rows, pgx.RowTo[int32])
|
||||
// Send the query to the server. The returned rows MUST be closed
|
||||
// before conn can be used again.
|
||||
rows, err := conn.Query(context.Background(), "select generate_series(1,$1)", 10)
|
||||
if err != nil {
|
||||
return err
|
||||
return err
|
||||
}
|
||||
// numbers => [1 2 3 4 5]
|
||||
|
||||
ForEachRow can be used to execute a callback function for every row. This is often easier than iterating over rows
|
||||
directly.
|
||||
// rows.Close is called by rows.Next when all rows are read
|
||||
// or an error occurs in Next or Scan. So it may optionally be
|
||||
// omitted if nothing in the rows.Next loop can panic. It is
|
||||
// safe to close rows multiple times.
|
||||
defer rows.Close()
|
||||
|
||||
var sum, n int32
|
||||
rows, _ := conn.Query(context.Background(), "select generate_series(1,$1)", 10)
|
||||
_, err := pgx.ForEachRow(rows, []any{&n}, func() error {
|
||||
sum += n
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
// Iterate through the result set
|
||||
for rows.Next() {
|
||||
var n int32
|
||||
err = rows.Scan(&n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sum += n
|
||||
}
|
||||
|
||||
// Any errors encountered by rows.Next or rows.Scan will be returned here
|
||||
if rows.Err() != nil {
|
||||
return rows.Err()
|
||||
}
|
||||
|
||||
// No errors found - do something with sum
|
||||
|
||||
pgx also implements QueryRow in the same style as database/sql.
|
||||
|
||||
var name string
|
||||
@ -68,11 +82,148 @@ Use Exec to execute a query that does not return a result set.
|
||||
return errors.New("No row found to delete")
|
||||
}
|
||||
|
||||
PostgreSQL Data Types
|
||||
QueryFunc can be used to execute a callback function for every row. This is often easier to use than Query.
|
||||
|
||||
pgx uses the pgtype package to converting Go values to and from PostgreSQL values. It supports many PostgreSQL types
|
||||
directly and is customizable and extendable. User defined data types such as enums, domains, and composite types may
|
||||
require type registration. See that package's documentation for details.
|
||||
var sum, n int32
|
||||
_, err = conn.QueryFunc(
|
||||
context.Background(),
|
||||
"select generate_series(1,$1)",
|
||||
[]interface{}{10},
|
||||
[]interface{}{&n},
|
||||
func(pgx.QueryFuncRow) error {
|
||||
sum += n
|
||||
return nil
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
Base Type Mapping
|
||||
|
||||
pgx maps between all common base types directly between Go and PostgreSQL. In particular:
|
||||
|
||||
Go PostgreSQL
|
||||
-----------------------
|
||||
string varchar
|
||||
text
|
||||
|
||||
// Integers are automatically be converted to any other integer type if
|
||||
// it can be done without overflow or underflow.
|
||||
int8
|
||||
int16 smallint
|
||||
int32 int
|
||||
int64 bigint
|
||||
int
|
||||
uint8
|
||||
uint16
|
||||
uint32
|
||||
uint64
|
||||
uint
|
||||
|
||||
// Floats are strict and do not automatically convert like integers.
|
||||
float32 float4
|
||||
float64 float8
|
||||
|
||||
time.Time date
|
||||
timestamp
|
||||
timestamptz
|
||||
|
||||
[]byte bytea
|
||||
|
||||
|
||||
Null Mapping
|
||||
|
||||
pgx can map nulls in two ways. The first is package pgtype provides types that have a data field and a status field.
|
||||
They work in a similar fashion to database/sql. The second is to use a pointer to a pointer.
|
||||
|
||||
var foo pgtype.Varchar
|
||||
var bar *string
|
||||
err := conn.QueryRow("select foo, bar from widgets where id=$1", 42).Scan(&foo, &bar)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
Array Mapping
|
||||
|
||||
pgx maps between int16, int32, int64, float32, float64, and string Go slices and the equivalent PostgreSQL array type.
|
||||
Go slices of native types do not support nulls, so if a PostgreSQL array that contains a null is read into a native Go
|
||||
slice an error will occur. The pgtype package includes many more array types for PostgreSQL types that do not directly
|
||||
map to native Go types.
|
||||
|
||||
JSON and JSONB Mapping
|
||||
|
||||
pgx includes built-in support to marshal and unmarshal between Go types and the PostgreSQL JSON and JSONB.
|
||||
|
||||
Inet and CIDR Mapping
|
||||
|
||||
pgx encodes from net.IPNet to and from inet and cidr PostgreSQL types. In addition, as a convenience pgx will encode
|
||||
from a net.IP; it will assume a /32 netmask for IPv4 and a /128 for IPv6.
|
||||
|
||||
Custom Type Support
|
||||
|
||||
pgx includes support for the common data types like integers, floats, strings, dates, and times that have direct
|
||||
mappings between Go and SQL. In addition, pgx uses the github.com/jackc/pgtype library to support more types. See
|
||||
documention for that library for instructions on how to implement custom types.
|
||||
|
||||
See example_custom_type_test.go for an example of a custom type for the PostgreSQL point type.
|
||||
|
||||
pgx also includes support for custom types implementing the database/sql.Scanner and database/sql/driver.Valuer
|
||||
interfaces.
|
||||
|
||||
If pgx does cannot natively encode a type and that type is a renamed type (e.g. type MyTime time.Time) pgx will attempt
|
||||
to encode the underlying type. While this is usually desired behavior it can produce surprising behavior if one the
|
||||
underlying type and the renamed type each implement database/sql interfaces and the other implements pgx interfaces. It
|
||||
is recommended that this situation be avoided by implementing pgx interfaces on the renamed type.
|
||||
|
||||
Composite types and row values
|
||||
|
||||
Row values and composite types are represented as pgtype.Record (https://pkg.go.dev/github.com/jackc/pgtype?tab=doc#Record).
|
||||
It is possible to get values of your custom type by implementing DecodeBinary interface. Decoding into
|
||||
pgtype.Record first can simplify process by avoiding dealing with raw protocol directly.
|
||||
|
||||
For example:
|
||||
|
||||
type MyType struct {
|
||||
a int // NULL will cause decoding error
|
||||
b *string // there can be NULL in this position in SQL
|
||||
}
|
||||
|
||||
func (t *MyType) DecodeBinary(ci *pgtype.ConnInfo, src []byte) error {
|
||||
r := pgtype.Record{
|
||||
Fields: []pgtype.Value{&pgtype.Int4{}, &pgtype.Text{}},
|
||||
}
|
||||
|
||||
if err := r.DecodeBinary(ci, src); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if r.Status != pgtype.Present {
|
||||
return errors.New("BUG: decoding should not be called on NULL value")
|
||||
}
|
||||
|
||||
a := r.Fields[0].(*pgtype.Int4)
|
||||
b := r.Fields[1].(*pgtype.Text)
|
||||
|
||||
// type compatibility is checked by AssignTo
|
||||
// only lossless assignments will succeed
|
||||
if err := a.AssignTo(&t.a); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// AssignTo also deals with null value handling
|
||||
if err := b.AssignTo(&t.b); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
result := MyType{}
|
||||
err := conn.QueryRow(context.Background(), "select row(1, 'foo'::text)", pgx.QueryResultFormats{pgx.BinaryFormatCode}).Scan(&r)
|
||||
|
||||
Raw Bytes Mapping
|
||||
|
||||
[]byte passed as arguments to Query, QueryRow, and Exec are passed unmodified to PostgreSQL.
|
||||
|
||||
Transactions
|
||||
|
||||
@ -99,19 +250,7 @@ Transactions are started by calling Begin.
|
||||
The Tx returned from Begin also implements the Begin method. This can be used to implement pseudo nested transactions.
|
||||
These are internally implemented with savepoints.
|
||||
|
||||
Use BeginTx to control the transaction mode. BeginTx also can be used to ensure a new transaction is created instead of
|
||||
a pseudo nested transaction.
|
||||
|
||||
BeginFunc and BeginTxFunc are functions that begin a transaction, execute a function, and commit or rollback the
|
||||
transaction depending on the return value of the function. These can be simpler and less error prone to use.
|
||||
|
||||
err = pgx.BeginFunc(context.Background(), conn, func(tx pgx.Tx) error {
|
||||
_, err := tx.Exec(context.Background(), "insert into foo(id) values (1)")
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
Use BeginTx to control the transaction mode.
|
||||
|
||||
Prepared Statements
|
||||
|
||||
@ -123,10 +262,10 @@ for information on how to customize or disable the statement cache.
|
||||
Copy Protocol
|
||||
|
||||
Use CopyFrom to efficiently insert multiple rows at a time using the PostgreSQL copy protocol. CopyFrom accepts a
|
||||
CopyFromSource interface. If the data is already in a [][]any use CopyFromRows to wrap it in a CopyFromSource interface.
|
||||
Or implement CopyFromSource to avoid buffering the entire data set in memory.
|
||||
CopyFromSource interface. If the data is already in a [][]interface{} use CopyFromRows to wrap it in a CopyFromSource
|
||||
interface. Or implement CopyFromSource to avoid buffering the entire data set in memory.
|
||||
|
||||
rows := [][]any{
|
||||
rows := [][]interface{}{
|
||||
{"John", "Smith", int32(36)},
|
||||
{"Jane", "Doe", int32(29)},
|
||||
}
|
||||
@ -149,8 +288,8 @@ When you already have a typed array using CopyFromSlice can be more convenient.
|
||||
context.Background(),
|
||||
pgx.Identifier{"people"},
|
||||
[]string{"first_name", "last_name", "age"},
|
||||
pgx.CopyFromSlice(len(rows), func(i int) ([]any, error) {
|
||||
return []any{rows[i].FirstName, rows[i].LastName, rows[i].Age}, nil
|
||||
pgx.CopyFromSlice(len(rows), func(i int) ([]interface{}, error) {
|
||||
return []interface{user.FirstName, user.LastName, user.Age}, nil
|
||||
}),
|
||||
)
|
||||
|
||||
@ -159,36 +298,32 @@ CopyFrom can be faster than an insert with as few as 5 rows.
|
||||
Listen and Notify
|
||||
|
||||
pgx can listen to the PostgreSQL notification system with the `Conn.WaitForNotification` method. It blocks until a
|
||||
notification is received or the context is canceled.
|
||||
context is received or the context is canceled.
|
||||
|
||||
_, err := conn.Exec(context.Background(), "listen channelname")
|
||||
if err != nil {
|
||||
return err
|
||||
return nil
|
||||
}
|
||||
|
||||
notification, err := conn.WaitForNotification(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
if notification, err := conn.WaitForNotification(context.Background()); err != nil {
|
||||
// do something with notification
|
||||
}
|
||||
// do something with notification
|
||||
|
||||
|
||||
Tracing and Logging
|
||||
Logging
|
||||
|
||||
pgx supports tracing by setting ConnConfig.Tracer. To combine several tracers you can use the multitracer.Tracer.
|
||||
|
||||
In addition, the tracelog package provides the TraceLog type which lets a traditional logger act as a Tracer.
|
||||
|
||||
For debug tracing of the actual PostgreSQL wire protocol messages see github.com/jackc/pgx/v5/pgproto3.
|
||||
pgx defines a simple logger interface. Connections optionally accept a logger that satisfies this interface. Set
|
||||
LogLevel to control logging verbosity. Adapters for github.com/inconshreveable/log15, github.com/sirupsen/logrus,
|
||||
go.uber.org/zap, github.com/rs/zerolog, and the testing log are provided in the log directory.
|
||||
|
||||
Lower Level PostgreSQL Functionality
|
||||
|
||||
github.com/jackc/pgx/v5/pgconn contains a lower level PostgreSQL driver roughly at the level of libpq. pgx.Conn is
|
||||
implemented on top of pgconn. The Conn.PgConn() method can be used to access this lower layer.
|
||||
pgx is implemented on top of github.com/jackc/pgconn a lower level PostgreSQL driver. The Conn.PgConn() method can be
|
||||
used to access this lower layer.
|
||||
|
||||
PgBouncer
|
||||
|
||||
By default pgx automatically uses prepared statements. Prepared statements are incompatible with PgBouncer. This can be
|
||||
disabled by setting a different QueryExecMode in ConnConfig.DefaultQueryExecMode.
|
||||
pgx is compatible with PgBouncer in two modes. One is when the connection has a statement cache in "describe" mode. The
|
||||
other is when the connection is using the simple protocol. This can be set with the PreferSimpleProtocol config option.
|
||||
*/
|
||||
package pgx
|
||||
|
107
example_custom_type_test.go
Normal file
107
example_custom_type_test.go
Normal file
@ -0,0 +1,107 @@
|
||||
package pgx_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
|
||||
"github.com/jackc/pgtype"
|
||||
"github.com/jackc/pgx/v4"
|
||||
errors "golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
var pointRegexp *regexp.Regexp = regexp.MustCompile(`^\((.*),(.*)\)$`)
|
||||
|
||||
// Point represents a point that may be null.
|
||||
type Point struct {
|
||||
X, Y float64 // Coordinates of point
|
||||
Status pgtype.Status
|
||||
}
|
||||
|
||||
func (dst *Point) Set(src interface{}) error {
|
||||
return errors.Errorf("cannot convert %v to Point", src)
|
||||
}
|
||||
|
||||
func (dst *Point) Get() interface{} {
|
||||
switch dst.Status {
|
||||
case pgtype.Present:
|
||||
return dst
|
||||
case pgtype.Null:
|
||||
return nil
|
||||
default:
|
||||
return dst.Status
|
||||
}
|
||||
}
|
||||
|
||||
func (src *Point) AssignTo(dst interface{}) error {
|
||||
return errors.Errorf("cannot assign %v to %T", src, dst)
|
||||
}
|
||||
|
||||
func (dst *Point) DecodeText(ci *pgtype.ConnInfo, src []byte) error {
|
||||
if src == nil {
|
||||
*dst = Point{Status: pgtype.Null}
|
||||
return nil
|
||||
}
|
||||
|
||||
s := string(src)
|
||||
match := pointRegexp.FindStringSubmatch(s)
|
||||
if match == nil {
|
||||
return errors.Errorf("Received invalid point: %v", s)
|
||||
}
|
||||
|
||||
x, err := strconv.ParseFloat(match[1], 64)
|
||||
if err != nil {
|
||||
return errors.Errorf("Received invalid point: %v", s)
|
||||
}
|
||||
y, err := strconv.ParseFloat(match[2], 64)
|
||||
if err != nil {
|
||||
return errors.Errorf("Received invalid point: %v", s)
|
||||
}
|
||||
|
||||
*dst = Point{X: x, Y: y, Status: pgtype.Present}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (src *Point) String() string {
|
||||
if src.Status == pgtype.Null {
|
||||
return "null point"
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%.1f, %.1f", src.X, src.Y)
|
||||
}
|
||||
|
||||
func Example_CustomType() {
|
||||
conn, err := pgx.Connect(context.Background(), os.Getenv("PGX_TEST_DATABASE"))
|
||||
if err != nil {
|
||||
fmt.Printf("Unable to establish connection: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Override registered handler for point
|
||||
conn.ConnInfo().RegisterDataType(pgtype.DataType{
|
||||
Value: &Point{},
|
||||
Name: "point",
|
||||
OID: 600,
|
||||
})
|
||||
|
||||
p := &Point{}
|
||||
err = conn.QueryRow(context.Background(), "select null::point").Scan(p)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
fmt.Println(p)
|
||||
|
||||
err = conn.QueryRow(context.Background(), "select point(1.5,2.5)").Scan(p)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
fmt.Println(p)
|
||||
// Output:
|
||||
// null point
|
||||
// 1.5, 2.5
|
||||
}
|
@ -1,14 +1,14 @@
|
||||
package pgtype_test
|
||||
package pgx_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v4"
|
||||
)
|
||||
|
||||
func Example_json() {
|
||||
func Example_JSON() {
|
||||
conn, err := pgx.Connect(context.Background(), os.Getenv("PGX_TEST_DATABASE"))
|
||||
if err != nil {
|
||||
fmt.Printf("Unable to establish connection: %v", err)
|
@ -6,14 +6,14 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgxpool"
|
||||
"github.com/jackc/pgx/v4/pgxpool"
|
||||
)
|
||||
|
||||
var pool *pgxpool.Pool
|
||||
|
||||
func main() {
|
||||
var err error
|
||||
pool, err = pgxpool.New(context.Background(), os.Getenv("DATABASE_URL"))
|
||||
pool, err = pgxpool.Connect(context.Background(), os.Getenv("DATABASE_URL"))
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, "Unable to connect to database:", err)
|
||||
os.Exit(1)
|
||||
|
@ -6,7 +6,7 @@ import (
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v4"
|
||||
)
|
||||
|
||||
var conn *pgx.Conn
|
||||
|
@ -2,13 +2,14 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"log"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v5/pgxpool"
|
||||
"github.com/jackc/pgx/v4"
|
||||
"github.com/jackc/pgx/v4/log/log15adapter"
|
||||
"github.com/jackc/pgx/v4/pgxpool"
|
||||
log "gopkg.in/inconshreveable/log15.v2"
|
||||
)
|
||||
|
||||
var db *pgxpool.Pool
|
||||
@ -29,7 +30,7 @@ func getUrlHandler(w http.ResponseWriter, req *http.Request) {
|
||||
func putUrlHandler(w http.ResponseWriter, req *http.Request) {
|
||||
id := req.URL.Path
|
||||
var url string
|
||||
if body, err := io.ReadAll(req.Body); err == nil {
|
||||
if body, err := ioutil.ReadAll(req.Body); err == nil {
|
||||
url = string(body)
|
||||
} else {
|
||||
http.Error(w, "Internal server error", http.StatusInternalServerError)
|
||||
@ -70,21 +71,28 @@ func urlHandler(w http.ResponseWriter, req *http.Request) {
|
||||
}
|
||||
|
||||
func main() {
|
||||
logger := log15adapter.NewLogger(log.New("module", "pgx"))
|
||||
|
||||
poolConfig, err := pgxpool.ParseConfig(os.Getenv("DATABASE_URL"))
|
||||
if err != nil {
|
||||
log.Fatalln("Unable to parse DATABASE_URL:", err)
|
||||
log.Crit("Unable to parse DATABASE_URL", "error", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
db, err = pgxpool.NewWithConfig(context.Background(), poolConfig)
|
||||
poolConfig.ConnConfig.Logger = logger
|
||||
|
||||
db, err = pgxpool.ConnectConfig(context.Background(), poolConfig)
|
||||
if err != nil {
|
||||
log.Fatalln("Unable to create connection pool:", err)
|
||||
log.Crit("Unable to create connection pool", "error", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
http.HandleFunc("/", urlHandler)
|
||||
|
||||
log.Println("Starting URL shortener on localhost:8080")
|
||||
log.Info("Starting URL shortener on localhost:8080")
|
||||
err = http.ListenAndServe("localhost:8080", nil)
|
||||
if err != nil {
|
||||
log.Fatalln("Unable to start web server:", err)
|
||||
log.Crit("Unable to start web server", "error", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
@ -1,146 +1,168 @@
|
||||
package pgx
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
"github.com/jackc/pgx/v5/pgtype"
|
||||
"github.com/jackc/pgtype"
|
||||
)
|
||||
|
||||
// ExtendedQueryBuilder is used to choose the parameter formats, to format the parameters and to choose the result
|
||||
// formats for an extended query.
|
||||
type ExtendedQueryBuilder struct {
|
||||
ParamValues [][]byte
|
||||
type extendedQueryBuilder struct {
|
||||
paramValues [][]byte
|
||||
paramValueBytes []byte
|
||||
ParamFormats []int16
|
||||
ResultFormats []int16
|
||||
paramFormats []int16
|
||||
resultFormats []int16
|
||||
|
||||
resetCount int
|
||||
}
|
||||
|
||||
// Build sets ParamValues, ParamFormats, and ResultFormats for use with *PgConn.ExecParams or *PgConn.ExecPrepared. If
|
||||
// sd is nil then QueryExecModeExec behavior will be used.
|
||||
func (eqb *ExtendedQueryBuilder) Build(m *pgtype.Map, sd *pgconn.StatementDescription, args []any) error {
|
||||
eqb.reset()
|
||||
func (eqb *extendedQueryBuilder) AppendParam(ci *pgtype.ConnInfo, oid uint32, arg interface{}) error {
|
||||
f := chooseParameterFormatCode(ci, oid, arg)
|
||||
eqb.paramFormats = append(eqb.paramFormats, f)
|
||||
|
||||
if sd == nil {
|
||||
for i := range args {
|
||||
err := eqb.appendParam(m, 0, pgtype.TextFormatCode, args[i])
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to encode args[%d]: %w", i, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(sd.ParamOIDs) != len(args) {
|
||||
return fmt.Errorf("mismatched param and argument count")
|
||||
}
|
||||
|
||||
for i := range args {
|
||||
err := eqb.appendParam(m, sd.ParamOIDs[i], -1, args[i])
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to encode args[%d]: %w", i, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for i := range sd.Fields {
|
||||
eqb.appendResultFormat(m.FormatCodeForOID(sd.Fields[i].DataTypeOID))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// appendParam appends a parameter to the query. format may be -1 to automatically choose the format. If arg is nil it
|
||||
// must be an untyped nil.
|
||||
func (eqb *ExtendedQueryBuilder) appendParam(m *pgtype.Map, oid uint32, format int16, arg any) error {
|
||||
if format == -1 {
|
||||
preferredFormat := eqb.chooseParameterFormatCode(m, oid, arg)
|
||||
preferredErr := eqb.appendParam(m, oid, preferredFormat, arg)
|
||||
if preferredErr == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var otherFormat int16
|
||||
if preferredFormat == TextFormatCode {
|
||||
otherFormat = BinaryFormatCode
|
||||
} else {
|
||||
otherFormat = TextFormatCode
|
||||
}
|
||||
|
||||
otherErr := eqb.appendParam(m, oid, otherFormat, arg)
|
||||
if otherErr == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return preferredErr // return the error from the preferred format
|
||||
}
|
||||
|
||||
v, err := eqb.encodeExtendedParamValue(m, oid, format, arg)
|
||||
v, err := eqb.encodeExtendedParamValue(ci, oid, f, arg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
eqb.ParamFormats = append(eqb.ParamFormats, format)
|
||||
eqb.ParamValues = append(eqb.ParamValues, v)
|
||||
eqb.paramValues = append(eqb.paramValues, v)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// appendResultFormat appends a result format to the query.
|
||||
func (eqb *ExtendedQueryBuilder) appendResultFormat(format int16) {
|
||||
eqb.ResultFormats = append(eqb.ResultFormats, format)
|
||||
func (eqb *extendedQueryBuilder) AppendResultFormat(f int16) {
|
||||
eqb.resultFormats = append(eqb.resultFormats, f)
|
||||
}
|
||||
|
||||
// reset readies eqb to build another query.
|
||||
func (eqb *ExtendedQueryBuilder) reset() {
|
||||
eqb.ParamValues = eqb.ParamValues[0:0]
|
||||
func (eqb *extendedQueryBuilder) Reset() {
|
||||
eqb.paramValues = eqb.paramValues[0:0]
|
||||
eqb.paramValueBytes = eqb.paramValueBytes[0:0]
|
||||
eqb.ParamFormats = eqb.ParamFormats[0:0]
|
||||
eqb.ResultFormats = eqb.ResultFormats[0:0]
|
||||
eqb.paramFormats = eqb.paramFormats[0:0]
|
||||
eqb.resultFormats = eqb.resultFormats[0:0]
|
||||
|
||||
if cap(eqb.ParamValues) > 64 {
|
||||
eqb.ParamValues = make([][]byte, 0, 64)
|
||||
eqb.resetCount++
|
||||
|
||||
// Every so often shrink our reserved memory if it is abnormally high
|
||||
if eqb.resetCount%128 == 0 {
|
||||
if cap(eqb.paramValues) > 64 {
|
||||
eqb.paramValues = make([][]byte, 0, cap(eqb.paramValues)/2)
|
||||
}
|
||||
|
||||
if cap(eqb.paramValueBytes) > 256 {
|
||||
eqb.paramValueBytes = make([]byte, 0, cap(eqb.paramValueBytes)/2)
|
||||
}
|
||||
|
||||
if cap(eqb.paramFormats) > 64 {
|
||||
eqb.paramFormats = make([]int16, 0, cap(eqb.paramFormats)/2)
|
||||
}
|
||||
if cap(eqb.resultFormats) > 64 {
|
||||
eqb.resultFormats = make([]int16, 0, cap(eqb.resultFormats)/2)
|
||||
}
|
||||
}
|
||||
|
||||
if cap(eqb.paramValueBytes) > 256 {
|
||||
eqb.paramValueBytes = make([]byte, 0, 256)
|
||||
}
|
||||
|
||||
if cap(eqb.ParamFormats) > 64 {
|
||||
eqb.ParamFormats = make([]int16, 0, 64)
|
||||
}
|
||||
if cap(eqb.ResultFormats) > 64 {
|
||||
eqb.ResultFormats = make([]int16, 0, 64)
|
||||
}
|
||||
}
|
||||
|
||||
func (eqb *ExtendedQueryBuilder) encodeExtendedParamValue(m *pgtype.Map, oid uint32, formatCode int16, arg any) ([]byte, error) {
|
||||
func (eqb *extendedQueryBuilder) encodeExtendedParamValue(ci *pgtype.ConnInfo, oid uint32, formatCode int16, arg interface{}) ([]byte, error) {
|
||||
if arg == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
refVal := reflect.ValueOf(arg)
|
||||
argIsPtr := refVal.Kind() == reflect.Ptr
|
||||
|
||||
if argIsPtr && refVal.IsNil() {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if eqb.paramValueBytes == nil {
|
||||
eqb.paramValueBytes = make([]byte, 0, 128)
|
||||
}
|
||||
|
||||
var err error
|
||||
var buf []byte
|
||||
pos := len(eqb.paramValueBytes)
|
||||
|
||||
buf, err := m.Encode(oid, formatCode, arg, eqb.paramValueBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if buf == nil {
|
||||
return nil, nil
|
||||
}
|
||||
eqb.paramValueBytes = buf
|
||||
return eqb.paramValueBytes[pos:], nil
|
||||
}
|
||||
|
||||
// chooseParameterFormatCode determines the correct format code for an
|
||||
// argument to a prepared statement. It defaults to TextFormatCode if no
|
||||
// determination can be made.
|
||||
func (eqb *ExtendedQueryBuilder) chooseParameterFormatCode(m *pgtype.Map, oid uint32, arg any) int16 {
|
||||
switch arg.(type) {
|
||||
case string, *string:
|
||||
return TextFormatCode
|
||||
if arg, ok := arg.(string); ok {
|
||||
return []byte(arg), nil
|
||||
}
|
||||
|
||||
return m.FormatCodeForOID(oid)
|
||||
if formatCode == TextFormatCode {
|
||||
if arg, ok := arg.(pgtype.TextEncoder); ok {
|
||||
buf, err = arg.EncodeText(ci, eqb.paramValueBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if buf == nil {
|
||||
return nil, nil
|
||||
}
|
||||
eqb.paramValueBytes = buf
|
||||
return eqb.paramValueBytes[pos:], nil
|
||||
}
|
||||
} else if formatCode == BinaryFormatCode {
|
||||
if arg, ok := arg.(pgtype.BinaryEncoder); ok {
|
||||
buf, err = arg.EncodeBinary(ci, eqb.paramValueBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if buf == nil {
|
||||
return nil, nil
|
||||
}
|
||||
eqb.paramValueBytes = buf
|
||||
return eqb.paramValueBytes[pos:], nil
|
||||
}
|
||||
}
|
||||
|
||||
if argIsPtr {
|
||||
// We have already checked that arg is not pointing to nil,
|
||||
// so it is safe to dereference here.
|
||||
arg = refVal.Elem().Interface()
|
||||
return eqb.encodeExtendedParamValue(ci, oid, formatCode, arg)
|
||||
}
|
||||
|
||||
if dt, ok := ci.DataTypeForOID(oid); ok {
|
||||
value := dt.Value
|
||||
err := value.Set(arg)
|
||||
if err != nil {
|
||||
{
|
||||
if arg, ok := arg.(driver.Valuer); ok {
|
||||
v, err := callValuerValue(arg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return eqb.encodeExtendedParamValue(ci, oid, formatCode, v)
|
||||
}
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return eqb.encodeExtendedParamValue(ci, oid, formatCode, value)
|
||||
}
|
||||
|
||||
// There is no data type registered for the destination OID, but maybe there is data type registered for the arg
|
||||
// type. If so use it's text encoder (if available).
|
||||
if dt, ok := ci.DataTypeForValue(arg); ok {
|
||||
value := dt.Value
|
||||
if textEncoder, ok := value.(pgtype.TextEncoder); ok {
|
||||
err := value.Set(arg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
buf, err = textEncoder.EncodeText(ci, eqb.paramValueBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if buf == nil {
|
||||
return nil, nil
|
||||
}
|
||||
eqb.paramValueBytes = buf
|
||||
return eqb.paramValueBytes[pos:], nil
|
||||
}
|
||||
}
|
||||
|
||||
if strippedArg, ok := stripNamedType(&refVal); ok {
|
||||
return eqb.encodeExtendedParamValue(ci, oid, formatCode, strippedArg)
|
||||
}
|
||||
return nil, SerializationError(fmt.Sprintf("Cannot encode %T into oid %v - %T must implement Encoder or be converted to a string", arg, oid, arg))
|
||||
}
|
||||
|
34
go.mod
34
go.mod
@ -1,21 +1,21 @@
|
||||
module github.com/jackc/pgx/v5
|
||||
module github.com/jackc/pgx/v4
|
||||
|
||||
go 1.23.0
|
||||
go 1.12
|
||||
|
||||
require (
|
||||
github.com/jackc/pgpassfile v1.0.0
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761
|
||||
github.com/jackc/puddle/v2 v2.2.2
|
||||
github.com/stretchr/testify v1.8.1
|
||||
golang.org/x/crypto v0.37.0
|
||||
golang.org/x/sync v0.13.0
|
||||
golang.org/x/text v0.24.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/kr/pretty v0.3.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
github.com/cockroachdb/apd v1.1.0
|
||||
github.com/gofrs/uuid v3.2.0+incompatible
|
||||
github.com/jackc/pgconn v1.8.0
|
||||
github.com/jackc/pgio v1.0.0
|
||||
github.com/jackc/pgproto3/v2 v2.0.6
|
||||
github.com/jackc/pgtype v1.6.2
|
||||
github.com/jackc/puddle v1.1.3
|
||||
github.com/mattn/go-colorable v0.1.6 // indirect
|
||||
github.com/rs/zerolog v1.15.0
|
||||
github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc
|
||||
github.com/sirupsen/logrus v1.4.2
|
||||
github.com/stretchr/testify v1.5.1
|
||||
go.uber.org/zap v1.10.0
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543
|
||||
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec
|
||||
)
|
||||
|
295
go.sum
295
go.sum
@ -1,45 +1,280 @@
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
|
||||
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE=
|
||||
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0=
|
||||
github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
|
||||
github.com/jackc/chunkreader/v2 v2.0.0 h1:DUwgMQuuPnS0rhMXenUtZpqZqrR/30NWY+qQvTpSvEs=
|
||||
github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
|
||||
github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
|
||||
github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
|
||||
github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3 h1:ZFYpB74Kq8xE9gmfxCmXD6QxZ27ja+j3HwGFc+YurhQ=
|
||||
github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
|
||||
github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb h1:d6GP9szHvXVopAOAnZ7WhRnF3Xdxrylmm/9jnfmW4Ag=
|
||||
github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
|
||||
github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
|
||||
github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk=
|
||||
github.com/jackc/pgconn v1.5.0 h1:oFSOilzIZkyg787M1fEmyMfOUUvwj0daqYMfaWwNL4o=
|
||||
github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
|
||||
github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853 h1:LRlrfJW9S99uiOCY8F/qLvX1yEY1TVAaCBHFb79yHBQ=
|
||||
github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
|
||||
github.com/jackc/pgconn v1.6.0 h1:8FiBxMxS/Z0eQ9BeE1HhL6pzPL1R5x+ZuQ+T86WgZ4I=
|
||||
github.com/jackc/pgconn v1.6.0/go.mod h1:yeseQo4xhQbgyJs2c87RAXOH2i624N0Fh1KSPJya7qo=
|
||||
github.com/jackc/pgconn v1.6.1 h1:lwofaXKPbIx6qEaK8mNm7uZuOwxHw+PnAFGDsDFpkRI=
|
||||
github.com/jackc/pgconn v1.6.1/go.mod h1:g8mKMqmSUO6AzAvha7vy07g1rbGOlc7iF0nU0ei83hc=
|
||||
github.com/jackc/pgconn v1.6.2 h1:ifRs/oHByR6NfEXfusvjoTqX/KcSvDYNFASoK/wXKfs=
|
||||
github.com/jackc/pgconn v1.6.2/go.mod h1:w2pne1C2tZgP+TvjqLpOigGzNqjBgQW9dUw/4Chex78=
|
||||
github.com/jackc/pgconn v1.6.3 h1:4Ks3RKvSvKPolXZsnLQTDAsokDhgID14Cv4ehECmzlY=
|
||||
github.com/jackc/pgconn v1.6.3/go.mod h1:w2pne1C2tZgP+TvjqLpOigGzNqjBgQW9dUw/4Chex78=
|
||||
github.com/jackc/pgconn v1.6.4 h1:S7T6cx5o2OqmxdHaXLH1ZeD1SbI8jBznyYE9Ec0RCQ8=
|
||||
github.com/jackc/pgconn v1.6.4/go.mod h1:w2pne1C2tZgP+TvjqLpOigGzNqjBgQW9dUw/4Chex78=
|
||||
github.com/jackc/pgconn v1.6.5-0.20200821030021-3eb5432c4738 h1:t/IRFEw2da5v6DroUIYPbEIDWxGGyvVLItoO7gOUBZM=
|
||||
github.com/jackc/pgconn v1.6.5-0.20200821030021-3eb5432c4738/go.mod h1:gm9GeeZiC+Ja7JV4fB/MNDeaOqsCrzFiZlLVhAompxk=
|
||||
github.com/jackc/pgconn v1.6.5-0.20200821030840-fdfc783345f6 h1:7f1RmJO6KZI4cskLrNHBd5CCLmLpIQ0BD75hsorvdz8=
|
||||
github.com/jackc/pgconn v1.6.5-0.20200821030840-fdfc783345f6/go.mod h1:gm9GeeZiC+Ja7JV4fB/MNDeaOqsCrzFiZlLVhAompxk=
|
||||
github.com/jackc/pgconn v1.6.5-0.20200905181414-0d4f029683fc h1:9ThyBXKdyBFN2Y1NSCPGCA0kdWCNpd9u4SKWwtr6GfU=
|
||||
github.com/jackc/pgconn v1.6.5-0.20200905181414-0d4f029683fc/go.mod h1:gm9GeeZiC+Ja7JV4fB/MNDeaOqsCrzFiZlLVhAompxk=
|
||||
github.com/jackc/pgconn v1.7.0 h1:pwjzcYyfmz/HQOQlENvG1OcDqauTGaqlVahq934F0/U=
|
||||
github.com/jackc/pgconn v1.7.0/go.mod h1:sF/lPpNEMEOp+IYhyQGdAvrG20gWf6A1tKlr0v7JMeA=
|
||||
github.com/jackc/pgconn v1.7.1 h1:Ii3hORkg9yTX+8etl2LtfFnL+YzmnR6VSLeTflQBkaQ=
|
||||
github.com/jackc/pgconn v1.7.1/go.mod h1:sF/lPpNEMEOp+IYhyQGdAvrG20gWf6A1tKlr0v7JMeA=
|
||||
github.com/jackc/pgconn v1.7.3-0.20201111215259-cba610c24526 h1:5u4fYBcaCLuQFvquOCBaT2a7KLbUGgNowbOLgVz6DWI=
|
||||
github.com/jackc/pgconn v1.7.3-0.20201111215259-cba610c24526/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
|
||||
github.com/jackc/pgconn v1.8.0 h1:FmjZ0rOyXTr1wfWs45i4a9vjnjWUAGpMuQLD9OSs+lw=
|
||||
github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
|
||||
github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
|
||||
github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
|
||||
github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2 h1:JVX6jT/XfzNqIjye4717ITLaNwV9mWbJx0dLCpcRzdA=
|
||||
github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
|
||||
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
|
||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
||||
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
|
||||
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
||||
github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A=
|
||||
github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
|
||||
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db h1:UpaKn/gYxzH6/zWyRQH1S260zvKqwJJ4h8+Kf09ooh0=
|
||||
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
|
||||
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711 h1:vZp4bYotXUkFx7JUSm7U8KV/7Q0AOdrQxxBBj0ZmZsg=
|
||||
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
|
||||
github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
|
||||
github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29 h1:f2HwOeI1NIJyNFVVeh1gUISyt57iw/fmI/IXJfH3ATE=
|
||||
github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
|
||||
github.com/jackc/pgproto3/v2 v2.0.1 h1:Rdjp4NFjwHnEslx2b66FfCI2S0LhO4itac3hXz6WX9M=
|
||||
github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
||||
github.com/jackc/pgproto3/v2 v2.0.2 h1:q1Hsy66zh4vuNsajBUF2PNqfAMMfxU5mk594lPE9vjY=
|
||||
github.com/jackc/pgproto3/v2 v2.0.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
||||
github.com/jackc/pgproto3/v2 v2.0.4 h1:RHkX5ZUD9bl/kn0f9dYUWs1N7Nwvo1wwUYvKiR26Zco=
|
||||
github.com/jackc/pgproto3/v2 v2.0.4/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
||||
github.com/jackc/pgproto3/v2 v2.0.5 h1:NUbEWPmCQZbMmYlTjVoNPhc0CfnYyz2bfUAh6A5ZVJM=
|
||||
github.com/jackc/pgproto3/v2 v2.0.5/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
||||
github.com/jackc/pgproto3/v2 v2.0.6 h1:b1105ZGEMFe7aCvrT1Cca3VoVb4ZFMaFJLJcg/3zD+8=
|
||||
github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
||||
github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8 h1:Q3tB+ExeflWUW7AFcAhXqk40s9mnNYLk1nOkKNZ5GnU=
|
||||
github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
|
||||
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg=
|
||||
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
|
||||
github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0 h1:mX93v750WifMD1htCt7vqeolcnpaG1gz8URVGjSzcUM=
|
||||
github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
|
||||
github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90 h1:aN5Vlwa2Q3QvxHDtZNi1x+GYkQyketBadMjtiug7AbM=
|
||||
github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
|
||||
github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59 h1:xOamcCJ9MFJTxR5bvw3ZXmiP8evQMohdt2VJ57C0W8Q=
|
||||
github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
|
||||
github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0=
|
||||
github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a h1:XUNeoL8E15IgWouQ8gfA6EPHOfTqVetdxBhAKMYKNGo=
|
||||
github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po=
|
||||
github.com/jackc/pgtype v1.3.1-0.20200513130519-238967ec4e4c h1:dkoQjaMKaLf/zTPpbgbZjnU1qN4bpDY+uLBSyiKh/ns=
|
||||
github.com/jackc/pgtype v1.3.1-0.20200513130519-238967ec4e4c/go.mod h1:f3c+S645fwV5ZqwPvLWZmmnAfPkmaTeLnXs0byan+aA=
|
||||
github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c h1:nXT9KGu1TBy57S0XcCGkTUgqOrvj3jY7Yb+kw5Q2HVc=
|
||||
github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ=
|
||||
github.com/jackc/pgtype v1.4.0 h1:pHQfb4jh9iKqHyxPthq1fr+0HwSNIl3btYPbw2m2lbM=
|
||||
github.com/jackc/pgtype v1.4.0/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
|
||||
github.com/jackc/pgtype v1.4.1 h1:8PRKqCS9Nt2FQbNegoEAIlY6r/DTP2aaXyh5bAEn89g=
|
||||
github.com/jackc/pgtype v1.4.1/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
|
||||
github.com/jackc/pgtype v1.4.2 h1:t+6LWm5eWPLX1H5Se702JSBcirq6uWa4jiG4wV1rAWY=
|
||||
github.com/jackc/pgtype v1.4.2/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
|
||||
github.com/jackc/pgtype v1.4.3-0.20200905161353-e7d2b057a716 h1:DrP52jA32liWkjCF/g3rYC1QjnRh6kvyXaZSevAtlqE=
|
||||
github.com/jackc/pgtype v1.4.3-0.20200905161353-e7d2b057a716/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
|
||||
github.com/jackc/pgtype v1.5.0 h1:jzBqRk2HFG2CV4AIwgCI2PwTgm6UUoCAK2ofHHRirtc=
|
||||
github.com/jackc/pgtype v1.5.0/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
|
||||
github.com/jackc/pgtype v1.6.0 h1:jNYK10ttF4cwp3pDk98juNUsU7T9eSlzN11bWR7McrI=
|
||||
github.com/jackc/pgtype v1.6.0/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
|
||||
github.com/jackc/pgtype v1.6.1 h1:CAtFD7TS95KrxRAh3bidgLwva48WYxk8YkbHZsSWfbI=
|
||||
github.com/jackc/pgtype v1.6.1/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
|
||||
github.com/jackc/pgtype v1.6.2 h1:b3pDeuhbbzBYcg5kwNmNDun4pFUD/0AAr1kLXZLeNt8=
|
||||
github.com/jackc/pgtype v1.6.2/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
|
||||
github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
|
||||
github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
|
||||
github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
|
||||
github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA=
|
||||
github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o=
|
||||
github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg=
|
||||
github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b h1:cIcUpcEP55F/QuZWEtXyqHoWk+IV4TBiLjtBkeq/Q1c=
|
||||
github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9 h1:KLBBPU++1T3DHtm1B1QaIHy80Vhu0wNMErIFCNgAL8Y=
|
||||
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||
github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||
github.com/jackc/puddle v1.1.1 h1:PJAw7H/9hoWC4Kf3J8iNmL1SwA6E8vfsLqBiL+F6CtI=
|
||||
github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||
github.com/jackc/puddle v1.1.2-0.20200821025810-91d0159cc97a h1:ec2LCBkfN1pOq0PhLRH/QitjSXr9s2dnh0gOFyohxHM=
|
||||
github.com/jackc/puddle v1.1.2-0.20200821025810-91d0159cc97a/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||
github.com/jackc/puddle v1.1.2 h1:mpQEXihFnWGDy6X98EOTh81JYuxn7txby8ilJ3iIPGM=
|
||||
github.com/jackc/puddle v1.1.2/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||
github.com/jackc/puddle v1.1.3 h1:JnPg/5Q9xVJGfjsO5CPUOjnJps1JaRUm8I9FXVCFK94=
|
||||
github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
|
||||
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.1.0 h1:/5u4a+KGJptBRqGzPvYQL9p0d/tPR4S31+Tnzj9lEO4=
|
||||
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
|
||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU=
|
||||
github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg=
|
||||
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
|
||||
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
|
||||
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.7 h1:UvyT9uN+3r7yLEYSlJsbQGdsaB/a0DlgWP3pql6iwOc=
|
||||
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg=
|
||||
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
|
||||
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||
github.com/rs/zerolog v1.13.0 h1:hSNcYHyxDWycfePW7pUI8swuFkcSMPKh3E63Pokg1Hk=
|
||||
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
|
||||
github.com/rs/zerolog v1.15.0 h1:uPRuwkWF4J6fGsJ2R0Gn2jB1EQiav9k3S6CSdygQJXY=
|
||||
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
|
||||
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||
github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc h1:jUIKcSPO9MoMJBbEoyE/RJoE8vz7Mb8AjvifMMwSyvY=
|
||||
github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||
github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
|
||||
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
|
||||
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
|
||||
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
|
||||
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
|
||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
|
||||
go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
|
||||
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
|
||||
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
|
||||
go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
|
||||
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a h1:Igim7XhdOpBnWPuYJ70XcNpq8q3BCACtVgNfoJxOV7g=
|
||||
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0=
|
||||
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59 h1:3zb4D3T4G8jdExgVU/95+vQXfpEPiMdCaZgmGVxjNHM=
|
||||
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e h1:nFYrTHrdrAOpShe27kaFHjsqYSEQ0KWqdWLu3xuZJts=
|
||||
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ=
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs=
|
||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373 h1:PPwnA7z1Pjf7XYaBP9GL1VAMZmcIWyFz7QCMSIIa3Bg=
|
||||
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522 h1:bhOzK9QyoD0ogCnFro1m2mz41+Ib0oOhfJnBp5MR4K4=
|
||||
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec h1:RlWgLqCMMIYYEVcAR5MDsuHlVkaIPDAF+5Dehzg8L5A=
|
||||
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
|
61
go_stdlib.go
Normal file
61
go_stdlib.go
Normal file
@ -0,0 +1,61 @@
|
||||
package pgx
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// This file contains code copied from the Go standard library due to the
|
||||
// required function not being public.
|
||||
|
||||
// Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// From database/sql/convert.go
|
||||
|
||||
var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
|
||||
|
||||
// callValuerValue returns vr.Value(), with one exception:
|
||||
// If vr.Value is an auto-generated method on a pointer type and the
|
||||
// pointer is nil, it would panic at runtime in the panicwrap
|
||||
// method. Treat it like nil instead.
|
||||
// Issue 8415.
|
||||
//
|
||||
// This is so people can implement driver.Value on value types and
|
||||
// still use nil pointers to those types to mean nil/NULL, just like
|
||||
// string/*string.
|
||||
//
|
||||
// This function is mirrored in the database/sql/driver package.
|
||||
func callValuerValue(vr driver.Valuer) (v driver.Value, err error) {
|
||||
if rv := reflect.ValueOf(vr); rv.Kind() == reflect.Ptr &&
|
||||
rv.IsNil() &&
|
||||
rv.Type().Elem().Implements(valuerReflectType) {
|
||||
return nil, nil
|
||||
}
|
||||
return vr.Value()
|
||||
}
|
@ -2,26 +2,52 @@ package pgx_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
"github.com/jackc/pgx/v5/pgxtest"
|
||||
"github.com/jackc/pgconn"
|
||||
"github.com/jackc/pgx/v4"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var defaultConnTestRunner pgxtest.ConnTestRunner
|
||||
func testWithAndWithoutPreferSimpleProtocol(t *testing.T, f func(t *testing.T, conn *pgx.Conn)) {
|
||||
t.Run("SimpleProto",
|
||||
func(t *testing.T) {
|
||||
config, err := pgx.ParseConfig(os.Getenv("PGX_TEST_DATABASE"))
|
||||
require.NoError(t, err)
|
||||
|
||||
func init() {
|
||||
defaultConnTestRunner = pgxtest.DefaultConnTestRunner()
|
||||
defaultConnTestRunner.CreateConfig = func(ctx context.Context, t testing.TB) *pgx.ConnConfig {
|
||||
config, err := pgx.ParseConfig(os.Getenv("PGX_TEST_DATABASE"))
|
||||
require.NoError(t, err)
|
||||
return config
|
||||
}
|
||||
config.PreferSimpleProtocol = true
|
||||
conn, err := pgx.ConnectConfig(context.Background(), config)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
err := conn.Close(context.Background())
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
f(t, conn)
|
||||
|
||||
ensureConnValid(t, conn)
|
||||
},
|
||||
)
|
||||
|
||||
t.Run("DefaultProto",
|
||||
func(t *testing.T) {
|
||||
config, err := pgx.ParseConfig(os.Getenv("PGX_TEST_DATABASE"))
|
||||
require.NoError(t, err)
|
||||
|
||||
conn, err := pgx.ConnectConfig(context.Background(), config)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
err := conn.Close(context.Background())
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
f(t, conn)
|
||||
|
||||
ensureConnValid(t, conn)
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func mustConnectString(t testing.TB, connString string) *pgx.Conn {
|
||||
@ -53,7 +79,7 @@ func closeConn(t testing.TB, conn *pgx.Conn) {
|
||||
}
|
||||
}
|
||||
|
||||
func mustExec(t testing.TB, conn *pgx.Conn, sql string, arguments ...any) (commandTag pgconn.CommandTag) {
|
||||
func mustExec(t testing.TB, conn *pgx.Conn, sql string, arguments ...interface{}) (commandTag pgconn.CommandTag) {
|
||||
var err error
|
||||
if commandTag, err = conn.Exec(context.Background(), sql, arguments...); err != nil {
|
||||
t.Fatalf("Exec unexpectedly failed with %v: %v", sql, err)
|
||||
@ -62,7 +88,7 @@ func mustExec(t testing.TB, conn *pgx.Conn, sql string, arguments ...any) (comma
|
||||
}
|
||||
|
||||
// Do a simple query to ensure the connection is still usable
|
||||
func ensureConnValid(t testing.TB, conn *pgx.Conn) {
|
||||
func ensureConnValid(t *testing.T, conn *pgx.Conn) {
|
||||
var sum, rowCount int32
|
||||
|
||||
rows, err := conn.Query(context.Background(), "select generate_series(1,$1)", 10)
|
||||
@ -79,7 +105,7 @@ func ensureConnValid(t testing.TB, conn *pgx.Conn) {
|
||||
}
|
||||
|
||||
if rows.Err() != nil {
|
||||
t.Fatalf("conn.Query failed: %v", rows.Err())
|
||||
t.Fatalf("conn.Query failed: %v", err)
|
||||
}
|
||||
|
||||
if rowCount != 10 {
|
||||
@ -98,11 +124,13 @@ func assertConfigsEqual(t *testing.T, expected, actual *pgx.ConnConfig, testName
|
||||
return
|
||||
}
|
||||
|
||||
assert.Equalf(t, expected.Tracer, actual.Tracer, "%s - Tracer", testName)
|
||||
assert.Equalf(t, expected.Logger, actual.Logger, "%s - Logger", testName)
|
||||
assert.Equalf(t, expected.LogLevel, actual.LogLevel, "%s - LogLevel", testName)
|
||||
assert.Equalf(t, expected.ConnString(), actual.ConnString(), "%s - ConnString", testName)
|
||||
assert.Equalf(t, expected.StatementCacheCapacity, actual.StatementCacheCapacity, "%s - StatementCacheCapacity", testName)
|
||||
assert.Equalf(t, expected.DescriptionCacheCapacity, actual.DescriptionCacheCapacity, "%s - DescriptionCacheCapacity", testName)
|
||||
assert.Equalf(t, expected.DefaultQueryExecMode, actual.DefaultQueryExecMode, "%s - DefaultQueryExecMode", testName)
|
||||
// Can't test function equality, so just test that they are set or not.
|
||||
assert.Equalf(t, expected.BuildStatementCache == nil, actual.BuildStatementCache == nil, "%s - BuildStatementCache", testName)
|
||||
assert.Equalf(t, expected.PreferSimpleProtocol, actual.PreferSimpleProtocol, "%s - PreferSimpleProtocol", testName)
|
||||
|
||||
assert.Equalf(t, expected.Host, actual.Host, "%s - Host", testName)
|
||||
assert.Equalf(t, expected.Database, actual.Database, "%s - Database", testName)
|
||||
assert.Equalf(t, expected.Port, actual.Port, "%s - Port", testName)
|
||||
|
@ -1,70 +0,0 @@
|
||||
// Package iobufpool implements a global segregated-fit pool of buffers for IO.
|
||||
//
|
||||
// It uses *[]byte instead of []byte to avoid the sync.Pool allocation with Put. Unfortunately, using a pointer to avoid
|
||||
// an allocation is purposely not documented. https://github.com/golang/go/issues/16323
|
||||
package iobufpool
|
||||
|
||||
import "sync"
|
||||
|
||||
const minPoolExpOf2 = 8
|
||||
|
||||
var pools [18]*sync.Pool
|
||||
|
||||
func init() {
|
||||
for i := range pools {
|
||||
bufLen := 1 << (minPoolExpOf2 + i)
|
||||
pools[i] = &sync.Pool{
|
||||
New: func() any {
|
||||
buf := make([]byte, bufLen)
|
||||
return &buf
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get gets a []byte of len size with cap <= size*2.
|
||||
func Get(size int) *[]byte {
|
||||
i := getPoolIdx(size)
|
||||
if i >= len(pools) {
|
||||
buf := make([]byte, size)
|
||||
return &buf
|
||||
}
|
||||
|
||||
ptrBuf := (pools[i].Get().(*[]byte))
|
||||
*ptrBuf = (*ptrBuf)[:size]
|
||||
|
||||
return ptrBuf
|
||||
}
|
||||
|
||||
func getPoolIdx(size int) int {
|
||||
size--
|
||||
size >>= minPoolExpOf2
|
||||
i := 0
|
||||
for size > 0 {
|
||||
size >>= 1
|
||||
i++
|
||||
}
|
||||
|
||||
return i
|
||||
}
|
||||
|
||||
// Put returns buf to the pool.
|
||||
func Put(buf *[]byte) {
|
||||
i := putPoolIdx(cap(*buf))
|
||||
if i < 0 {
|
||||
return
|
||||
}
|
||||
|
||||
pools[i].Put(buf)
|
||||
}
|
||||
|
||||
func putPoolIdx(size int) int {
|
||||
minPoolSize := 1 << minPoolExpOf2
|
||||
for i := range pools {
|
||||
if size == minPoolSize<<i {
|
||||
return i
|
||||
}
|
||||
}
|
||||
|
||||
return -1
|
||||
}
|
@ -1,36 +0,0 @@
|
||||
package iobufpool
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestPoolIdx(t *testing.T) {
|
||||
tests := []struct {
|
||||
size int
|
||||
expected int
|
||||
}{
|
||||
{size: 0, expected: 0},
|
||||
{size: 1, expected: 0},
|
||||
{size: 255, expected: 0},
|
||||
{size: 256, expected: 0},
|
||||
{size: 257, expected: 1},
|
||||
{size: 511, expected: 1},
|
||||
{size: 512, expected: 1},
|
||||
{size: 513, expected: 2},
|
||||
{size: 1023, expected: 2},
|
||||
{size: 1024, expected: 2},
|
||||
{size: 1025, expected: 3},
|
||||
{size: 2047, expected: 3},
|
||||
{size: 2048, expected: 3},
|
||||
{size: 2049, expected: 4},
|
||||
{size: 8388607, expected: 15},
|
||||
{size: 8388608, expected: 15},
|
||||
{size: 8388609, expected: 16},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
idx := getPoolIdx(tt.size)
|
||||
assert.Equalf(t, tt.expected, idx, "size: %d", tt.size)
|
||||
}
|
||||
}
|
@ -1,85 +0,0 @@
|
||||
package iobufpool_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/jackc/pgx/v5/internal/iobufpool"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestGetCap(t *testing.T) {
|
||||
tests := []struct {
|
||||
requestedLen int
|
||||
expectedCap int
|
||||
}{
|
||||
{requestedLen: 0, expectedCap: 256},
|
||||
{requestedLen: 128, expectedCap: 256},
|
||||
{requestedLen: 255, expectedCap: 256},
|
||||
{requestedLen: 256, expectedCap: 256},
|
||||
{requestedLen: 257, expectedCap: 512},
|
||||
{requestedLen: 511, expectedCap: 512},
|
||||
{requestedLen: 512, expectedCap: 512},
|
||||
{requestedLen: 513, expectedCap: 1024},
|
||||
{requestedLen: 1023, expectedCap: 1024},
|
||||
{requestedLen: 1024, expectedCap: 1024},
|
||||
{requestedLen: 33554431, expectedCap: 33554432},
|
||||
{requestedLen: 33554432, expectedCap: 33554432},
|
||||
|
||||
// Above 32 MiB skip the pool and allocate exactly the requested size.
|
||||
{requestedLen: 33554433, expectedCap: 33554433},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
buf := iobufpool.Get(tt.requestedLen)
|
||||
assert.Equalf(t, tt.requestedLen, len(*buf), "bad len for requestedLen: %d", len(*buf), tt.requestedLen)
|
||||
assert.Equalf(t, tt.expectedCap, cap(*buf), "bad cap for requestedLen: %d", tt.requestedLen)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPutHandlesWrongSizedBuffers(t *testing.T) {
|
||||
for putBufSize := range []int{0, 1, 128, 250, 256, 257, 1023, 1024, 1025, 1 << 28} {
|
||||
putBuf := make([]byte, putBufSize)
|
||||
iobufpool.Put(&putBuf)
|
||||
|
||||
tests := []struct {
|
||||
requestedLen int
|
||||
expectedCap int
|
||||
}{
|
||||
{requestedLen: 0, expectedCap: 256},
|
||||
{requestedLen: 128, expectedCap: 256},
|
||||
{requestedLen: 255, expectedCap: 256},
|
||||
{requestedLen: 256, expectedCap: 256},
|
||||
{requestedLen: 257, expectedCap: 512},
|
||||
{requestedLen: 511, expectedCap: 512},
|
||||
{requestedLen: 512, expectedCap: 512},
|
||||
{requestedLen: 513, expectedCap: 1024},
|
||||
{requestedLen: 1023, expectedCap: 1024},
|
||||
{requestedLen: 1024, expectedCap: 1024},
|
||||
{requestedLen: 33554431, expectedCap: 33554432},
|
||||
{requestedLen: 33554432, expectedCap: 33554432},
|
||||
|
||||
// Above 32 MiB skip the pool and allocate exactly the requested size.
|
||||
{requestedLen: 33554433, expectedCap: 33554433},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
getBuf := iobufpool.Get(tt.requestedLen)
|
||||
assert.Equalf(t, tt.requestedLen, len(*getBuf), "len(putBuf): %d, requestedLen: %d", len(putBuf), tt.requestedLen)
|
||||
assert.Equalf(t, tt.expectedCap, cap(*getBuf), "cap(putBuf): %d, requestedLen: %d", cap(putBuf), tt.requestedLen)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPutGetBufferReuse(t *testing.T) {
|
||||
// There is no way to guarantee a buffer will be reused. It should be, but a GC between the Put and the Get will cause
|
||||
// it not to be. So try many times.
|
||||
for i := 0; i < 100000; i++ {
|
||||
buf := iobufpool.Get(4)
|
||||
(*buf)[0] = 1
|
||||
iobufpool.Put(buf)
|
||||
buf = iobufpool.Get(4)
|
||||
if (*buf)[0] == 1 {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
t.Error("buffer was never reused")
|
||||
}
|
@ -1,6 +0,0 @@
|
||||
# pgio
|
||||
|
||||
Package pgio is a low-level toolkit building messages in the PostgreSQL wire protocol.
|
||||
|
||||
pgio provides functions for appending integers to a []byte while doing byte
|
||||
order conversion.
|
@ -1,6 +0,0 @@
|
||||
// Package pgio is a low-level toolkit building messages in the PostgreSQL wire protocol.
|
||||
/*
|
||||
pgio provides functions for appending integers to a []byte while doing byte
|
||||
order conversion.
|
||||
*/
|
||||
package pgio
|
@ -1,40 +0,0 @@
|
||||
package pgio
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
func AppendUint16(buf []byte, n uint16) []byte {
|
||||
wp := len(buf)
|
||||
buf = append(buf, 0, 0)
|
||||
binary.BigEndian.PutUint16(buf[wp:], n)
|
||||
return buf
|
||||
}
|
||||
|
||||
func AppendUint32(buf []byte, n uint32) []byte {
|
||||
wp := len(buf)
|
||||
buf = append(buf, 0, 0, 0, 0)
|
||||
binary.BigEndian.PutUint32(buf[wp:], n)
|
||||
return buf
|
||||
}
|
||||
|
||||
func AppendUint64(buf []byte, n uint64) []byte {
|
||||
wp := len(buf)
|
||||
buf = append(buf, 0, 0, 0, 0, 0, 0, 0, 0)
|
||||
binary.BigEndian.PutUint64(buf[wp:], n)
|
||||
return buf
|
||||
}
|
||||
|
||||
func AppendInt16(buf []byte, n int16) []byte {
|
||||
return AppendUint16(buf, uint16(n))
|
||||
}
|
||||
|
||||
func AppendInt32(buf []byte, n int32) []byte {
|
||||
return AppendUint32(buf, uint32(n))
|
||||
}
|
||||
|
||||
func AppendInt64(buf []byte, n int64) []byte {
|
||||
return AppendUint64(buf, uint64(n))
|
||||
}
|
||||
|
||||
func SetInt32(buf []byte, n int32) {
|
||||
binary.BigEndian.PutUint32(buf, uint32(n))
|
||||
}
|
@ -1,78 +0,0 @@
|
||||
package pgio
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAppendUint16NilBuf(t *testing.T) {
|
||||
buf := AppendUint16(nil, 1)
|
||||
if !reflect.DeepEqual(buf, []byte{0, 1}) {
|
||||
t.Errorf("AppendUint16(nil, 1) => %v, want %v", buf, []byte{0, 1})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppendUint16EmptyBuf(t *testing.T) {
|
||||
buf := []byte{}
|
||||
buf = AppendUint16(buf, 1)
|
||||
if !reflect.DeepEqual(buf, []byte{0, 1}) {
|
||||
t.Errorf("AppendUint16(nil, 1) => %v, want %v", buf, []byte{0, 1})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppendUint16BufWithCapacityDoesNotAllocate(t *testing.T) {
|
||||
buf := make([]byte, 0, 4)
|
||||
AppendUint16(buf, 1)
|
||||
buf = buf[0:2]
|
||||
if !reflect.DeepEqual(buf, []byte{0, 1}) {
|
||||
t.Errorf("AppendUint16(nil, 1) => %v, want %v", buf, []byte{0, 1})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppendUint32NilBuf(t *testing.T) {
|
||||
buf := AppendUint32(nil, 1)
|
||||
if !reflect.DeepEqual(buf, []byte{0, 0, 0, 1}) {
|
||||
t.Errorf("AppendUint32(nil, 1) => %v, want %v", buf, []byte{0, 0, 0, 1})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppendUint32EmptyBuf(t *testing.T) {
|
||||
buf := []byte{}
|
||||
buf = AppendUint32(buf, 1)
|
||||
if !reflect.DeepEqual(buf, []byte{0, 0, 0, 1}) {
|
||||
t.Errorf("AppendUint32(nil, 1) => %v, want %v", buf, []byte{0, 0, 0, 1})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppendUint32BufWithCapacityDoesNotAllocate(t *testing.T) {
|
||||
buf := make([]byte, 0, 4)
|
||||
AppendUint32(buf, 1)
|
||||
buf = buf[0:4]
|
||||
if !reflect.DeepEqual(buf, []byte{0, 0, 0, 1}) {
|
||||
t.Errorf("AppendUint32(nil, 1) => %v, want %v", buf, []byte{0, 0, 0, 1})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppendUint64NilBuf(t *testing.T) {
|
||||
buf := AppendUint64(nil, 1)
|
||||
if !reflect.DeepEqual(buf, []byte{0, 0, 0, 0, 0, 0, 0, 1}) {
|
||||
t.Errorf("AppendUint64(nil, 1) => %v, want %v", buf, []byte{0, 0, 0, 0, 0, 0, 0, 1})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppendUint64EmptyBuf(t *testing.T) {
|
||||
buf := []byte{}
|
||||
buf = AppendUint64(buf, 1)
|
||||
if !reflect.DeepEqual(buf, []byte{0, 0, 0, 0, 0, 0, 0, 1}) {
|
||||
t.Errorf("AppendUint64(nil, 1) => %v, want %v", buf, []byte{0, 0, 0, 0, 0, 0, 0, 1})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppendUint64BufWithCapacityDoesNotAllocate(t *testing.T) {
|
||||
buf := make([]byte, 0, 8)
|
||||
AppendUint64(buf, 1)
|
||||
buf = buf[0:8]
|
||||
if !reflect.DeepEqual(buf, []byte{0, 0, 0, 0, 0, 0, 0, 1}) {
|
||||
t.Errorf("AppendUint64(nil, 1) => %v, want %v", buf, []byte{0, 0, 0, 0, 0, 0, 0, 1})
|
||||
}
|
||||
}
|
@ -1,136 +0,0 @@
|
||||
// Package pgmock provides the ability to mock a PostgreSQL server.
|
||||
package pgmock
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgproto3"
|
||||
)
|
||||
|
||||
type Step interface {
|
||||
Step(*pgproto3.Backend) error
|
||||
}
|
||||
|
||||
type Script struct {
|
||||
Steps []Step
|
||||
}
|
||||
|
||||
func (s *Script) Run(backend *pgproto3.Backend) error {
|
||||
for _, step := range s.Steps {
|
||||
err := step.Step(backend)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Script) Step(backend *pgproto3.Backend) error {
|
||||
return s.Run(backend)
|
||||
}
|
||||
|
||||
type expectMessageStep struct {
|
||||
want pgproto3.FrontendMessage
|
||||
any bool
|
||||
}
|
||||
|
||||
func (e *expectMessageStep) Step(backend *pgproto3.Backend) error {
|
||||
msg, err := backend.Receive()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if e.any && reflect.TypeOf(msg) == reflect.TypeOf(e.want) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(msg, e.want) {
|
||||
return fmt.Errorf("msg => %#v, e.want => %#v", msg, e.want)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type expectStartupMessageStep struct {
|
||||
want *pgproto3.StartupMessage
|
||||
any bool
|
||||
}
|
||||
|
||||
func (e *expectStartupMessageStep) Step(backend *pgproto3.Backend) error {
|
||||
msg, err := backend.ReceiveStartupMessage()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if e.any {
|
||||
return nil
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(msg, e.want) {
|
||||
return fmt.Errorf("msg => %#v, e.want => %#v", msg, e.want)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func ExpectMessage(want pgproto3.FrontendMessage) Step {
|
||||
return expectMessage(want, false)
|
||||
}
|
||||
|
||||
func ExpectAnyMessage(want pgproto3.FrontendMessage) Step {
|
||||
return expectMessage(want, true)
|
||||
}
|
||||
|
||||
func expectMessage(want pgproto3.FrontendMessage, any bool) Step {
|
||||
if want, ok := want.(*pgproto3.StartupMessage); ok {
|
||||
return &expectStartupMessageStep{want: want, any: any}
|
||||
}
|
||||
|
||||
return &expectMessageStep{want: want, any: any}
|
||||
}
|
||||
|
||||
type sendMessageStep struct {
|
||||
msg pgproto3.BackendMessage
|
||||
}
|
||||
|
||||
func (e *sendMessageStep) Step(backend *pgproto3.Backend) error {
|
||||
backend.Send(e.msg)
|
||||
return backend.Flush()
|
||||
}
|
||||
|
||||
func SendMessage(msg pgproto3.BackendMessage) Step {
|
||||
return &sendMessageStep{msg: msg}
|
||||
}
|
||||
|
||||
type waitForCloseMessageStep struct{}
|
||||
|
||||
func (e *waitForCloseMessageStep) Step(backend *pgproto3.Backend) error {
|
||||
for {
|
||||
msg, err := backend.Receive()
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, ok := msg.(*pgproto3.Terminate); ok {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func WaitForClose() Step {
|
||||
return &waitForCloseMessageStep{}
|
||||
}
|
||||
|
||||
func AcceptUnauthenticatedConnRequestSteps() []Step {
|
||||
return []Step{
|
||||
ExpectAnyMessage(&pgproto3.StartupMessage{ProtocolVersion: pgproto3.ProtocolVersionNumber, Parameters: map[string]string{}}),
|
||||
SendMessage(&pgproto3.AuthenticationOk{}),
|
||||
SendMessage(&pgproto3.BackendKeyData{ProcessID: 0, SecretKey: 0}),
|
||||
SendMessage(&pgproto3.ReadyForQuery{TxStatus: 'I'}),
|
||||
}
|
||||
}
|
@ -1,91 +0,0 @@
|
||||
package pgmock_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/jackc/pgx/v5/internal/pgmock"
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
"github.com/jackc/pgx/v5/pgproto3"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestScript(t *testing.T) {
|
||||
script := &pgmock.Script{
|
||||
Steps: pgmock.AcceptUnauthenticatedConnRequestSteps(),
|
||||
}
|
||||
script.Steps = append(script.Steps, pgmock.ExpectMessage(&pgproto3.Query{String: "select 42"}))
|
||||
script.Steps = append(script.Steps, pgmock.SendMessage(&pgproto3.RowDescription{
|
||||
Fields: []pgproto3.FieldDescription{
|
||||
{
|
||||
Name: []byte("?column?"),
|
||||
TableOID: 0,
|
||||
TableAttributeNumber: 0,
|
||||
DataTypeOID: 23,
|
||||
DataTypeSize: 4,
|
||||
TypeModifier: -1,
|
||||
Format: 0,
|
||||
},
|
||||
},
|
||||
}))
|
||||
script.Steps = append(script.Steps, pgmock.SendMessage(&pgproto3.DataRow{
|
||||
Values: [][]byte{[]byte("42")},
|
||||
}))
|
||||
script.Steps = append(script.Steps, pgmock.SendMessage(&pgproto3.CommandComplete{CommandTag: []byte("SELECT 1")}))
|
||||
script.Steps = append(script.Steps, pgmock.SendMessage(&pgproto3.ReadyForQuery{TxStatus: 'I'}))
|
||||
script.Steps = append(script.Steps, pgmock.ExpectMessage(&pgproto3.Terminate{}))
|
||||
|
||||
ln, err := net.Listen("tcp", "127.0.0.1:")
|
||||
require.NoError(t, err)
|
||||
defer ln.Close()
|
||||
|
||||
serverErrChan := make(chan error, 1)
|
||||
go func() {
|
||||
defer close(serverErrChan)
|
||||
|
||||
conn, err := ln.Accept()
|
||||
if err != nil {
|
||||
serverErrChan <- err
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
err = conn.SetDeadline(time.Now().Add(time.Second))
|
||||
if err != nil {
|
||||
serverErrChan <- err
|
||||
return
|
||||
}
|
||||
|
||||
err = script.Run(pgproto3.NewBackend(conn, conn))
|
||||
if err != nil {
|
||||
serverErrChan <- err
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
host, port, _ := strings.Cut(ln.Addr().String(), ":")
|
||||
connStr := fmt.Sprintf("sslmode=disable host=%s port=%s", host, port)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||
defer cancel()
|
||||
pgConn, err := pgconn.Connect(ctx, connStr)
|
||||
require.NoError(t, err)
|
||||
results, err := pgConn.Exec(ctx, "select 42").ReadAll()
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Len(t, results, 1)
|
||||
assert.Nil(t, results[0].Err)
|
||||
assert.Equal(t, "SELECT 1", results[0].CommandTag.String())
|
||||
assert.Len(t, results[0].Rows, 1)
|
||||
assert.Equal(t, "42", string(results[0].Rows[0][0]))
|
||||
|
||||
pgConn.Close(ctx)
|
||||
|
||||
assert.NoError(t, <-serverErrChan)
|
||||
}
|
@ -1,60 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
current_branch=$(git rev-parse --abbrev-ref HEAD)
|
||||
if [ "$current_branch" == "HEAD" ]; then
|
||||
current_branch=$(git rev-parse HEAD)
|
||||
fi
|
||||
|
||||
restore_branch() {
|
||||
echo "Restoring original branch/commit: $current_branch"
|
||||
git checkout "$current_branch"
|
||||
}
|
||||
trap restore_branch EXIT
|
||||
|
||||
# Check if there are uncommitted changes
|
||||
if ! git diff --quiet || ! git diff --cached --quiet; then
|
||||
echo "There are uncommitted changes. Please commit or stash them before running this script."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Ensure that at least one commit argument is passed
|
||||
if [ "$#" -lt 1 ]; then
|
||||
echo "Usage: $0 <commit1> <commit2> ... <commitN>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
commits=("$@")
|
||||
benchmarks_dir=benchmarks
|
||||
|
||||
if ! mkdir -p "${benchmarks_dir}"; then
|
||||
echo "Unable to create dir for benchmarks data"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Benchmark results
|
||||
bench_files=()
|
||||
|
||||
# Run benchmark for each listed commit
|
||||
for i in "${!commits[@]}"; do
|
||||
commit="${commits[i]}"
|
||||
git checkout "$commit" || {
|
||||
echo "Failed to checkout $commit"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Sanitized commmit message
|
||||
commit_message=$(git log -1 --pretty=format:"%s" | tr -c '[:alnum:]-_' '_')
|
||||
|
||||
# Benchmark data will go there
|
||||
bench_file="${benchmarks_dir}/${i}_${commit_message}.bench"
|
||||
|
||||
if ! go test -bench=. -count=10 >"$bench_file"; then
|
||||
echo "Benchmarking failed for commit $commit"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
bench_files+=("$bench_file")
|
||||
done
|
||||
|
||||
# go install golang.org/x/perf/cmd/benchstat[@latest]
|
||||
benchstat "${bench_files[@]}"
|
@ -3,209 +3,97 @@ package sanitize
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
errors "golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// Part is either a string or an int. A string is raw SQL. An int is a
|
||||
// argument placeholder.
|
||||
type Part any
|
||||
type Part interface{}
|
||||
|
||||
type Query struct {
|
||||
Parts []Part
|
||||
}
|
||||
|
||||
// utf.DecodeRune returns the utf8.RuneError for errors. But that is actually rune U+FFFD -- the unicode replacement
|
||||
// character. utf8.RuneError is not an error if it is also width 3.
|
||||
//
|
||||
// https://github.com/jackc/pgx/issues/1380
|
||||
const replacementcharacterwidth = 3
|
||||
|
||||
const maxBufSize = 16384 // 16 Ki
|
||||
|
||||
var bufPool = &pool[*bytes.Buffer]{
|
||||
new: func() *bytes.Buffer {
|
||||
return &bytes.Buffer{}
|
||||
},
|
||||
reset: func(b *bytes.Buffer) bool {
|
||||
n := b.Len()
|
||||
b.Reset()
|
||||
return n < maxBufSize
|
||||
},
|
||||
}
|
||||
|
||||
var null = []byte("null")
|
||||
|
||||
func (q *Query) Sanitize(args ...any) (string, error) {
|
||||
func (q *Query) Sanitize(args ...interface{}) (string, error) {
|
||||
argUse := make([]bool, len(args))
|
||||
buf := bufPool.get()
|
||||
defer bufPool.put(buf)
|
||||
buf := &bytes.Buffer{}
|
||||
|
||||
for _, part := range q.Parts {
|
||||
var str string
|
||||
switch part := part.(type) {
|
||||
case string:
|
||||
buf.WriteString(part)
|
||||
str = part
|
||||
case int:
|
||||
argIdx := part - 1
|
||||
var p []byte
|
||||
if argIdx < 0 {
|
||||
return "", fmt.Errorf("first sql argument must be > 0")
|
||||
}
|
||||
|
||||
if argIdx >= len(args) {
|
||||
return "", fmt.Errorf("insufficient arguments")
|
||||
return "", errors.Errorf("insufficient arguments")
|
||||
}
|
||||
|
||||
// Prevent SQL injection via Line Comment Creation
|
||||
// https://github.com/jackc/pgx/security/advisories/GHSA-m7wr-2xf7-cm9p
|
||||
buf.WriteByte(' ')
|
||||
|
||||
arg := args[argIdx]
|
||||
switch arg := arg.(type) {
|
||||
case nil:
|
||||
p = null
|
||||
str = "null"
|
||||
case int64:
|
||||
p = strconv.AppendInt(buf.AvailableBuffer(), arg, 10)
|
||||
str = strconv.FormatInt(arg, 10)
|
||||
case float64:
|
||||
p = strconv.AppendFloat(buf.AvailableBuffer(), arg, 'f', -1, 64)
|
||||
str = strconv.FormatFloat(arg, 'f', -1, 64)
|
||||
case bool:
|
||||
p = strconv.AppendBool(buf.AvailableBuffer(), arg)
|
||||
str = strconv.FormatBool(arg)
|
||||
case []byte:
|
||||
p = QuoteBytes(buf.AvailableBuffer(), arg)
|
||||
str = QuoteBytes(arg)
|
||||
case string:
|
||||
p = QuoteString(buf.AvailableBuffer(), arg)
|
||||
str = QuoteString(arg)
|
||||
case time.Time:
|
||||
p = arg.Truncate(time.Microsecond).
|
||||
AppendFormat(buf.AvailableBuffer(), "'2006-01-02 15:04:05.999999999Z07:00:00'")
|
||||
str = arg.Truncate(time.Microsecond).Format("'2006-01-02 15:04:05.999999999Z07:00:00'")
|
||||
default:
|
||||
return "", fmt.Errorf("invalid arg type: %T", arg)
|
||||
return "", errors.Errorf("invalid arg type: %T", arg)
|
||||
}
|
||||
argUse[argIdx] = true
|
||||
|
||||
buf.Write(p)
|
||||
|
||||
// Prevent SQL injection via Line Comment Creation
|
||||
// https://github.com/jackc/pgx/security/advisories/GHSA-m7wr-2xf7-cm9p
|
||||
buf.WriteByte(' ')
|
||||
default:
|
||||
return "", fmt.Errorf("invalid Part type: %T", part)
|
||||
return "", errors.Errorf("invalid Part type: %T", part)
|
||||
}
|
||||
buf.WriteString(str)
|
||||
}
|
||||
|
||||
for i, used := range argUse {
|
||||
if !used {
|
||||
return "", fmt.Errorf("unused argument: %d", i)
|
||||
return "", errors.Errorf("unused argument: %d", i)
|
||||
}
|
||||
}
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
func NewQuery(sql string) (*Query, error) {
|
||||
query := &Query{}
|
||||
query.init(sql)
|
||||
|
||||
return query, nil
|
||||
}
|
||||
|
||||
var sqlLexerPool = &pool[*sqlLexer]{
|
||||
new: func() *sqlLexer {
|
||||
return &sqlLexer{}
|
||||
},
|
||||
reset: func(sl *sqlLexer) bool {
|
||||
*sl = sqlLexer{}
|
||||
return true
|
||||
},
|
||||
}
|
||||
|
||||
func (q *Query) init(sql string) {
|
||||
parts := q.Parts[:0]
|
||||
if parts == nil {
|
||||
// dirty, but fast heuristic to preallocate for ~90% usecases
|
||||
n := strings.Count(sql, "$") + strings.Count(sql, "--") + 1
|
||||
parts = make([]Part, 0, n)
|
||||
l := &sqlLexer{
|
||||
src: sql,
|
||||
stateFn: rawState,
|
||||
}
|
||||
|
||||
l := sqlLexerPool.get()
|
||||
defer sqlLexerPool.put(l)
|
||||
|
||||
l.src = sql
|
||||
l.stateFn = rawState
|
||||
l.parts = parts
|
||||
|
||||
for l.stateFn != nil {
|
||||
l.stateFn = l.stateFn(l)
|
||||
}
|
||||
|
||||
q.Parts = l.parts
|
||||
query := &Query{Parts: l.parts}
|
||||
|
||||
return query, nil
|
||||
}
|
||||
|
||||
func QuoteString(dst []byte, str string) []byte {
|
||||
const quote = '\''
|
||||
|
||||
// Preallocate space for the worst case scenario
|
||||
dst = slices.Grow(dst, len(str)*2+2)
|
||||
|
||||
// Add opening quote
|
||||
dst = append(dst, quote)
|
||||
|
||||
// Iterate through the string without allocating
|
||||
for i := 0; i < len(str); i++ {
|
||||
if str[i] == quote {
|
||||
dst = append(dst, quote, quote)
|
||||
} else {
|
||||
dst = append(dst, str[i])
|
||||
}
|
||||
}
|
||||
|
||||
// Add closing quote
|
||||
dst = append(dst, quote)
|
||||
|
||||
return dst
|
||||
func QuoteString(str string) string {
|
||||
return "'" + strings.Replace(str, "'", "''", -1) + "'"
|
||||
}
|
||||
|
||||
func QuoteBytes(dst, buf []byte) []byte {
|
||||
if len(buf) == 0 {
|
||||
return append(dst, `'\x'`...)
|
||||
}
|
||||
|
||||
// Calculate required length
|
||||
requiredLen := 3 + hex.EncodedLen(len(buf)) + 1
|
||||
|
||||
// Ensure dst has enough capacity
|
||||
if cap(dst)-len(dst) < requiredLen {
|
||||
newDst := make([]byte, len(dst), len(dst)+requiredLen)
|
||||
copy(newDst, dst)
|
||||
dst = newDst
|
||||
}
|
||||
|
||||
// Record original length and extend slice
|
||||
origLen := len(dst)
|
||||
dst = dst[:origLen+requiredLen]
|
||||
|
||||
// Add prefix
|
||||
dst[origLen] = '\''
|
||||
dst[origLen+1] = '\\'
|
||||
dst[origLen+2] = 'x'
|
||||
|
||||
// Encode bytes directly into dst
|
||||
hex.Encode(dst[origLen+3:len(dst)-1], buf)
|
||||
|
||||
// Add suffix
|
||||
dst[len(dst)-1] = '\''
|
||||
|
||||
return dst
|
||||
func QuoteBytes(buf []byte) string {
|
||||
return `'\x` + hex.EncodeToString(buf) + "'"
|
||||
}
|
||||
|
||||
type sqlLexer struct {
|
||||
src string
|
||||
start int
|
||||
pos int
|
||||
nested int // multiline comment nesting level.
|
||||
stateFn stateFn
|
||||
parts []Part
|
||||
}
|
||||
@ -237,26 +125,12 @@ func rawState(l *sqlLexer) stateFn {
|
||||
l.start = l.pos
|
||||
return placeholderState
|
||||
}
|
||||
case '-':
|
||||
nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
|
||||
if nextRune == '-' {
|
||||
l.pos += width
|
||||
return oneLineCommentState
|
||||
}
|
||||
case '/':
|
||||
nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
|
||||
if nextRune == '*' {
|
||||
l.pos += width
|
||||
return multilineCommentState
|
||||
}
|
||||
case utf8.RuneError:
|
||||
if width != replacementcharacterwidth {
|
||||
if l.pos-l.start > 0 {
|
||||
l.parts = append(l.parts, l.src[l.start:l.pos])
|
||||
l.start = l.pos
|
||||
}
|
||||
return nil
|
||||
if l.pos-l.start > 0 {
|
||||
l.parts = append(l.parts, l.src[l.start:l.pos])
|
||||
l.start = l.pos
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -274,13 +148,11 @@ func singleQuoteState(l *sqlLexer) stateFn {
|
||||
}
|
||||
l.pos += width
|
||||
case utf8.RuneError:
|
||||
if width != replacementcharacterwidth {
|
||||
if l.pos-l.start > 0 {
|
||||
l.parts = append(l.parts, l.src[l.start:l.pos])
|
||||
l.start = l.pos
|
||||
}
|
||||
return nil
|
||||
if l.pos-l.start > 0 {
|
||||
l.parts = append(l.parts, l.src[l.start:l.pos])
|
||||
l.start = l.pos
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -298,13 +170,11 @@ func doubleQuoteState(l *sqlLexer) stateFn {
|
||||
}
|
||||
l.pos += width
|
||||
case utf8.RuneError:
|
||||
if width != replacementcharacterwidth {
|
||||
if l.pos-l.start > 0 {
|
||||
l.parts = append(l.parts, l.src[l.start:l.pos])
|
||||
l.start = l.pos
|
||||
}
|
||||
return nil
|
||||
if l.pos-l.start > 0 {
|
||||
l.parts = append(l.parts, l.src[l.start:l.pos])
|
||||
l.start = l.pos
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -346,115 +216,22 @@ func escapeStringState(l *sqlLexer) stateFn {
|
||||
}
|
||||
l.pos += width
|
||||
case utf8.RuneError:
|
||||
if width != replacementcharacterwidth {
|
||||
if l.pos-l.start > 0 {
|
||||
l.parts = append(l.parts, l.src[l.start:l.pos])
|
||||
l.start = l.pos
|
||||
}
|
||||
return nil
|
||||
if l.pos-l.start > 0 {
|
||||
l.parts = append(l.parts, l.src[l.start:l.pos])
|
||||
l.start = l.pos
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func oneLineCommentState(l *sqlLexer) stateFn {
|
||||
for {
|
||||
r, width := utf8.DecodeRuneInString(l.src[l.pos:])
|
||||
l.pos += width
|
||||
|
||||
switch r {
|
||||
case '\\':
|
||||
_, width = utf8.DecodeRuneInString(l.src[l.pos:])
|
||||
l.pos += width
|
||||
case '\n', '\r':
|
||||
return rawState
|
||||
case utf8.RuneError:
|
||||
if width != replacementcharacterwidth {
|
||||
if l.pos-l.start > 0 {
|
||||
l.parts = append(l.parts, l.src[l.start:l.pos])
|
||||
l.start = l.pos
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func multilineCommentState(l *sqlLexer) stateFn {
|
||||
for {
|
||||
r, width := utf8.DecodeRuneInString(l.src[l.pos:])
|
||||
l.pos += width
|
||||
|
||||
switch r {
|
||||
case '/':
|
||||
nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
|
||||
if nextRune == '*' {
|
||||
l.pos += width
|
||||
l.nested++
|
||||
}
|
||||
case '*':
|
||||
nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
|
||||
if nextRune != '/' {
|
||||
continue
|
||||
}
|
||||
|
||||
l.pos += width
|
||||
if l.nested == 0 {
|
||||
return rawState
|
||||
}
|
||||
l.nested--
|
||||
|
||||
case utf8.RuneError:
|
||||
if width != replacementcharacterwidth {
|
||||
if l.pos-l.start > 0 {
|
||||
l.parts = append(l.parts, l.src[l.start:l.pos])
|
||||
l.start = l.pos
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var queryPool = &pool[*Query]{
|
||||
new: func() *Query {
|
||||
return &Query{}
|
||||
},
|
||||
reset: func(q *Query) bool {
|
||||
n := len(q.Parts)
|
||||
q.Parts = q.Parts[:0]
|
||||
return n < 64 // drop too large queries
|
||||
},
|
||||
}
|
||||
|
||||
// SanitizeSQL replaces placeholder values with args. It quotes and escapes args
|
||||
// as necessary. This function is only safe when standard_conforming_strings is
|
||||
// on.
|
||||
func SanitizeSQL(sql string, args ...any) (string, error) {
|
||||
query := queryPool.get()
|
||||
query.init(sql)
|
||||
defer queryPool.put(query)
|
||||
|
||||
func SanitizeSQL(sql string, args ...interface{}) (string, error) {
|
||||
query, err := NewQuery(sql)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return query.Sanitize(args...)
|
||||
}
|
||||
|
||||
type pool[E any] struct {
|
||||
p sync.Pool
|
||||
new func() E
|
||||
reset func(E) bool
|
||||
}
|
||||
|
||||
func (pool *pool[E]) get() E {
|
||||
v, ok := pool.p.Get().(E)
|
||||
if !ok {
|
||||
v = pool.new()
|
||||
}
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
func (p *pool[E]) put(v E) {
|
||||
if p.reset(v) {
|
||||
p.p.Put(v)
|
||||
}
|
||||
}
|
||||
|
@ -1,62 +0,0 @@
|
||||
// sanitize_benchmark_test.go
|
||||
package sanitize_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/jackc/pgx/v5/internal/sanitize"
|
||||
)
|
||||
|
||||
var benchmarkSanitizeResult string
|
||||
|
||||
const benchmarkQuery = "" +
|
||||
`SELECT *
|
||||
FROM "water_containers"
|
||||
WHERE NOT "id" = $1 -- int64
|
||||
AND "tags" NOT IN $2 -- nil
|
||||
AND "volume" > $3 -- float64
|
||||
AND "transportable" = $4 -- bool
|
||||
AND position($5 IN "sign") -- bytes
|
||||
AND "label" LIKE $6 -- string
|
||||
AND "created_at" > $7; -- time.Time`
|
||||
|
||||
var benchmarkArgs = []any{
|
||||
int64(12345),
|
||||
nil,
|
||||
float64(500),
|
||||
true,
|
||||
[]byte("8BADF00D"),
|
||||
"kombucha's han'dy awokowa",
|
||||
time.Date(2015, 10, 1, 0, 0, 0, 0, time.UTC),
|
||||
}
|
||||
|
||||
func BenchmarkSanitize(b *testing.B) {
|
||||
query, err := sanitize.NewQuery(benchmarkQuery)
|
||||
if err != nil {
|
||||
b.Fatalf("failed to create query: %v", err)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
benchmarkSanitizeResult, err = query.Sanitize(benchmarkArgs...)
|
||||
if err != nil {
|
||||
b.Fatalf("failed to sanitize query: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var benchmarkNewSQLResult string
|
||||
|
||||
func BenchmarkSanitizeSQL(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
var err error
|
||||
for i := 0; i < b.N; i++ {
|
||||
benchmarkNewSQLResult, err = sanitize.SanitizeSQL(benchmarkQuery, benchmarkArgs...)
|
||||
if err != nil {
|
||||
b.Fatalf("failed to sanitize SQL: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
@ -1,55 +0,0 @@
|
||||
package sanitize_test
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/jackc/pgx/v5/internal/sanitize"
|
||||
)
|
||||
|
||||
func FuzzQuoteString(f *testing.F) {
|
||||
const prefix = "prefix"
|
||||
f.Add("new\nline")
|
||||
f.Add("sample text")
|
||||
f.Add("sample q'u'o't'e's")
|
||||
f.Add("select 'quoted $42', $1")
|
||||
|
||||
f.Fuzz(func(t *testing.T, input string) {
|
||||
got := string(sanitize.QuoteString([]byte(prefix), input))
|
||||
want := oldQuoteString(input)
|
||||
|
||||
quoted, ok := strings.CutPrefix(got, prefix)
|
||||
if !ok {
|
||||
t.Fatalf("result has no prefix")
|
||||
}
|
||||
|
||||
if want != quoted {
|
||||
t.Errorf("got %q", got)
|
||||
t.Fatalf("want %q", want)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func FuzzQuoteBytes(f *testing.F) {
|
||||
const prefix = "prefix"
|
||||
f.Add([]byte(nil))
|
||||
f.Add([]byte("\n"))
|
||||
f.Add([]byte("sample text"))
|
||||
f.Add([]byte("sample q'u'o't'e's"))
|
||||
f.Add([]byte("select 'quoted $42', $1"))
|
||||
|
||||
f.Fuzz(func(t *testing.T, input []byte) {
|
||||
got := string(sanitize.QuoteBytes([]byte(prefix), input))
|
||||
want := oldQuoteBytes(input)
|
||||
|
||||
quoted, ok := strings.CutPrefix(got, prefix)
|
||||
if !ok {
|
||||
t.Fatalf("result has no prefix")
|
||||
}
|
||||
|
||||
if want != quoted {
|
||||
t.Errorf("got %q", got)
|
||||
t.Fatalf("want %q", want)
|
||||
}
|
||||
})
|
||||
}
|
@ -1,12 +1,10 @@
|
||||
package sanitize_test
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/jackc/pgx/v5/internal/sanitize"
|
||||
"github.com/jackc/pgx/v4/internal/sanitize"
|
||||
)
|
||||
|
||||
func TestNewQuery(t *testing.T) {
|
||||
@ -62,44 +60,6 @@ func TestNewQuery(t *testing.T) {
|
||||
sql: `select e'escape string\' $42', $1`,
|
||||
expected: sanitize.Query{Parts: []sanitize.Part{`select e'escape string\' $42', `, 1}},
|
||||
},
|
||||
{
|
||||
sql: `select /* a baby's toy */ 'barbie', $1`,
|
||||
expected: sanitize.Query{Parts: []sanitize.Part{`select /* a baby's toy */ 'barbie', `, 1}},
|
||||
},
|
||||
{
|
||||
sql: `select /* *_* */ $1`,
|
||||
expected: sanitize.Query{Parts: []sanitize.Part{`select /* *_* */ `, 1}},
|
||||
},
|
||||
{
|
||||
sql: `select 42 /* /* /* 42 */ */ */, $1`,
|
||||
expected: sanitize.Query{Parts: []sanitize.Part{`select 42 /* /* /* 42 */ */ */, `, 1}},
|
||||
},
|
||||
{
|
||||
sql: "select -- a baby's toy\n'barbie', $1",
|
||||
expected: sanitize.Query{Parts: []sanitize.Part{"select -- a baby's toy\n'barbie', ", 1}},
|
||||
},
|
||||
{
|
||||
sql: "select 42 -- is a Deep Thought's favorite number",
|
||||
expected: sanitize.Query{Parts: []sanitize.Part{"select 42 -- is a Deep Thought's favorite number"}},
|
||||
},
|
||||
{
|
||||
sql: "select 42, -- \\nis a Deep Thought's favorite number\n$1",
|
||||
expected: sanitize.Query{Parts: []sanitize.Part{"select 42, -- \\nis a Deep Thought's favorite number\n", 1}},
|
||||
},
|
||||
{
|
||||
sql: "select 42, -- \\nis a Deep Thought's favorite number\r$1",
|
||||
expected: sanitize.Query{Parts: []sanitize.Part{"select 42, -- \\nis a Deep Thought's favorite number\r", 1}},
|
||||
},
|
||||
{
|
||||
// https://github.com/jackc/pgx/issues/1380
|
||||
sql: "select 'hello w<>rld'",
|
||||
expected: sanitize.Query{Parts: []sanitize.Part{"select 'hello w<>rld'"}},
|
||||
},
|
||||
{
|
||||
// Unterminated quoted string
|
||||
sql: "select 'hello world",
|
||||
expected: sanitize.Query{Parts: []sanitize.Part{"select 'hello world"}},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range successTests {
|
||||
@ -123,68 +83,58 @@ func TestNewQuery(t *testing.T) {
|
||||
func TestQuerySanitize(t *testing.T) {
|
||||
successfulTests := []struct {
|
||||
query sanitize.Query
|
||||
args []any
|
||||
args []interface{}
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
query: sanitize.Query{Parts: []sanitize.Part{"select 42"}},
|
||||
args: []any{},
|
||||
args: []interface{}{},
|
||||
expected: `select 42`,
|
||||
},
|
||||
{
|
||||
query: sanitize.Query{Parts: []sanitize.Part{"select ", 1}},
|
||||
args: []any{int64(42)},
|
||||
expected: `select 42 `,
|
||||
args: []interface{}{int64(42)},
|
||||
expected: `select 42`,
|
||||
},
|
||||
{
|
||||
query: sanitize.Query{Parts: []sanitize.Part{"select ", 1}},
|
||||
args: []any{float64(1.23)},
|
||||
expected: `select 1.23 `,
|
||||
args: []interface{}{float64(1.23)},
|
||||
expected: `select 1.23`,
|
||||
},
|
||||
{
|
||||
query: sanitize.Query{Parts: []sanitize.Part{"select ", 1}},
|
||||
args: []any{true},
|
||||
expected: `select true `,
|
||||
args: []interface{}{true},
|
||||
expected: `select true`,
|
||||
},
|
||||
{
|
||||
query: sanitize.Query{Parts: []sanitize.Part{"select ", 1}},
|
||||
args: []any{[]byte{0, 1, 2, 3, 255}},
|
||||
expected: `select '\x00010203ff' `,
|
||||
args: []interface{}{[]byte{0, 1, 2, 3, 255}},
|
||||
expected: `select '\x00010203ff'`,
|
||||
},
|
||||
{
|
||||
query: sanitize.Query{Parts: []sanitize.Part{"select ", 1}},
|
||||
args: []any{nil},
|
||||
expected: `select null `,
|
||||
args: []interface{}{nil},
|
||||
expected: `select null`,
|
||||
},
|
||||
{
|
||||
query: sanitize.Query{Parts: []sanitize.Part{"select ", 1}},
|
||||
args: []any{"foobar"},
|
||||
expected: `select 'foobar' `,
|
||||
args: []interface{}{"foobar"},
|
||||
expected: `select 'foobar'`,
|
||||
},
|
||||
{
|
||||
query: sanitize.Query{Parts: []sanitize.Part{"select ", 1}},
|
||||
args: []any{"foo'bar"},
|
||||
expected: `select 'foo''bar' `,
|
||||
args: []interface{}{"foo'bar"},
|
||||
expected: `select 'foo''bar'`,
|
||||
},
|
||||
{
|
||||
query: sanitize.Query{Parts: []sanitize.Part{"select ", 1}},
|
||||
args: []any{`foo\'bar`},
|
||||
expected: `select 'foo\''bar' `,
|
||||
args: []interface{}{`foo\'bar`},
|
||||
expected: `select 'foo\''bar'`,
|
||||
},
|
||||
{
|
||||
query: sanitize.Query{Parts: []sanitize.Part{"insert ", 1}},
|
||||
args: []any{time.Date(2020, time.March, 1, 23, 59, 59, 999999999, time.UTC)},
|
||||
expected: `insert '2020-03-01 23:59:59.999999Z' `,
|
||||
},
|
||||
{
|
||||
query: sanitize.Query{Parts: []sanitize.Part{"select 1-", 1}},
|
||||
args: []any{int64(-1)},
|
||||
expected: `select 1- -1 `,
|
||||
},
|
||||
{
|
||||
query: sanitize.Query{Parts: []sanitize.Part{"select 1-", 1}},
|
||||
args: []any{float64(-1)},
|
||||
expected: `select 1- -1 `,
|
||||
args: []interface{}{time.Date(2020, time.March, 1, 23, 59, 59, 999999999, time.UTC)},
|
||||
expected: `insert '2020-03-01 23:59:59.999999Z'`,
|
||||
},
|
||||
}
|
||||
|
||||
@ -202,22 +152,22 @@ func TestQuerySanitize(t *testing.T) {
|
||||
|
||||
errorTests := []struct {
|
||||
query sanitize.Query
|
||||
args []any
|
||||
args []interface{}
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
query: sanitize.Query{Parts: []sanitize.Part{"select ", 1, ", ", 2}},
|
||||
args: []any{int64(42)},
|
||||
args: []interface{}{int64(42)},
|
||||
expected: `insufficient arguments`,
|
||||
},
|
||||
{
|
||||
query: sanitize.Query{Parts: []sanitize.Part{"select 'foo'"}},
|
||||
args: []any{int64(42)},
|
||||
args: []interface{}{int64(42)},
|
||||
expected: `unused argument: 0`,
|
||||
},
|
||||
{
|
||||
query: sanitize.Query{Parts: []sanitize.Part{"select ", 1}},
|
||||
args: []any{42},
|
||||
args: []interface{}{42},
|
||||
expected: `invalid arg type: int`,
|
||||
},
|
||||
}
|
||||
@ -229,55 +179,3 @@ func TestQuerySanitize(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestQuoteString(t *testing.T) {
|
||||
tc := func(name, input string) {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
got := string(sanitize.QuoteString(nil, input))
|
||||
want := oldQuoteString(input)
|
||||
|
||||
if got != want {
|
||||
t.Errorf("got: %s", got)
|
||||
t.Fatalf("want: %s", want)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
tc("empty", "")
|
||||
tc("text", "abcd")
|
||||
tc("with quotes", `one's hat is always a cat`)
|
||||
}
|
||||
|
||||
// This function was used before optimizations.
|
||||
// You should keep for testing purposes - we want to ensure there are no breaking changes.
|
||||
func oldQuoteString(str string) string {
|
||||
return "'" + strings.ReplaceAll(str, "'", "''") + "'"
|
||||
}
|
||||
|
||||
func TestQuoteBytes(t *testing.T) {
|
||||
tc := func(name string, input []byte) {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
got := string(sanitize.QuoteBytes(nil, input))
|
||||
want := oldQuoteBytes(input)
|
||||
|
||||
if got != want {
|
||||
t.Errorf("got: %s", got)
|
||||
t.Fatalf("want: %s", want)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
tc("nil", nil)
|
||||
tc("empty", []byte{})
|
||||
tc("text", []byte("abcd"))
|
||||
}
|
||||
|
||||
// This function was used before optimizations.
|
||||
// You should keep for testing purposes - we want to ensure there are no breaking changes.
|
||||
func oldQuoteBytes(buf []byte) string {
|
||||
return `'\x` + hex.EncodeToString(buf) + "'"
|
||||
}
|
||||
|
@ -1,111 +0,0 @@
|
||||
package stmtcache
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
)
|
||||
|
||||
// LRUCache implements Cache with a Least Recently Used (LRU) cache.
|
||||
type LRUCache struct {
|
||||
cap int
|
||||
m map[string]*list.Element
|
||||
l *list.List
|
||||
invalidStmts []*pgconn.StatementDescription
|
||||
}
|
||||
|
||||
// NewLRUCache creates a new LRUCache. cap is the maximum size of the cache.
|
||||
func NewLRUCache(cap int) *LRUCache {
|
||||
return &LRUCache{
|
||||
cap: cap,
|
||||
m: make(map[string]*list.Element),
|
||||
l: list.New(),
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns the statement description for sql. Returns nil if not found.
|
||||
func (c *LRUCache) Get(key string) *pgconn.StatementDescription {
|
||||
if el, ok := c.m[key]; ok {
|
||||
c.l.MoveToFront(el)
|
||||
return el.Value.(*pgconn.StatementDescription)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Put stores sd in the cache. Put panics if sd.SQL is "". Put does nothing if sd.SQL already exists in the cache or
|
||||
// sd.SQL has been invalidated and HandleInvalidated has not been called yet.
|
||||
func (c *LRUCache) Put(sd *pgconn.StatementDescription) {
|
||||
if sd.SQL == "" {
|
||||
panic("cannot store statement description with empty SQL")
|
||||
}
|
||||
|
||||
if _, present := c.m[sd.SQL]; present {
|
||||
return
|
||||
}
|
||||
|
||||
// The statement may have been invalidated but not yet handled. Do not readd it to the cache.
|
||||
for _, invalidSD := range c.invalidStmts {
|
||||
if invalidSD.SQL == sd.SQL {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if c.l.Len() == c.cap {
|
||||
c.invalidateOldest()
|
||||
}
|
||||
|
||||
el := c.l.PushFront(sd)
|
||||
c.m[sd.SQL] = el
|
||||
}
|
||||
|
||||
// Invalidate invalidates statement description identified by sql. Does nothing if not found.
|
||||
func (c *LRUCache) Invalidate(sql string) {
|
||||
if el, ok := c.m[sql]; ok {
|
||||
delete(c.m, sql)
|
||||
c.invalidStmts = append(c.invalidStmts, el.Value.(*pgconn.StatementDescription))
|
||||
c.l.Remove(el)
|
||||
}
|
||||
}
|
||||
|
||||
// InvalidateAll invalidates all statement descriptions.
|
||||
func (c *LRUCache) InvalidateAll() {
|
||||
el := c.l.Front()
|
||||
for el != nil {
|
||||
c.invalidStmts = append(c.invalidStmts, el.Value.(*pgconn.StatementDescription))
|
||||
el = el.Next()
|
||||
}
|
||||
|
||||
c.m = make(map[string]*list.Element)
|
||||
c.l = list.New()
|
||||
}
|
||||
|
||||
// GetInvalidated returns a slice of all statement descriptions invalidated since the last call to RemoveInvalidated.
|
||||
func (c *LRUCache) GetInvalidated() []*pgconn.StatementDescription {
|
||||
return c.invalidStmts
|
||||
}
|
||||
|
||||
// RemoveInvalidated removes all invalidated statement descriptions. No other calls to Cache must be made between a
|
||||
// call to GetInvalidated and RemoveInvalidated or RemoveInvalidated may remove statement descriptions that were
|
||||
// never seen by the call to GetInvalidated.
|
||||
func (c *LRUCache) RemoveInvalidated() {
|
||||
c.invalidStmts = nil
|
||||
}
|
||||
|
||||
// Len returns the number of cached prepared statement descriptions.
|
||||
func (c *LRUCache) Len() int {
|
||||
return c.l.Len()
|
||||
}
|
||||
|
||||
// Cap returns the maximum number of cached prepared statement descriptions.
|
||||
func (c *LRUCache) Cap() int {
|
||||
return c.cap
|
||||
}
|
||||
|
||||
func (c *LRUCache) invalidateOldest() {
|
||||
oldest := c.l.Back()
|
||||
sd := oldest.Value.(*pgconn.StatementDescription)
|
||||
c.invalidStmts = append(c.invalidStmts, sd)
|
||||
delete(c.m, sd.SQL)
|
||||
c.l.Remove(oldest)
|
||||
}
|
@ -1,45 +0,0 @@
|
||||
// Package stmtcache is a cache for statement descriptions.
|
||||
package stmtcache
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
)
|
||||
|
||||
// StatementName returns a statement name that will be stable for sql across multiple connections and program
|
||||
// executions.
|
||||
func StatementName(sql string) string {
|
||||
digest := sha256.Sum256([]byte(sql))
|
||||
return "stmtcache_" + hex.EncodeToString(digest[0:24])
|
||||
}
|
||||
|
||||
// Cache caches statement descriptions.
|
||||
type Cache interface {
|
||||
// Get returns the statement description for sql. Returns nil if not found.
|
||||
Get(sql string) *pgconn.StatementDescription
|
||||
|
||||
// Put stores sd in the cache. Put panics if sd.SQL is "". Put does nothing if sd.SQL already exists in the cache.
|
||||
Put(sd *pgconn.StatementDescription)
|
||||
|
||||
// Invalidate invalidates statement description identified by sql. Does nothing if not found.
|
||||
Invalidate(sql string)
|
||||
|
||||
// InvalidateAll invalidates all statement descriptions.
|
||||
InvalidateAll()
|
||||
|
||||
// GetInvalidated returns a slice of all statement descriptions invalidated since the last call to RemoveInvalidated.
|
||||
GetInvalidated() []*pgconn.StatementDescription
|
||||
|
||||
// RemoveInvalidated removes all invalidated statement descriptions. No other calls to Cache must be made between a
|
||||
// call to GetInvalidated and RemoveInvalidated or RemoveInvalidated may remove statement descriptions that were
|
||||
// never seen by the call to GetInvalidated.
|
||||
RemoveInvalidated()
|
||||
|
||||
// Len returns the number of cached prepared statement descriptions.
|
||||
Len() int
|
||||
|
||||
// Cap returns the maximum number of cached prepared statement descriptions.
|
||||
Cap() int
|
||||
}
|
@ -1,77 +0,0 @@
|
||||
package stmtcache
|
||||
|
||||
import (
|
||||
"math"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
)
|
||||
|
||||
// UnlimitedCache implements Cache with no capacity limit.
|
||||
type UnlimitedCache struct {
|
||||
m map[string]*pgconn.StatementDescription
|
||||
invalidStmts []*pgconn.StatementDescription
|
||||
}
|
||||
|
||||
// NewUnlimitedCache creates a new UnlimitedCache.
|
||||
func NewUnlimitedCache() *UnlimitedCache {
|
||||
return &UnlimitedCache{
|
||||
m: make(map[string]*pgconn.StatementDescription),
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns the statement description for sql. Returns nil if not found.
|
||||
func (c *UnlimitedCache) Get(sql string) *pgconn.StatementDescription {
|
||||
return c.m[sql]
|
||||
}
|
||||
|
||||
// Put stores sd in the cache. Put panics if sd.SQL is "". Put does nothing if sd.SQL already exists in the cache.
|
||||
func (c *UnlimitedCache) Put(sd *pgconn.StatementDescription) {
|
||||
if sd.SQL == "" {
|
||||
panic("cannot store statement description with empty SQL")
|
||||
}
|
||||
|
||||
if _, present := c.m[sd.SQL]; present {
|
||||
return
|
||||
}
|
||||
|
||||
c.m[sd.SQL] = sd
|
||||
}
|
||||
|
||||
// Invalidate invalidates statement description identified by sql. Does nothing if not found.
|
||||
func (c *UnlimitedCache) Invalidate(sql string) {
|
||||
if sd, ok := c.m[sql]; ok {
|
||||
delete(c.m, sql)
|
||||
c.invalidStmts = append(c.invalidStmts, sd)
|
||||
}
|
||||
}
|
||||
|
||||
// InvalidateAll invalidates all statement descriptions.
|
||||
func (c *UnlimitedCache) InvalidateAll() {
|
||||
for _, sd := range c.m {
|
||||
c.invalidStmts = append(c.invalidStmts, sd)
|
||||
}
|
||||
|
||||
c.m = make(map[string]*pgconn.StatementDescription)
|
||||
}
|
||||
|
||||
// GetInvalidated returns a slice of all statement descriptions invalidated since the last call to RemoveInvalidated.
|
||||
func (c *UnlimitedCache) GetInvalidated() []*pgconn.StatementDescription {
|
||||
return c.invalidStmts
|
||||
}
|
||||
|
||||
// RemoveInvalidated removes all invalidated statement descriptions. No other calls to Cache must be made between a
|
||||
// call to GetInvalidated and RemoveInvalidated or RemoveInvalidated may remove statement descriptions that were
|
||||
// never seen by the call to GetInvalidated.
|
||||
func (c *UnlimitedCache) RemoveInvalidated() {
|
||||
c.invalidStmts = nil
|
||||
}
|
||||
|
||||
// Len returns the number of cached prepared statement descriptions.
|
||||
func (c *UnlimitedCache) Len() int {
|
||||
return len(c.m)
|
||||
}
|
||||
|
||||
// Cap returns the maximum number of cached prepared statement descriptions.
|
||||
func (c *UnlimitedCache) Cap() int {
|
||||
return math.MaxInt
|
||||
}
|
@ -2,17 +2,11 @@ package pgx
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgtype"
|
||||
errors "golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// The PostgreSQL wire protocol has a limit of 1 GB - 1 per message. See definition of
|
||||
// PQ_LARGE_MESSAGE_LIMIT in the PostgreSQL source code. To allow for the other data
|
||||
// in the message,maxLargeObjectMessageLength should be no larger than 1 GB - 1 KB.
|
||||
var maxLargeObjectMessageLength = 1024*1024*1024 - 1024
|
||||
|
||||
// LargeObjects is a structure used to access the large objects API. It is only valid within the transaction where it
|
||||
// was created.
|
||||
//
|
||||
@ -63,10 +57,10 @@ func (o *LargeObjects) Unlink(ctx context.Context, oid uint32) error {
|
||||
// A LargeObject is a large object stored on the server. It is only valid within the transaction that it was initialized
|
||||
// in. It uses the context it was initialized with for all operations. It implements these interfaces:
|
||||
//
|
||||
// io.Writer
|
||||
// io.Reader
|
||||
// io.Seeker
|
||||
// io.Closer
|
||||
// io.Writer
|
||||
// io.Reader
|
||||
// io.Seeker
|
||||
// io.Closer
|
||||
type LargeObject struct {
|
||||
ctx context.Context
|
||||
tx Tx
|
||||
@ -75,65 +69,32 @@ type LargeObject struct {
|
||||
|
||||
// Write writes p to the large object and returns the number of bytes written and an error if not all of p was written.
|
||||
func (o *LargeObject) Write(p []byte) (int, error) {
|
||||
nTotal := 0
|
||||
for {
|
||||
expected := len(p) - nTotal
|
||||
if expected == 0 {
|
||||
break
|
||||
} else if expected > maxLargeObjectMessageLength {
|
||||
expected = maxLargeObjectMessageLength
|
||||
}
|
||||
|
||||
var n int
|
||||
err := o.tx.QueryRow(o.ctx, "select lowrite($1, $2)", o.fd, p[nTotal:nTotal+expected]).Scan(&n)
|
||||
if err != nil {
|
||||
return nTotal, err
|
||||
}
|
||||
|
||||
if n < 0 {
|
||||
return nTotal, errors.New("failed to write to large object")
|
||||
}
|
||||
|
||||
nTotal += n
|
||||
|
||||
if n < expected {
|
||||
return nTotal, errors.New("short write to large object")
|
||||
} else if n > expected {
|
||||
return nTotal, errors.New("invalid write to large object")
|
||||
}
|
||||
var n int
|
||||
err := o.tx.QueryRow(o.ctx, "select lowrite($1, $2)", o.fd, p).Scan(&n)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
|
||||
return nTotal, nil
|
||||
if n < 0 {
|
||||
return 0, errors.New("failed to write to large object")
|
||||
}
|
||||
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Read reads up to len(p) bytes into p returning the number of bytes read.
|
||||
func (o *LargeObject) Read(p []byte) (int, error) {
|
||||
nTotal := 0
|
||||
for {
|
||||
expected := len(p) - nTotal
|
||||
if expected == 0 {
|
||||
break
|
||||
} else if expected > maxLargeObjectMessageLength {
|
||||
expected = maxLargeObjectMessageLength
|
||||
}
|
||||
|
||||
res := pgtype.PreallocBytes(p[nTotal:])
|
||||
err := o.tx.QueryRow(o.ctx, "select loread($1, $2)", o.fd, expected).Scan(&res)
|
||||
// We compute expected so that it always fits into p, so it should never happen
|
||||
// that PreallocBytes's ScanBytes had to allocate a new slice.
|
||||
nTotal += len(res)
|
||||
if err != nil {
|
||||
return nTotal, err
|
||||
}
|
||||
|
||||
if len(res) < expected {
|
||||
return nTotal, io.EOF
|
||||
} else if len(res) > expected {
|
||||
return nTotal, errors.New("invalid read of large object")
|
||||
}
|
||||
var res []byte
|
||||
err := o.tx.QueryRow(o.ctx, "select loread($1, $2)", o.fd, len(p)).Scan(&res)
|
||||
copy(p, res)
|
||||
if err != nil {
|
||||
return len(res), err
|
||||
}
|
||||
|
||||
return nTotal, nil
|
||||
if len(res) < len(p) {
|
||||
err = io.EOF
|
||||
}
|
||||
return len(res), err
|
||||
}
|
||||
|
||||
// Seek moves the current location pointer to the new location specified by offset.
|
||||
@ -148,13 +109,13 @@ func (o *LargeObject) Tell() (n int64, err error) {
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Truncate the large object to size.
|
||||
// Trunctes the large object to size.
|
||||
func (o *LargeObject) Truncate(size int64) (err error) {
|
||||
_, err = o.tx.Exec(o.ctx, "select lo_truncate64($1, $2)", o.fd, size)
|
||||
return err
|
||||
}
|
||||
|
||||
// Close the large object descriptor.
|
||||
// Close closees the large object descriptor.
|
||||
func (o *LargeObject) Close() error {
|
||||
_, err := o.tx.Exec(o.ctx, "select lo_close($1)", o.fd)
|
||||
return err
|
||||
|
@ -1,20 +0,0 @@
|
||||
package pgx
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
// SetMaxLargeObjectMessageLength sets internal maxLargeObjectMessageLength variable
|
||||
// to the given length for the duration of the test.
|
||||
//
|
||||
// Tests using this helper should not use t.Parallel().
|
||||
func SetMaxLargeObjectMessageLength(t *testing.T, length int) {
|
||||
t.Helper()
|
||||
|
||||
original := maxLargeObjectMessageLength
|
||||
t.Cleanup(func() {
|
||||
maxLargeObjectMessageLength = original
|
||||
})
|
||||
|
||||
maxLargeObjectMessageLength = length
|
||||
}
|
@ -7,16 +7,14 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
"github.com/jackc/pgx/v5/pgxtest"
|
||||
"github.com/jackc/pgconn"
|
||||
"github.com/jackc/pgx/v4"
|
||||
)
|
||||
|
||||
func TestLargeObjects(t *testing.T) {
|
||||
// We use a very short limit to test chunking logic.
|
||||
pgx.SetMaxLargeObjectMessageLength(t, 2)
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
conn, err := pgx.Connect(ctx, os.Getenv("PGX_TEST_DATABASE"))
|
||||
@ -24,8 +22,6 @@ func TestLargeObjects(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pgxtest.SkipCockroachDB(t, conn, "Server does support large objects")
|
||||
|
||||
tx, err := conn.Begin(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -34,11 +30,10 @@ func TestLargeObjects(t *testing.T) {
|
||||
testLargeObjects(t, ctx, tx)
|
||||
}
|
||||
|
||||
func TestLargeObjectsSimpleProtocol(t *testing.T) {
|
||||
// We use a very short limit to test chunking logic.
|
||||
pgx.SetMaxLargeObjectMessageLength(t, 2)
|
||||
func TestLargeObjectsPreferSimpleProtocol(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
config, err := pgx.ParseConfig(os.Getenv("PGX_TEST_DATABASE"))
|
||||
@ -46,15 +41,13 @@ func TestLargeObjectsSimpleProtocol(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
config.DefaultQueryExecMode = pgx.QueryExecModeSimpleProtocol
|
||||
config.PreferSimpleProtocol = true
|
||||
|
||||
conn, err := pgx.ConnectConfig(ctx, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pgxtest.SkipCockroachDB(t, conn, "Server does support large objects")
|
||||
|
||||
tx, err := conn.Begin(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -162,10 +155,9 @@ func testLargeObjects(t *testing.T, ctx context.Context, tx pgx.Tx) {
|
||||
}
|
||||
|
||||
func TestLargeObjectsMultipleTransactions(t *testing.T) {
|
||||
// We use a very short limit to test chunking logic.
|
||||
pgx.SetMaxLargeObjectMessageLength(t, 2)
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
conn, err := pgx.Connect(ctx, os.Getenv("PGX_TEST_DATABASE"))
|
||||
@ -173,8 +165,6 @@ func TestLargeObjectsMultipleTransactions(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pgxtest.SkipCockroachDB(t, conn, "Server does support large objects")
|
||||
|
||||
tx, err := conn.Begin(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
49
log/log15adapter/adapter.go
Normal file
49
log/log15adapter/adapter.go
Normal file
@ -0,0 +1,49 @@
|
||||
// Package log15adapter provides a logger that writes to a github.com/inconshreveable/log15.Logger
|
||||
// log.
|
||||
package log15adapter
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/jackc/pgx/v4"
|
||||
)
|
||||
|
||||
// Log15Logger interface defines the subset of
|
||||
// github.com/inconshreveable/log15.Logger that this adapter uses.
|
||||
type Log15Logger interface {
|
||||
Debug(msg string, ctx ...interface{})
|
||||
Info(msg string, ctx ...interface{})
|
||||
Warn(msg string, ctx ...interface{})
|
||||
Error(msg string, ctx ...interface{})
|
||||
Crit(msg string, ctx ...interface{})
|
||||
}
|
||||
|
||||
type Logger struct {
|
||||
l Log15Logger
|
||||
}
|
||||
|
||||
func NewLogger(l Log15Logger) *Logger {
|
||||
return &Logger{l: l}
|
||||
}
|
||||
|
||||
func (l *Logger) Log(ctx context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) {
|
||||
logArgs := make([]interface{}, 0, len(data))
|
||||
for k, v := range data {
|
||||
logArgs = append(logArgs, k, v)
|
||||
}
|
||||
|
||||
switch level {
|
||||
case pgx.LogLevelTrace:
|
||||
l.l.Debug(msg, append(logArgs, "PGX_LOG_LEVEL", level)...)
|
||||
case pgx.LogLevelDebug:
|
||||
l.l.Debug(msg, logArgs...)
|
||||
case pgx.LogLevelInfo:
|
||||
l.l.Info(msg, logArgs...)
|
||||
case pgx.LogLevelWarn:
|
||||
l.l.Warn(msg, logArgs...)
|
||||
case pgx.LogLevelError:
|
||||
l.l.Error(msg, logArgs...)
|
||||
default:
|
||||
l.l.Error(msg, append(logArgs, "INVALID_PGX_LOG_LEVEL", level)...)
|
||||
}
|
||||
}
|
42
log/logrusadapter/adapter.go
Normal file
42
log/logrusadapter/adapter.go
Normal file
@ -0,0 +1,42 @@
|
||||
// Package logrusadapter provides a logger that writes to a github.com/sirupsen/logrus.Logger
|
||||
// log.
|
||||
package logrusadapter
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/jackc/pgx/v4"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type Logger struct {
|
||||
l logrus.FieldLogger
|
||||
}
|
||||
|
||||
func NewLogger(l logrus.FieldLogger) *Logger {
|
||||
return &Logger{l: l}
|
||||
}
|
||||
|
||||
func (l *Logger) Log(ctx context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) {
|
||||
var logger logrus.FieldLogger
|
||||
if data != nil {
|
||||
logger = l.l.WithFields(data)
|
||||
} else {
|
||||
logger = l.l
|
||||
}
|
||||
|
||||
switch level {
|
||||
case pgx.LogLevelTrace:
|
||||
logger.WithField("PGX_LOG_LEVEL", level).Debug(msg)
|
||||
case pgx.LogLevelDebug:
|
||||
logger.Debug(msg)
|
||||
case pgx.LogLevelInfo:
|
||||
logger.Info(msg)
|
||||
case pgx.LogLevelWarn:
|
||||
logger.Warn(msg)
|
||||
case pgx.LogLevelError:
|
||||
logger.Error(msg)
|
||||
default:
|
||||
logger.WithField("INVALID_PGX_LOG_LEVEL", level).Error(msg)
|
||||
}
|
||||
}
|
@ -6,13 +6,13 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/jackc/pgx/v5/tracelog"
|
||||
"github.com/jackc/pgx/v4"
|
||||
)
|
||||
|
||||
// TestingLogger interface defines the subset of testing.TB methods used by this
|
||||
// adapter.
|
||||
type TestingLogger interface {
|
||||
Log(args ...any)
|
||||
Log(args ...interface{})
|
||||
}
|
||||
|
||||
type Logger struct {
|
||||
@ -23,8 +23,8 @@ func NewLogger(l TestingLogger) *Logger {
|
||||
return &Logger{l: l}
|
||||
}
|
||||
|
||||
func (l *Logger) Log(ctx context.Context, level tracelog.LogLevel, msg string, data map[string]any) {
|
||||
logArgs := make([]any, 0, 2+len(data))
|
||||
func (l *Logger) Log(ctx context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) {
|
||||
logArgs := make([]interface{}, 0, 2+len(data))
|
||||
logArgs = append(logArgs, level, msg)
|
||||
for k, v := range data {
|
||||
logArgs = append(logArgs, fmt.Sprintf("%s=%v", k, v))
|
||||
|
42
log/zapadapter/adapter.go
Normal file
42
log/zapadapter/adapter.go
Normal file
@ -0,0 +1,42 @@
|
||||
// Package zapadapter provides a logger that writes to a go.uber.org/zap.Logger.
|
||||
package zapadapter
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/jackc/pgx/v4"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
type Logger struct {
|
||||
logger *zap.Logger
|
||||
}
|
||||
|
||||
func NewLogger(logger *zap.Logger) *Logger {
|
||||
return &Logger{logger: logger.WithOptions(zap.AddCallerSkip(1))}
|
||||
}
|
||||
|
||||
func (pl *Logger) Log(ctx context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) {
|
||||
fields := make([]zapcore.Field, len(data))
|
||||
i := 0
|
||||
for k, v := range data {
|
||||
fields[i] = zap.Reflect(k, v)
|
||||
i++
|
||||
}
|
||||
|
||||
switch level {
|
||||
case pgx.LogLevelTrace:
|
||||
pl.logger.Debug(msg, append(fields, zap.Stringer("PGX_LOG_LEVEL", level))...)
|
||||
case pgx.LogLevelDebug:
|
||||
pl.logger.Debug(msg, fields...)
|
||||
case pgx.LogLevelInfo:
|
||||
pl.logger.Info(msg, fields...)
|
||||
case pgx.LogLevelWarn:
|
||||
pl.logger.Warn(msg, fields...)
|
||||
case pgx.LogLevelError:
|
||||
pl.logger.Error(msg, fields...)
|
||||
default:
|
||||
pl.logger.Error(msg, append(fields, zap.Stringer("PGX_LOG_LEVEL", level))...)
|
||||
}
|
||||
}
|
42
log/zerologadapter/adapter.go
Normal file
42
log/zerologadapter/adapter.go
Normal file
@ -0,0 +1,42 @@
|
||||
// Package zerologadapter provides a logger that writes to a github.com/rs/zerolog.
|
||||
package zerologadapter
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/jackc/pgx/v4"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
type Logger struct {
|
||||
logger zerolog.Logger
|
||||
}
|
||||
|
||||
// NewLogger accepts a zerolog.Logger as input and returns a new custom pgx
|
||||
// logging fascade as output.
|
||||
func NewLogger(logger zerolog.Logger) *Logger {
|
||||
return &Logger{
|
||||
logger: logger.With().Str("module", "pgx").Logger(),
|
||||
}
|
||||
}
|
||||
|
||||
func (pl *Logger) Log(ctx context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) {
|
||||
var zlevel zerolog.Level
|
||||
switch level {
|
||||
case pgx.LogLevelNone:
|
||||
zlevel = zerolog.NoLevel
|
||||
case pgx.LogLevelError:
|
||||
zlevel = zerolog.ErrorLevel
|
||||
case pgx.LogLevelWarn:
|
||||
zlevel = zerolog.WarnLevel
|
||||
case pgx.LogLevelInfo:
|
||||
zlevel = zerolog.InfoLevel
|
||||
case pgx.LogLevelDebug:
|
||||
zlevel = zerolog.DebugLevel
|
||||
default:
|
||||
zlevel = zerolog.DebugLevel
|
||||
}
|
||||
|
||||
pgxlog := pl.logger.With().Fields(data).Logger()
|
||||
pgxlog.WithLevel(zlevel).Msg(msg)
|
||||
}
|
99
logger.go
Normal file
99
logger.go
Normal file
@ -0,0 +1,99 @@
|
||||
package pgx
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
|
||||
errors "golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// The values for log levels are chosen such that the zero value means that no
|
||||
// log level was specified.
|
||||
const (
|
||||
LogLevelTrace = 6
|
||||
LogLevelDebug = 5
|
||||
LogLevelInfo = 4
|
||||
LogLevelWarn = 3
|
||||
LogLevelError = 2
|
||||
LogLevelNone = 1
|
||||
)
|
||||
|
||||
// LogLevel represents the pgx logging level. See LogLevel* constants for
|
||||
// possible values.
|
||||
type LogLevel int
|
||||
|
||||
func (ll LogLevel) String() string {
|
||||
switch ll {
|
||||
case LogLevelTrace:
|
||||
return "trace"
|
||||
case LogLevelDebug:
|
||||
return "debug"
|
||||
case LogLevelInfo:
|
||||
return "info"
|
||||
case LogLevelWarn:
|
||||
return "warn"
|
||||
case LogLevelError:
|
||||
return "error"
|
||||
case LogLevelNone:
|
||||
return "none"
|
||||
default:
|
||||
return fmt.Sprintf("invalid level %d", ll)
|
||||
}
|
||||
}
|
||||
|
||||
// Logger is the interface used to get logging from pgx internals.
|
||||
type Logger interface {
|
||||
// Log a message at the given level with data key/value pairs. data may be nil.
|
||||
Log(ctx context.Context, level LogLevel, msg string, data map[string]interface{})
|
||||
}
|
||||
|
||||
// LogLevelFromString converts log level string to constant
|
||||
//
|
||||
// Valid levels:
|
||||
// trace
|
||||
// debug
|
||||
// info
|
||||
// warn
|
||||
// error
|
||||
// none
|
||||
func LogLevelFromString(s string) (LogLevel, error) {
|
||||
switch s {
|
||||
case "trace":
|
||||
return LogLevelTrace, nil
|
||||
case "debug":
|
||||
return LogLevelDebug, nil
|
||||
case "info":
|
||||
return LogLevelInfo, nil
|
||||
case "warn":
|
||||
return LogLevelWarn, nil
|
||||
case "error":
|
||||
return LogLevelError, nil
|
||||
case "none":
|
||||
return LogLevelNone, nil
|
||||
default:
|
||||
return 0, errors.New("invalid log level")
|
||||
}
|
||||
}
|
||||
|
||||
func logQueryArgs(args []interface{}) []interface{} {
|
||||
logArgs := make([]interface{}, 0, len(args))
|
||||
|
||||
for _, a := range args {
|
||||
switch v := a.(type) {
|
||||
case []byte:
|
||||
if len(v) < 64 {
|
||||
a = hex.EncodeToString(v)
|
||||
} else {
|
||||
a = fmt.Sprintf("%x (truncated %d bytes)", v[:64], len(v)-64)
|
||||
}
|
||||
case string:
|
||||
if len(v) > 64 {
|
||||
a = fmt.Sprintf("%s (truncated %d bytes)", v[:64], len(v)-64)
|
||||
}
|
||||
}
|
||||
logArgs = append(logArgs, a)
|
||||
}
|
||||
|
||||
return logArgs
|
||||
}
|
23
messages.go
Normal file
23
messages.go
Normal file
@ -0,0 +1,23 @@
|
||||
package pgx
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
|
||||
"github.com/jackc/pgtype"
|
||||
)
|
||||
|
||||
func convertDriverValuers(args []interface{}) ([]interface{}, error) {
|
||||
for i, arg := range args {
|
||||
switch arg := arg.(type) {
|
||||
case pgtype.BinaryEncoder:
|
||||
case pgtype.TextEncoder:
|
||||
case driver.Valuer:
|
||||
v, err := callValuerValue(arg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
args[i] = v
|
||||
}
|
||||
}
|
||||
return args, nil
|
||||
}
|
@ -1,152 +0,0 @@
|
||||
// Package multitracer provides a Tracer that can combine several tracers into one.
|
||||
package multitracer
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v5/pgxpool"
|
||||
)
|
||||
|
||||
// Tracer can combine several tracers into one.
|
||||
// You can use New to automatically split tracers by interface.
|
||||
type Tracer struct {
|
||||
QueryTracers []pgx.QueryTracer
|
||||
BatchTracers []pgx.BatchTracer
|
||||
CopyFromTracers []pgx.CopyFromTracer
|
||||
PrepareTracers []pgx.PrepareTracer
|
||||
ConnectTracers []pgx.ConnectTracer
|
||||
PoolAcquireTracers []pgxpool.AcquireTracer
|
||||
PoolReleaseTracers []pgxpool.ReleaseTracer
|
||||
}
|
||||
|
||||
// New returns new Tracer from tracers with automatically split tracers by interface.
|
||||
func New(tracers ...pgx.QueryTracer) *Tracer {
|
||||
var t Tracer
|
||||
|
||||
for _, tracer := range tracers {
|
||||
t.QueryTracers = append(t.QueryTracers, tracer)
|
||||
|
||||
if batchTracer, ok := tracer.(pgx.BatchTracer); ok {
|
||||
t.BatchTracers = append(t.BatchTracers, batchTracer)
|
||||
}
|
||||
|
||||
if copyFromTracer, ok := tracer.(pgx.CopyFromTracer); ok {
|
||||
t.CopyFromTracers = append(t.CopyFromTracers, copyFromTracer)
|
||||
}
|
||||
|
||||
if prepareTracer, ok := tracer.(pgx.PrepareTracer); ok {
|
||||
t.PrepareTracers = append(t.PrepareTracers, prepareTracer)
|
||||
}
|
||||
|
||||
if connectTracer, ok := tracer.(pgx.ConnectTracer); ok {
|
||||
t.ConnectTracers = append(t.ConnectTracers, connectTracer)
|
||||
}
|
||||
|
||||
if poolAcquireTracer, ok := tracer.(pgxpool.AcquireTracer); ok {
|
||||
t.PoolAcquireTracers = append(t.PoolAcquireTracers, poolAcquireTracer)
|
||||
}
|
||||
|
||||
if poolReleaseTracer, ok := tracer.(pgxpool.ReleaseTracer); ok {
|
||||
t.PoolReleaseTracers = append(t.PoolReleaseTracers, poolReleaseTracer)
|
||||
}
|
||||
}
|
||||
|
||||
return &t
|
||||
}
|
||||
|
||||
func (t *Tracer) TraceQueryStart(ctx context.Context, conn *pgx.Conn, data pgx.TraceQueryStartData) context.Context {
|
||||
for _, tracer := range t.QueryTracers {
|
||||
ctx = tracer.TraceQueryStart(ctx, conn, data)
|
||||
}
|
||||
|
||||
return ctx
|
||||
}
|
||||
|
||||
func (t *Tracer) TraceQueryEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceQueryEndData) {
|
||||
for _, tracer := range t.QueryTracers {
|
||||
tracer.TraceQueryEnd(ctx, conn, data)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tracer) TraceBatchStart(ctx context.Context, conn *pgx.Conn, data pgx.TraceBatchStartData) context.Context {
|
||||
for _, tracer := range t.BatchTracers {
|
||||
ctx = tracer.TraceBatchStart(ctx, conn, data)
|
||||
}
|
||||
|
||||
return ctx
|
||||
}
|
||||
|
||||
func (t *Tracer) TraceBatchQuery(ctx context.Context, conn *pgx.Conn, data pgx.TraceBatchQueryData) {
|
||||
for _, tracer := range t.BatchTracers {
|
||||
tracer.TraceBatchQuery(ctx, conn, data)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tracer) TraceBatchEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceBatchEndData) {
|
||||
for _, tracer := range t.BatchTracers {
|
||||
tracer.TraceBatchEnd(ctx, conn, data)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tracer) TraceCopyFromStart(ctx context.Context, conn *pgx.Conn, data pgx.TraceCopyFromStartData) context.Context {
|
||||
for _, tracer := range t.CopyFromTracers {
|
||||
ctx = tracer.TraceCopyFromStart(ctx, conn, data)
|
||||
}
|
||||
|
||||
return ctx
|
||||
}
|
||||
|
||||
func (t *Tracer) TraceCopyFromEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceCopyFromEndData) {
|
||||
for _, tracer := range t.CopyFromTracers {
|
||||
tracer.TraceCopyFromEnd(ctx, conn, data)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tracer) TracePrepareStart(ctx context.Context, conn *pgx.Conn, data pgx.TracePrepareStartData) context.Context {
|
||||
for _, tracer := range t.PrepareTracers {
|
||||
ctx = tracer.TracePrepareStart(ctx, conn, data)
|
||||
}
|
||||
|
||||
return ctx
|
||||
}
|
||||
|
||||
func (t *Tracer) TracePrepareEnd(ctx context.Context, conn *pgx.Conn, data pgx.TracePrepareEndData) {
|
||||
for _, tracer := range t.PrepareTracers {
|
||||
tracer.TracePrepareEnd(ctx, conn, data)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tracer) TraceConnectStart(ctx context.Context, data pgx.TraceConnectStartData) context.Context {
|
||||
for _, tracer := range t.ConnectTracers {
|
||||
ctx = tracer.TraceConnectStart(ctx, data)
|
||||
}
|
||||
|
||||
return ctx
|
||||
}
|
||||
|
||||
func (t *Tracer) TraceConnectEnd(ctx context.Context, data pgx.TraceConnectEndData) {
|
||||
for _, tracer := range t.ConnectTracers {
|
||||
tracer.TraceConnectEnd(ctx, data)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tracer) TraceAcquireStart(ctx context.Context, pool *pgxpool.Pool, data pgxpool.TraceAcquireStartData) context.Context {
|
||||
for _, tracer := range t.PoolAcquireTracers {
|
||||
ctx = tracer.TraceAcquireStart(ctx, pool, data)
|
||||
}
|
||||
|
||||
return ctx
|
||||
}
|
||||
|
||||
func (t *Tracer) TraceAcquireEnd(ctx context.Context, pool *pgxpool.Pool, data pgxpool.TraceAcquireEndData) {
|
||||
for _, tracer := range t.PoolAcquireTracers {
|
||||
tracer.TraceAcquireEnd(ctx, pool, data)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tracer) TraceRelease(pool *pgxpool.Pool, data pgxpool.TraceReleaseData) {
|
||||
for _, tracer := range t.PoolReleaseTracers {
|
||||
tracer.TraceRelease(pool, data)
|
||||
}
|
||||
}
|
@ -1,115 +0,0 @@
|
||||
package multitracer_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v5/multitracer"
|
||||
"github.com/jackc/pgx/v5/pgxpool"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type testFullTracer struct{}
|
||||
|
||||
func (tt *testFullTracer) TraceQueryStart(ctx context.Context, conn *pgx.Conn, data pgx.TraceQueryStartData) context.Context {
|
||||
return ctx
|
||||
}
|
||||
|
||||
func (tt *testFullTracer) TraceQueryEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceQueryEndData) {
|
||||
}
|
||||
|
||||
func (tt *testFullTracer) TraceBatchStart(ctx context.Context, conn *pgx.Conn, data pgx.TraceBatchStartData) context.Context {
|
||||
return ctx
|
||||
}
|
||||
|
||||
func (tt *testFullTracer) TraceBatchQuery(ctx context.Context, conn *pgx.Conn, data pgx.TraceBatchQueryData) {
|
||||
}
|
||||
|
||||
func (tt *testFullTracer) TraceBatchEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceBatchEndData) {
|
||||
}
|
||||
|
||||
func (tt *testFullTracer) TraceCopyFromStart(ctx context.Context, conn *pgx.Conn, data pgx.TraceCopyFromStartData) context.Context {
|
||||
return ctx
|
||||
}
|
||||
|
||||
func (tt *testFullTracer) TraceCopyFromEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceCopyFromEndData) {
|
||||
}
|
||||
|
||||
func (tt *testFullTracer) TracePrepareStart(ctx context.Context, conn *pgx.Conn, data pgx.TracePrepareStartData) context.Context {
|
||||
return ctx
|
||||
}
|
||||
|
||||
func (tt *testFullTracer) TracePrepareEnd(ctx context.Context, conn *pgx.Conn, data pgx.TracePrepareEndData) {
|
||||
}
|
||||
|
||||
func (tt *testFullTracer) TraceConnectStart(ctx context.Context, data pgx.TraceConnectStartData) context.Context {
|
||||
return ctx
|
||||
}
|
||||
|
||||
func (tt *testFullTracer) TraceConnectEnd(ctx context.Context, data pgx.TraceConnectEndData) {
|
||||
}
|
||||
|
||||
func (tt *testFullTracer) TraceAcquireStart(ctx context.Context, pool *pgxpool.Pool, data pgxpool.TraceAcquireStartData) context.Context {
|
||||
return ctx
|
||||
}
|
||||
|
||||
func (tt *testFullTracer) TraceAcquireEnd(ctx context.Context, pool *pgxpool.Pool, data pgxpool.TraceAcquireEndData) {
|
||||
}
|
||||
|
||||
func (tt *testFullTracer) TraceRelease(pool *pgxpool.Pool, data pgxpool.TraceReleaseData) {
|
||||
}
|
||||
|
||||
type testCopyTracer struct{}
|
||||
|
||||
func (tt *testCopyTracer) TraceQueryStart(ctx context.Context, conn *pgx.Conn, data pgx.TraceQueryStartData) context.Context {
|
||||
return ctx
|
||||
}
|
||||
|
||||
func (tt *testCopyTracer) TraceQueryEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceQueryEndData) {
|
||||
}
|
||||
|
||||
func (tt *testCopyTracer) TraceCopyFromStart(ctx context.Context, conn *pgx.Conn, data pgx.TraceCopyFromStartData) context.Context {
|
||||
return ctx
|
||||
}
|
||||
|
||||
func (tt *testCopyTracer) TraceCopyFromEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceCopyFromEndData) {
|
||||
}
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fullTracer := &testFullTracer{}
|
||||
copyTracer := &testCopyTracer{}
|
||||
|
||||
mt := multitracer.New(fullTracer, copyTracer)
|
||||
require.Equal(
|
||||
t,
|
||||
&multitracer.Tracer{
|
||||
QueryTracers: []pgx.QueryTracer{
|
||||
fullTracer,
|
||||
copyTracer,
|
||||
},
|
||||
BatchTracers: []pgx.BatchTracer{
|
||||
fullTracer,
|
||||
},
|
||||
CopyFromTracers: []pgx.CopyFromTracer{
|
||||
fullTracer,
|
||||
copyTracer,
|
||||
},
|
||||
PrepareTracers: []pgx.PrepareTracer{
|
||||
fullTracer,
|
||||
},
|
||||
ConnectTracers: []pgx.ConnectTracer{
|
||||
fullTracer,
|
||||
},
|
||||
PoolAcquireTracers: []pgxpool.AcquireTracer{
|
||||
fullTracer,
|
||||
},
|
||||
PoolReleaseTracers: []pgxpool.ReleaseTracer{
|
||||
fullTracer,
|
||||
},
|
||||
},
|
||||
mt,
|
||||
)
|
||||
}
|
295
named_args.go
295
named_args.go
@ -1,295 +0,0 @@
|
||||
package pgx
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// NamedArgs can be used as the first argument to a query method. It will replace every '@' named placeholder with a '$'
|
||||
// ordinal placeholder and construct the appropriate arguments.
|
||||
//
|
||||
// For example, the following two queries are equivalent:
|
||||
//
|
||||
// conn.Query(ctx, "select * from widgets where foo = @foo and bar = @bar", pgx.NamedArgs{"foo": 1, "bar": 2})
|
||||
// conn.Query(ctx, "select * from widgets where foo = $1 and bar = $2", 1, 2)
|
||||
//
|
||||
// Named placeholders are case sensitive and must start with a letter or underscore. Subsequent characters can be
|
||||
// letters, numbers, or underscores.
|
||||
type NamedArgs map[string]any
|
||||
|
||||
// RewriteQuery implements the QueryRewriter interface.
|
||||
func (na NamedArgs) RewriteQuery(ctx context.Context, conn *Conn, sql string, args []any) (newSQL string, newArgs []any, err error) {
|
||||
return rewriteQuery(na, sql, false)
|
||||
}
|
||||
|
||||
// StrictNamedArgs can be used in the same way as NamedArgs, but provided arguments are also checked to include all
|
||||
// named arguments that the sql query uses, and no extra arguments.
|
||||
type StrictNamedArgs map[string]any
|
||||
|
||||
// RewriteQuery implements the QueryRewriter interface.
|
||||
func (sna StrictNamedArgs) RewriteQuery(ctx context.Context, conn *Conn, sql string, args []any) (newSQL string, newArgs []any, err error) {
|
||||
return rewriteQuery(sna, sql, true)
|
||||
}
|
||||
|
||||
type namedArg string
|
||||
|
||||
type sqlLexer struct {
|
||||
src string
|
||||
start int
|
||||
pos int
|
||||
nested int // multiline comment nesting level.
|
||||
stateFn stateFn
|
||||
parts []any
|
||||
|
||||
nameToOrdinal map[namedArg]int
|
||||
}
|
||||
|
||||
type stateFn func(*sqlLexer) stateFn
|
||||
|
||||
func rewriteQuery(na map[string]any, sql string, isStrict bool) (newSQL string, newArgs []any, err error) {
|
||||
l := &sqlLexer{
|
||||
src: sql,
|
||||
stateFn: rawState,
|
||||
nameToOrdinal: make(map[namedArg]int, len(na)),
|
||||
}
|
||||
|
||||
for l.stateFn != nil {
|
||||
l.stateFn = l.stateFn(l)
|
||||
}
|
||||
|
||||
sb := strings.Builder{}
|
||||
for _, p := range l.parts {
|
||||
switch p := p.(type) {
|
||||
case string:
|
||||
sb.WriteString(p)
|
||||
case namedArg:
|
||||
sb.WriteRune('$')
|
||||
sb.WriteString(strconv.Itoa(l.nameToOrdinal[p]))
|
||||
}
|
||||
}
|
||||
|
||||
newArgs = make([]any, len(l.nameToOrdinal))
|
||||
for name, ordinal := range l.nameToOrdinal {
|
||||
var found bool
|
||||
newArgs[ordinal-1], found = na[string(name)]
|
||||
if isStrict && !found {
|
||||
return "", nil, fmt.Errorf("argument %s found in sql query but not present in StrictNamedArgs", name)
|
||||
}
|
||||
}
|
||||
|
||||
if isStrict {
|
||||
for name := range na {
|
||||
if _, found := l.nameToOrdinal[namedArg(name)]; !found {
|
||||
return "", nil, fmt.Errorf("argument %s of StrictNamedArgs not found in sql query", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return sb.String(), newArgs, nil
|
||||
}
|
||||
|
||||
func rawState(l *sqlLexer) stateFn {
|
||||
for {
|
||||
r, width := utf8.DecodeRuneInString(l.src[l.pos:])
|
||||
l.pos += width
|
||||
|
||||
switch r {
|
||||
case 'e', 'E':
|
||||
nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
|
||||
if nextRune == '\'' {
|
||||
l.pos += width
|
||||
return escapeStringState
|
||||
}
|
||||
case '\'':
|
||||
return singleQuoteState
|
||||
case '"':
|
||||
return doubleQuoteState
|
||||
case '@':
|
||||
nextRune, _ := utf8.DecodeRuneInString(l.src[l.pos:])
|
||||
if isLetter(nextRune) || nextRune == '_' {
|
||||
if l.pos-l.start > 0 {
|
||||
l.parts = append(l.parts, l.src[l.start:l.pos-width])
|
||||
}
|
||||
l.start = l.pos
|
||||
return namedArgState
|
||||
}
|
||||
case '-':
|
||||
nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
|
||||
if nextRune == '-' {
|
||||
l.pos += width
|
||||
return oneLineCommentState
|
||||
}
|
||||
case '/':
|
||||
nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
|
||||
if nextRune == '*' {
|
||||
l.pos += width
|
||||
return multilineCommentState
|
||||
}
|
||||
case utf8.RuneError:
|
||||
if l.pos-l.start > 0 {
|
||||
l.parts = append(l.parts, l.src[l.start:l.pos])
|
||||
l.start = l.pos
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func isLetter(r rune) bool {
|
||||
return (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z')
|
||||
}
|
||||
|
||||
func namedArgState(l *sqlLexer) stateFn {
|
||||
for {
|
||||
r, width := utf8.DecodeRuneInString(l.src[l.pos:])
|
||||
l.pos += width
|
||||
|
||||
if r == utf8.RuneError {
|
||||
if l.pos-l.start > 0 {
|
||||
na := namedArg(l.src[l.start:l.pos])
|
||||
if _, found := l.nameToOrdinal[na]; !found {
|
||||
l.nameToOrdinal[na] = len(l.nameToOrdinal) + 1
|
||||
}
|
||||
l.parts = append(l.parts, na)
|
||||
l.start = l.pos
|
||||
}
|
||||
return nil
|
||||
} else if !(isLetter(r) || (r >= '0' && r <= '9') || r == '_') {
|
||||
l.pos -= width
|
||||
na := namedArg(l.src[l.start:l.pos])
|
||||
if _, found := l.nameToOrdinal[na]; !found {
|
||||
l.nameToOrdinal[na] = len(l.nameToOrdinal) + 1
|
||||
}
|
||||
l.parts = append(l.parts, namedArg(na))
|
||||
l.start = l.pos
|
||||
return rawState
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func singleQuoteState(l *sqlLexer) stateFn {
|
||||
for {
|
||||
r, width := utf8.DecodeRuneInString(l.src[l.pos:])
|
||||
l.pos += width
|
||||
|
||||
switch r {
|
||||
case '\'':
|
||||
nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
|
||||
if nextRune != '\'' {
|
||||
return rawState
|
||||
}
|
||||
l.pos += width
|
||||
case utf8.RuneError:
|
||||
if l.pos-l.start > 0 {
|
||||
l.parts = append(l.parts, l.src[l.start:l.pos])
|
||||
l.start = l.pos
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func doubleQuoteState(l *sqlLexer) stateFn {
|
||||
for {
|
||||
r, width := utf8.DecodeRuneInString(l.src[l.pos:])
|
||||
l.pos += width
|
||||
|
||||
switch r {
|
||||
case '"':
|
||||
nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
|
||||
if nextRune != '"' {
|
||||
return rawState
|
||||
}
|
||||
l.pos += width
|
||||
case utf8.RuneError:
|
||||
if l.pos-l.start > 0 {
|
||||
l.parts = append(l.parts, l.src[l.start:l.pos])
|
||||
l.start = l.pos
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func escapeStringState(l *sqlLexer) stateFn {
|
||||
for {
|
||||
r, width := utf8.DecodeRuneInString(l.src[l.pos:])
|
||||
l.pos += width
|
||||
|
||||
switch r {
|
||||
case '\\':
|
||||
_, width = utf8.DecodeRuneInString(l.src[l.pos:])
|
||||
l.pos += width
|
||||
case '\'':
|
||||
nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
|
||||
if nextRune != '\'' {
|
||||
return rawState
|
||||
}
|
||||
l.pos += width
|
||||
case utf8.RuneError:
|
||||
if l.pos-l.start > 0 {
|
||||
l.parts = append(l.parts, l.src[l.start:l.pos])
|
||||
l.start = l.pos
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func oneLineCommentState(l *sqlLexer) stateFn {
|
||||
for {
|
||||
r, width := utf8.DecodeRuneInString(l.src[l.pos:])
|
||||
l.pos += width
|
||||
|
||||
switch r {
|
||||
case '\\':
|
||||
_, width = utf8.DecodeRuneInString(l.src[l.pos:])
|
||||
l.pos += width
|
||||
case '\n', '\r':
|
||||
return rawState
|
||||
case utf8.RuneError:
|
||||
if l.pos-l.start > 0 {
|
||||
l.parts = append(l.parts, l.src[l.start:l.pos])
|
||||
l.start = l.pos
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func multilineCommentState(l *sqlLexer) stateFn {
|
||||
for {
|
||||
r, width := utf8.DecodeRuneInString(l.src[l.pos:])
|
||||
l.pos += width
|
||||
|
||||
switch r {
|
||||
case '/':
|
||||
nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
|
||||
if nextRune == '*' {
|
||||
l.pos += width
|
||||
l.nested++
|
||||
}
|
||||
case '*':
|
||||
nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
|
||||
if nextRune != '/' {
|
||||
continue
|
||||
}
|
||||
|
||||
l.pos += width
|
||||
if l.nested == 0 {
|
||||
return rawState
|
||||
}
|
||||
l.nested--
|
||||
|
||||
case utf8.RuneError:
|
||||
if l.pos-l.start > 0 {
|
||||
l.parts = append(l.parts, l.src[l.start:l.pos])
|
||||
l.start = l.pos
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
@ -1,162 +0,0 @@
|
||||
package pgx_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNamedArgsRewriteQuery(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
for i, tt := range []struct {
|
||||
sql string
|
||||
args []any
|
||||
namedArgs pgx.NamedArgs
|
||||
expectedSQL string
|
||||
expectedArgs []any
|
||||
}{
|
||||
{
|
||||
sql: "select * from users where id = @id",
|
||||
namedArgs: pgx.NamedArgs{"id": int32(42)},
|
||||
expectedSQL: "select * from users where id = $1",
|
||||
expectedArgs: []any{int32(42)},
|
||||
},
|
||||
{
|
||||
sql: "select * from t where foo < @abc and baz = @def and bar < @abc",
|
||||
namedArgs: pgx.NamedArgs{"abc": int32(42), "def": int32(1)},
|
||||
expectedSQL: "select * from t where foo < $1 and baz = $2 and bar < $1",
|
||||
expectedArgs: []any{int32(42), int32(1)},
|
||||
},
|
||||
{
|
||||
sql: "select @a::int, @b::text",
|
||||
namedArgs: pgx.NamedArgs{"a": int32(42), "b": "foo"},
|
||||
expectedSQL: "select $1::int, $2::text",
|
||||
expectedArgs: []any{int32(42), "foo"},
|
||||
},
|
||||
{
|
||||
sql: "select @Abc::int, @b_4::text, @_c::int",
|
||||
namedArgs: pgx.NamedArgs{"Abc": int32(42), "b_4": "foo", "_c": int32(1)},
|
||||
expectedSQL: "select $1::int, $2::text, $3::int",
|
||||
expectedArgs: []any{int32(42), "foo", int32(1)},
|
||||
},
|
||||
{
|
||||
sql: "at end @",
|
||||
namedArgs: pgx.NamedArgs{"a": int32(42), "b": "foo"},
|
||||
expectedSQL: "at end @",
|
||||
expectedArgs: []any{},
|
||||
},
|
||||
{
|
||||
sql: "ignores without valid character after @ foo bar",
|
||||
namedArgs: pgx.NamedArgs{"a": int32(42), "b": "foo"},
|
||||
expectedSQL: "ignores without valid character after @ foo bar",
|
||||
expectedArgs: []any{},
|
||||
},
|
||||
{
|
||||
sql: "name cannot start with number @1 foo bar",
|
||||
namedArgs: pgx.NamedArgs{"a": int32(42), "b": "foo"},
|
||||
expectedSQL: "name cannot start with number @1 foo bar",
|
||||
expectedArgs: []any{},
|
||||
},
|
||||
{
|
||||
sql: `select *, '@foo' as "@bar" from users where id = @id`,
|
||||
namedArgs: pgx.NamedArgs{"id": int32(42)},
|
||||
expectedSQL: `select *, '@foo' as "@bar" from users where id = $1`,
|
||||
expectedArgs: []any{int32(42)},
|
||||
},
|
||||
{
|
||||
sql: `select * -- @foo
|
||||
from users -- @single line comments
|
||||
where id = @id;`,
|
||||
namedArgs: pgx.NamedArgs{"id": int32(42)},
|
||||
expectedSQL: `select * -- @foo
|
||||
from users -- @single line comments
|
||||
where id = $1;`,
|
||||
expectedArgs: []any{int32(42)},
|
||||
},
|
||||
{
|
||||
sql: `select * /* @multi line
|
||||
@comment
|
||||
*/
|
||||
/* /* with @nesting */ */
|
||||
from users
|
||||
where id = @id;`,
|
||||
namedArgs: pgx.NamedArgs{"id": int32(42)},
|
||||
expectedSQL: `select * /* @multi line
|
||||
@comment
|
||||
*/
|
||||
/* /* with @nesting */ */
|
||||
from users
|
||||
where id = $1;`,
|
||||
expectedArgs: []any{int32(42)},
|
||||
},
|
||||
{
|
||||
sql: "extra provided argument",
|
||||
namedArgs: pgx.NamedArgs{"extra": int32(1)},
|
||||
expectedSQL: "extra provided argument",
|
||||
expectedArgs: []any{},
|
||||
},
|
||||
{
|
||||
sql: "@missing argument",
|
||||
namedArgs: pgx.NamedArgs{},
|
||||
expectedSQL: "$1 argument",
|
||||
expectedArgs: []any{nil},
|
||||
},
|
||||
|
||||
// test comments and quotes
|
||||
} {
|
||||
sql, args, err := tt.namedArgs.RewriteQuery(context.Background(), nil, tt.sql, tt.args)
|
||||
require.NoError(t, err)
|
||||
assert.Equalf(t, tt.expectedSQL, sql, "%d", i)
|
||||
assert.Equalf(t, tt.expectedArgs, args, "%d", i)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStrictNamedArgsRewriteQuery(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
for i, tt := range []struct {
|
||||
sql string
|
||||
namedArgs pgx.StrictNamedArgs
|
||||
expectedSQL string
|
||||
expectedArgs []any
|
||||
isExpectedError bool
|
||||
}{
|
||||
{
|
||||
sql: "no arguments",
|
||||
namedArgs: pgx.StrictNamedArgs{},
|
||||
expectedSQL: "no arguments",
|
||||
expectedArgs: []any{},
|
||||
isExpectedError: false,
|
||||
},
|
||||
{
|
||||
sql: "@all @matches",
|
||||
namedArgs: pgx.StrictNamedArgs{"all": int32(1), "matches": int32(2)},
|
||||
expectedSQL: "$1 $2",
|
||||
expectedArgs: []any{int32(1), int32(2)},
|
||||
isExpectedError: false,
|
||||
},
|
||||
{
|
||||
sql: "extra provided argument",
|
||||
namedArgs: pgx.StrictNamedArgs{"extra": int32(1)},
|
||||
isExpectedError: true,
|
||||
},
|
||||
{
|
||||
sql: "@missing argument",
|
||||
namedArgs: pgx.StrictNamedArgs{},
|
||||
isExpectedError: true,
|
||||
},
|
||||
} {
|
||||
sql, args, err := tt.namedArgs.RewriteQuery(context.Background(), nil, tt.sql, nil)
|
||||
if tt.isExpectedError {
|
||||
assert.Errorf(t, err, "%d", i)
|
||||
} else {
|
||||
require.NoErrorf(t, err, "%d", i)
|
||||
assert.Equalf(t, tt.expectedSQL, sql, "%d", i)
|
||||
assert.Equalf(t, tt.expectedArgs, args, "%d", i)
|
||||
}
|
||||
}
|
||||
}
|
@ -5,7 +5,9 @@ import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgconn"
|
||||
"github.com/jackc/pgconn/stmtcache"
|
||||
"github.com/jackc/pgx/v4"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
@ -17,8 +19,9 @@ func TestPgbouncerStatementCacheDescribe(t *testing.T) {
|
||||
}
|
||||
|
||||
config := mustParseConfig(t, connString)
|
||||
config.DefaultQueryExecMode = pgx.QueryExecModeCacheDescribe
|
||||
config.DescriptionCacheCapacity = 1024
|
||||
config.BuildStatementCache = func(conn *pgconn.PgConn) stmtcache.Cache {
|
||||
return stmtcache.New(conn, stmtcache.ModeDescribe, 1024)
|
||||
}
|
||||
|
||||
testPgbouncer(t, config, 10, 100)
|
||||
}
|
||||
@ -30,7 +33,8 @@ func TestPgbouncerSimpleProtocol(t *testing.T) {
|
||||
}
|
||||
|
||||
config := mustParseConfig(t, connString)
|
||||
config.DefaultQueryExecMode = pgx.QueryExecModeSimpleProtocol
|
||||
config.BuildStatementCache = nil
|
||||
config.PreferSimpleProtocol = true
|
||||
|
||||
testPgbouncer(t, config, 10, 100)
|
||||
}
|
||||
@ -72,4 +76,5 @@ func testPgbouncer(t *testing.T, config *pgx.ConnConfig, workers, iterations int
|
||||
for i := 0; i < workers; i++ {
|
||||
<-doneChan
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -1,29 +0,0 @@
|
||||
# pgconn
|
||||
|
||||
Package pgconn is a low-level PostgreSQL database driver. It operates at nearly the same level as the C library libpq.
|
||||
It is primarily intended to serve as the foundation for higher level libraries such as https://github.com/jackc/pgx.
|
||||
Applications should handle normal queries with a higher level library and only use pgconn directly when required for
|
||||
low-level access to PostgreSQL functionality.
|
||||
|
||||
## Example Usage
|
||||
|
||||
```go
|
||||
pgConn, err := pgconn.Connect(context.Background(), os.Getenv("DATABASE_URL"))
|
||||
if err != nil {
|
||||
log.Fatalln("pgconn failed to connect:", err)
|
||||
}
|
||||
defer pgConn.Close(context.Background())
|
||||
|
||||
result := pgConn.ExecParams(context.Background(), "SELECT email FROM users WHERE id=$1", [][]byte{[]byte("123")}, nil, nil, nil)
|
||||
for result.NextRow() {
|
||||
fmt.Println("User 123 has email:", string(result.Values()[0]))
|
||||
}
|
||||
_, err = result.Close()
|
||||
if err != nil {
|
||||
log.Fatalln("failed reading result:", err)
|
||||
}
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
See CONTRIBUTING.md for setup instructions.
|
@ -1,272 +0,0 @@
|
||||
// SCRAM-SHA-256 authentication
|
||||
//
|
||||
// Resources:
|
||||
// https://tools.ietf.org/html/rfc5802
|
||||
// https://tools.ietf.org/html/rfc8265
|
||||
// https://www.postgresql.org/docs/current/sasl-authentication.html
|
||||
//
|
||||
// Inspiration drawn from other implementations:
|
||||
// https://github.com/lib/pq/pull/608
|
||||
// https://github.com/lib/pq/pull/788
|
||||
// https://github.com/lib/pq/pull/833
|
||||
|
||||
package pgconn
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/hmac"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgproto3"
|
||||
"golang.org/x/crypto/pbkdf2"
|
||||
"golang.org/x/text/secure/precis"
|
||||
)
|
||||
|
||||
const clientNonceLen = 18
|
||||
|
||||
// Perform SCRAM authentication.
|
||||
func (c *PgConn) scramAuth(serverAuthMechanisms []string) error {
|
||||
sc, err := newScramClient(serverAuthMechanisms, c.config.Password)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Send client-first-message in a SASLInitialResponse
|
||||
saslInitialResponse := &pgproto3.SASLInitialResponse{
|
||||
AuthMechanism: "SCRAM-SHA-256",
|
||||
Data: sc.clientFirstMessage(),
|
||||
}
|
||||
c.frontend.Send(saslInitialResponse)
|
||||
err = c.flushWithPotentialWriteReadDeadlock()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Receive server-first-message payload in an AuthenticationSASLContinue.
|
||||
saslContinue, err := c.rxSASLContinue()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = sc.recvServerFirstMessage(saslContinue.Data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Send client-final-message in a SASLResponse
|
||||
saslResponse := &pgproto3.SASLResponse{
|
||||
Data: []byte(sc.clientFinalMessage()),
|
||||
}
|
||||
c.frontend.Send(saslResponse)
|
||||
err = c.flushWithPotentialWriteReadDeadlock()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Receive server-final-message payload in an AuthenticationSASLFinal.
|
||||
saslFinal, err := c.rxSASLFinal()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return sc.recvServerFinalMessage(saslFinal.Data)
|
||||
}
|
||||
|
||||
func (c *PgConn) rxSASLContinue() (*pgproto3.AuthenticationSASLContinue, error) {
|
||||
msg, err := c.receiveMessage()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch m := msg.(type) {
|
||||
case *pgproto3.AuthenticationSASLContinue:
|
||||
return m, nil
|
||||
case *pgproto3.ErrorResponse:
|
||||
return nil, ErrorResponseToPgError(m)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("expected AuthenticationSASLContinue message but received unexpected message %T", msg)
|
||||
}
|
||||
|
||||
func (c *PgConn) rxSASLFinal() (*pgproto3.AuthenticationSASLFinal, error) {
|
||||
msg, err := c.receiveMessage()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch m := msg.(type) {
|
||||
case *pgproto3.AuthenticationSASLFinal:
|
||||
return m, nil
|
||||
case *pgproto3.ErrorResponse:
|
||||
return nil, ErrorResponseToPgError(m)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("expected AuthenticationSASLFinal message but received unexpected message %T", msg)
|
||||
}
|
||||
|
||||
type scramClient struct {
|
||||
serverAuthMechanisms []string
|
||||
password []byte
|
||||
clientNonce []byte
|
||||
|
||||
clientFirstMessageBare []byte
|
||||
|
||||
serverFirstMessage []byte
|
||||
clientAndServerNonce []byte
|
||||
salt []byte
|
||||
iterations int
|
||||
|
||||
saltedPassword []byte
|
||||
authMessage []byte
|
||||
}
|
||||
|
||||
func newScramClient(serverAuthMechanisms []string, password string) (*scramClient, error) {
|
||||
sc := &scramClient{
|
||||
serverAuthMechanisms: serverAuthMechanisms,
|
||||
}
|
||||
|
||||
// Ensure server supports SCRAM-SHA-256
|
||||
hasScramSHA256 := false
|
||||
for _, mech := range sc.serverAuthMechanisms {
|
||||
if mech == "SCRAM-SHA-256" {
|
||||
hasScramSHA256 = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !hasScramSHA256 {
|
||||
return nil, errors.New("server does not support SCRAM-SHA-256")
|
||||
}
|
||||
|
||||
// precis.OpaqueString is equivalent to SASLprep for password.
|
||||
var err error
|
||||
sc.password, err = precis.OpaqueString.Bytes([]byte(password))
|
||||
if err != nil {
|
||||
// PostgreSQL allows passwords invalid according to SCRAM / SASLprep.
|
||||
sc.password = []byte(password)
|
||||
}
|
||||
|
||||
buf := make([]byte, clientNonceLen)
|
||||
_, err = rand.Read(buf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sc.clientNonce = make([]byte, base64.RawStdEncoding.EncodedLen(len(buf)))
|
||||
base64.RawStdEncoding.Encode(sc.clientNonce, buf)
|
||||
|
||||
return sc, nil
|
||||
}
|
||||
|
||||
func (sc *scramClient) clientFirstMessage() []byte {
|
||||
sc.clientFirstMessageBare = []byte(fmt.Sprintf("n=,r=%s", sc.clientNonce))
|
||||
return []byte(fmt.Sprintf("n,,%s", sc.clientFirstMessageBare))
|
||||
}
|
||||
|
||||
func (sc *scramClient) recvServerFirstMessage(serverFirstMessage []byte) error {
|
||||
sc.serverFirstMessage = serverFirstMessage
|
||||
buf := serverFirstMessage
|
||||
if !bytes.HasPrefix(buf, []byte("r=")) {
|
||||
return errors.New("invalid SCRAM server-first-message received from server: did not include r=")
|
||||
}
|
||||
buf = buf[2:]
|
||||
|
||||
idx := bytes.IndexByte(buf, ',')
|
||||
if idx == -1 {
|
||||
return errors.New("invalid SCRAM server-first-message received from server: did not include s=")
|
||||
}
|
||||
sc.clientAndServerNonce = buf[:idx]
|
||||
buf = buf[idx+1:]
|
||||
|
||||
if !bytes.HasPrefix(buf, []byte("s=")) {
|
||||
return errors.New("invalid SCRAM server-first-message received from server: did not include s=")
|
||||
}
|
||||
buf = buf[2:]
|
||||
|
||||
idx = bytes.IndexByte(buf, ',')
|
||||
if idx == -1 {
|
||||
return errors.New("invalid SCRAM server-first-message received from server: did not include i=")
|
||||
}
|
||||
saltStr := buf[:idx]
|
||||
buf = buf[idx+1:]
|
||||
|
||||
if !bytes.HasPrefix(buf, []byte("i=")) {
|
||||
return errors.New("invalid SCRAM server-first-message received from server: did not include i=")
|
||||
}
|
||||
buf = buf[2:]
|
||||
iterationsStr := buf
|
||||
|
||||
var err error
|
||||
sc.salt, err = base64.StdEncoding.DecodeString(string(saltStr))
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid SCRAM salt received from server: %w", err)
|
||||
}
|
||||
|
||||
sc.iterations, err = strconv.Atoi(string(iterationsStr))
|
||||
if err != nil || sc.iterations <= 0 {
|
||||
return fmt.Errorf("invalid SCRAM iteration count received from server: %w", err)
|
||||
}
|
||||
|
||||
if !bytes.HasPrefix(sc.clientAndServerNonce, sc.clientNonce) {
|
||||
return errors.New("invalid SCRAM nonce: did not start with client nonce")
|
||||
}
|
||||
|
||||
if len(sc.clientAndServerNonce) <= len(sc.clientNonce) {
|
||||
return errors.New("invalid SCRAM nonce: did not include server nonce")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sc *scramClient) clientFinalMessage() string {
|
||||
clientFinalMessageWithoutProof := []byte(fmt.Sprintf("c=biws,r=%s", sc.clientAndServerNonce))
|
||||
|
||||
sc.saltedPassword = pbkdf2.Key([]byte(sc.password), sc.salt, sc.iterations, 32, sha256.New)
|
||||
sc.authMessage = bytes.Join([][]byte{sc.clientFirstMessageBare, sc.serverFirstMessage, clientFinalMessageWithoutProof}, []byte(","))
|
||||
|
||||
clientProof := computeClientProof(sc.saltedPassword, sc.authMessage)
|
||||
|
||||
return fmt.Sprintf("%s,p=%s", clientFinalMessageWithoutProof, clientProof)
|
||||
}
|
||||
|
||||
func (sc *scramClient) recvServerFinalMessage(serverFinalMessage []byte) error {
|
||||
if !bytes.HasPrefix(serverFinalMessage, []byte("v=")) {
|
||||
return errors.New("invalid SCRAM server-final-message received from server")
|
||||
}
|
||||
|
||||
serverSignature := serverFinalMessage[2:]
|
||||
|
||||
if !hmac.Equal(serverSignature, computeServerSignature(sc.saltedPassword, sc.authMessage)) {
|
||||
return errors.New("invalid SCRAM ServerSignature received from server")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func computeHMAC(key, msg []byte) []byte {
|
||||
mac := hmac.New(sha256.New, key)
|
||||
mac.Write(msg)
|
||||
return mac.Sum(nil)
|
||||
}
|
||||
|
||||
func computeClientProof(saltedPassword, authMessage []byte) []byte {
|
||||
clientKey := computeHMAC(saltedPassword, []byte("Client Key"))
|
||||
storedKey := sha256.Sum256(clientKey)
|
||||
clientSignature := computeHMAC(storedKey[:], authMessage)
|
||||
|
||||
clientProof := make([]byte, len(clientSignature))
|
||||
for i := 0; i < len(clientSignature); i++ {
|
||||
clientProof[i] = clientKey[i] ^ clientSignature[i]
|
||||
}
|
||||
|
||||
buf := make([]byte, base64.StdEncoding.EncodedLen(len(clientProof)))
|
||||
base64.StdEncoding.Encode(buf, clientProof)
|
||||
return buf
|
||||
}
|
||||
|
||||
func computeServerSignature(saltedPassword, authMessage []byte) []byte {
|
||||
serverKey := computeHMAC(saltedPassword, []byte("Server Key"))
|
||||
serverSignature := computeHMAC(serverKey, authMessage)
|
||||
buf := make([]byte, base64.StdEncoding.EncodedLen(len(serverSignature)))
|
||||
base64.StdEncoding.Encode(buf, serverSignature)
|
||||
return buf
|
||||
}
|
@ -1,73 +0,0 @@
|
||||
package pgconn
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkCommandTagRowsAffected(b *testing.B) {
|
||||
benchmarks := []struct {
|
||||
commandTag string
|
||||
rowsAffected int64
|
||||
}{
|
||||
{"UPDATE 1", 1},
|
||||
{"UPDATE 123456789", 123456789},
|
||||
{"INSERT 0 1", 1},
|
||||
{"INSERT 0 123456789", 123456789},
|
||||
}
|
||||
|
||||
for _, bm := range benchmarks {
|
||||
ct := CommandTag{s: bm.commandTag}
|
||||
b.Run(bm.commandTag, func(b *testing.B) {
|
||||
var n int64
|
||||
for i := 0; i < b.N; i++ {
|
||||
n = ct.RowsAffected()
|
||||
}
|
||||
if n != bm.rowsAffected {
|
||||
b.Errorf("expected %d got %d", bm.rowsAffected, n)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCommandTagTypeFromString(b *testing.B) {
|
||||
ct := CommandTag{s: "UPDATE 1"}
|
||||
|
||||
var update bool
|
||||
for i := 0; i < b.N; i++ {
|
||||
update = strings.HasPrefix(ct.String(), "UPDATE")
|
||||
}
|
||||
if !update {
|
||||
b.Error("expected update")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCommandTagInsert(b *testing.B) {
|
||||
benchmarks := []struct {
|
||||
commandTag string
|
||||
is bool
|
||||
}{
|
||||
{"INSERT 1", true},
|
||||
{"INSERT 1234567890", true},
|
||||
{"UPDATE 1", false},
|
||||
{"UPDATE 1234567890", false},
|
||||
{"DELETE 1", false},
|
||||
{"DELETE 1234567890", false},
|
||||
{"SELECT 1", false},
|
||||
{"SELECT 1234567890", false},
|
||||
{"UNKNOWN 1234567890", false},
|
||||
}
|
||||
|
||||
for _, bm := range benchmarks {
|
||||
ct := CommandTag{s: bm.commandTag}
|
||||
b.Run(bm.commandTag, func(b *testing.B) {
|
||||
var is bool
|
||||
for i := 0; i < b.N; i++ {
|
||||
is = ct.Insert()
|
||||
}
|
||||
if is != bm.is {
|
||||
b.Errorf("expected %v got %v", bm.is, is)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
@ -1,250 +0,0 @@
|
||||
package pgconn_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func BenchmarkConnect(b *testing.B) {
|
||||
benchmarks := []struct {
|
||||
name string
|
||||
env string
|
||||
}{
|
||||
{"Unix socket", "PGX_TEST_UNIX_SOCKET_CONN_STRING"},
|
||||
{"TCP", "PGX_TEST_TCP_CONN_STRING"},
|
||||
}
|
||||
|
||||
for _, bm := range benchmarks {
|
||||
bm := bm
|
||||
b.Run(bm.name, func(b *testing.B) {
|
||||
connString := os.Getenv(bm.env)
|
||||
if connString == "" {
|
||||
b.Skipf("Skipping due to missing environment variable %v", bm.env)
|
||||
}
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
conn, err := pgconn.Connect(context.Background(), connString)
|
||||
require.Nil(b, err)
|
||||
|
||||
err = conn.Close(context.Background())
|
||||
require.Nil(b, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkExec(b *testing.B) {
|
||||
expectedValues := [][]byte{[]byte("hello"), []byte("42"), []byte("2019-01-01")}
|
||||
benchmarks := []struct {
|
||||
name string
|
||||
ctx context.Context
|
||||
}{
|
||||
// Using an empty context other than context.Background() to compare
|
||||
// performance
|
||||
{"background context", context.Background()},
|
||||
{"empty context", context.TODO()},
|
||||
}
|
||||
|
||||
for _, bm := range benchmarks {
|
||||
bm := bm
|
||||
b.Run(bm.name, func(b *testing.B) {
|
||||
conn, err := pgconn.Connect(bm.ctx, os.Getenv("PGX_TEST_DATABASE"))
|
||||
require.Nil(b, err)
|
||||
defer closeConn(b, conn)
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
mrr := conn.Exec(bm.ctx, "select 'hello'::text as a, 42::int4 as b, '2019-01-01'::date")
|
||||
|
||||
for mrr.NextResult() {
|
||||
rr := mrr.ResultReader()
|
||||
|
||||
rowCount := 0
|
||||
for rr.NextRow() {
|
||||
rowCount++
|
||||
if len(rr.Values()) != len(expectedValues) {
|
||||
b.Fatalf("unexpected number of values: %d", len(rr.Values()))
|
||||
}
|
||||
for i := range rr.Values() {
|
||||
if !bytes.Equal(rr.Values()[i], expectedValues[i]) {
|
||||
b.Fatalf("unexpected values: %s %s", rr.Values()[i], expectedValues[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
_, err = rr.Close()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
if rowCount != 1 {
|
||||
b.Fatalf("unexpected rowCount: %d", rowCount)
|
||||
}
|
||||
}
|
||||
|
||||
err := mrr.Close()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkExecPossibleToCancel is the same workload as BenchmarkExec, but the
// query context comes from context.WithCancel, so each Exec call must arm
// whatever cancellation machinery the connection uses for cancelable contexts.
func BenchmarkExecPossibleToCancel(b *testing.B) {
	conn, err := pgconn.Connect(context.Background(), os.Getenv("PGX_TEST_DATABASE"))
	require.Nil(b, err)
	defer closeConn(b, conn)

	// Expected text-format values for: 'hello'::text, 42::int4, '2019-01-01'::date.
	expectedValues := [][]byte{[]byte("hello"), []byte("42"), []byte("2019-01-01")}

	// Exclude connection setup from the timed region.
	b.ResetTimer()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	for i := 0; i < b.N; i++ {
		mrr := conn.Exec(ctx, "select 'hello'::text as a, 42::int4 as b, '2019-01-01'::date")

		// Drain and validate every result.
		for mrr.NextResult() {
			rr := mrr.ResultReader()

			rowCount := 0
			for rr.NextRow() {
				rowCount++
				if len(rr.Values()) != len(expectedValues) {
					b.Fatalf("unexpected number of values: %d", len(rr.Values()))
				}
				for i := range rr.Values() {
					if !bytes.Equal(rr.Values()[i], expectedValues[i]) {
						b.Fatalf("unexpected values: %s %s", rr.Values()[i], expectedValues[i])
					}
				}
			}
			_, err = rr.Close()
			if err != nil {
				b.Fatal(err)
			}
			if rowCount != 1 {
				b.Fatalf("unexpected rowCount: %d", rowCount)
			}
		}

		err := mrr.Close()
		if err != nil {
			b.Fatal(err)
		}
	}
}
|
||||
|
||||
// BenchmarkExecPrepared measures PgConn.ExecPrepared for a trivial
// three-column query using a statement prepared once before the timed loop.
// It runs two sub-benchmarks to compare context.Background() with
// context.TODO().
func BenchmarkExecPrepared(b *testing.B) {
	// Expected text-format values for: 'hello'::text, 42::int4, '2019-01-01'::date.
	expectedValues := [][]byte{[]byte("hello"), []byte("42"), []byte("2019-01-01")}

	benchmarks := []struct {
		name string
		ctx  context.Context
	}{
		// Using an empty context other than context.Background() to compare
		// performance
		{"background context", context.Background()},
		{"empty context", context.TODO()},
	}

	for _, bm := range benchmarks {
		bm := bm // capture range variable for the closure (pre-Go 1.22 semantics)
		b.Run(bm.name, func(b *testing.B) {
			conn, err := pgconn.Connect(bm.ctx, os.Getenv("PGX_TEST_DATABASE"))
			require.Nil(b, err)
			defer closeConn(b, conn)

			_, err = conn.Prepare(bm.ctx, "ps1", "select 'hello'::text as a, 42::int4 as b, '2019-01-01'::date", nil)
			require.Nil(b, err)

			// Exclude connect and prepare from the timed region.
			b.ResetTimer()

			for i := 0; i < b.N; i++ {
				rr := conn.ExecPrepared(bm.ctx, "ps1", nil, nil, nil)

				// Drain and validate the single expected row.
				rowCount := 0
				for rr.NextRow() {
					rowCount++
					if len(rr.Values()) != len(expectedValues) {
						b.Fatalf("unexpected number of values: %d", len(rr.Values()))
					}
					for i := range rr.Values() {
						if !bytes.Equal(rr.Values()[i], expectedValues[i]) {
							b.Fatalf("unexpected values: %s %s", rr.Values()[i], expectedValues[i])
						}
					}
				}
				_, err = rr.Close()
				if err != nil {
					b.Fatal(err)
				}
				if rowCount != 1 {
					b.Fatalf("unexpected rowCount: %d", rowCount)
				}
			}
		})
	}
}
|
||||
|
||||
func BenchmarkExecPreparedPossibleToCancel(b *testing.B) {
|
||||
conn, err := pgconn.Connect(context.Background(), os.Getenv("PGX_TEST_DATABASE"))
|
||||
require.Nil(b, err)
|
||||
defer closeConn(b, conn)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
_, err = conn.Prepare(ctx, "ps1", "select 'hello'::text as a, 42::int4 as b, '2019-01-01'::date", nil)
|
||||
require.Nil(b, err)
|
||||
|
||||
expectedValues := [][]byte{[]byte("hello"), []byte("42"), []byte("2019-01-01")}
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
rr := conn.ExecPrepared(ctx, "ps1", nil, nil, nil)
|
||||
|
||||
rowCount := 0
|
||||
for rr.NextRow() {
|
||||
rowCount += 1
|
||||
if len(rr.Values()) != len(expectedValues) {
|
||||
b.Fatalf("unexpected number of values: %d", len(rr.Values()))
|
||||
}
|
||||
for i := range rr.Values() {
|
||||
if !bytes.Equal(rr.Values()[i], expectedValues[i]) {
|
||||
b.Fatalf("unexpected values: %s %s", rr.Values()[i], expectedValues[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
_, err = rr.Close()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
if rowCount != 1 {
|
||||
b.Fatalf("unexpected rowCount: %d", rowCount)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// func BenchmarkChanToSetDeadlinePossibleToCancel(b *testing.B) {
|
||||
// conn, err := pgconn.Connect(context.Background(), os.Getenv("PGX_TEST_DATABASE"))
|
||||
// require.Nil(b, err)
|
||||
// defer closeConn(b, conn)
|
||||
|
||||
// ctx, cancel := context.WithCancel(context.Background())
|
||||
// defer cancel()
|
||||
|
||||
// b.ResetTimer()
|
||||
|
||||
// for i := 0; i < b.N; i++ {
|
||||
// conn.ChanToSetDeadline().Watch(ctx)
|
||||
// conn.ChanToSetDeadline().Ignore()
|
||||
// }
|
||||
// }
|
pgconn/config.go — 953 lines
(diff hunk: @@ -1,953 +0,0 @@ — the entire file is absent from the compared revision)
|
||||
package pgconn
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/jackc/pgpassfile"
|
||||
"github.com/jackc/pgservicefile"
|
||||
"github.com/jackc/pgx/v5/pgconn/ctxwatch"
|
||||
"github.com/jackc/pgx/v5/pgproto3"
|
||||
)
|
||||
|
||||
type (
	// AfterConnectFunc is the signature of the Config.AfterConnect callback,
	// invoked after a connection is established and validated.
	AfterConnectFunc func(ctx context.Context, pgconn *PgConn) error

	// ValidateConnectFunc is the signature of the Config.ValidateConnect
	// callback, invoked after successful authentication to accept or reject
	// the server; returning an error closes the connection and moves on to
	// the next fallback.
	ValidateConnectFunc func(ctx context.Context, pgconn *PgConn) error

	// GetSSLPasswordFunc is the signature of the
	// ParseConfigOptions.GetSSLPassword callback, which supplies the password
	// used to decrypt an encrypted SSL client key.
	GetSSLPasswordFunc func(ctx context.Context) string
)
|
||||
|
||||
// Config is the settings used to establish a connection to a PostgreSQL server. It must be created by [ParseConfig]. A
// manually initialized Config will cause ConnectConfig to panic.
type Config struct {
	Host           string // host (e.g. localhost) or absolute path to unix domain socket directory (e.g. /private/tmp)
	Port           uint16
	Database       string
	User           string
	Password       string
	TLSConfig      *tls.Config // nil disables TLS
	ConnectTimeout time.Duration
	DialFunc       DialFunc   // e.g. net.Dialer.DialContext
	LookupFunc     LookupFunc // e.g. net.Resolver.LookupHost
	BuildFrontend  BuildFrontendFunc

	// BuildContextWatcherHandler is called to create a ContextWatcherHandler for a connection. The handler is called
	// when a context passed to a PgConn method is canceled.
	BuildContextWatcherHandler func(*PgConn) ctxwatch.Handler

	RuntimeParams map[string]string // Run-time parameters to set on connection as session default values (e.g. search_path or application_name)

	// Kerberos settings, populated from the krbsrvname and krbspn connection parameters.
	KerberosSrvName string
	KerberosSpn     string
	// Fallbacks are additional host/port/TLS combinations tried in order when the primary settings fail.
	Fallbacks []*FallbackConfig

	SSLNegotiation string // sslnegotiation=postgres or sslnegotiation=direct

	// ValidateConnect is called during a connection attempt after a successful authentication with the PostgreSQL server.
	// It can be used to validate that the server is acceptable. If this returns an error the connection is closed and the next
	// fallback config is tried. This allows implementing high availability behavior such as libpq does with target_session_attrs.
	ValidateConnect ValidateConnectFunc

	// AfterConnect is called after ValidateConnect. It can be used to set up the connection (e.g. Set session variables
	// or prepare statements). If this returns an error the connection attempt fails.
	AfterConnect AfterConnectFunc

	// OnNotice is a callback function called when a notice response is received.
	OnNotice NoticeHandler

	// OnNotification is a callback function called when a notification from the LISTEN/NOTIFY system is received.
	OnNotification NotificationHandler

	// OnPgError is a callback function called when a Postgres error is received by the server. The default handler will close
	// the connection on any FATAL errors. If you override this handler you should call the previously set handler or ensure
	// that you close on FATAL errors by returning false.
	OnPgError PgErrorHandler

	createdByParseConfig bool // Used to enforce created by ParseConfig rule.
}
|
||||
|
||||
// ParseConfigOptions contains options that control how a config is built such as GetSSLPassword.
// It is accepted by [ParseConfigWithOptions] for settings that cannot be expressed in a connection string.
type ParseConfigOptions struct {
	// GetSSLPassword gets the password to decrypt a SSL client certificate. This is analogous to the libpq function
	// PQsetSSLKeyPassHook_OpenSSL.
	GetSSLPassword GetSSLPasswordFunc
}
|
||||
|
||||
// Copy returns a deep copy of the config that is safe to use and modify.
|
||||
// The only exception is the TLSConfig field:
|
||||
// according to the tls.Config docs it must not be modified after creation.
|
||||
func (c *Config) Copy() *Config {
|
||||
newConf := new(Config)
|
||||
*newConf = *c
|
||||
if newConf.TLSConfig != nil {
|
||||
newConf.TLSConfig = c.TLSConfig.Clone()
|
||||
}
|
||||
if newConf.RuntimeParams != nil {
|
||||
newConf.RuntimeParams = make(map[string]string, len(c.RuntimeParams))
|
||||
for k, v := range c.RuntimeParams {
|
||||
newConf.RuntimeParams[k] = v
|
||||
}
|
||||
}
|
||||
if newConf.Fallbacks != nil {
|
||||
newConf.Fallbacks = make([]*FallbackConfig, len(c.Fallbacks))
|
||||
for i, fallback := range c.Fallbacks {
|
||||
newFallback := new(FallbackConfig)
|
||||
*newFallback = *fallback
|
||||
if newFallback.TLSConfig != nil {
|
||||
newFallback.TLSConfig = fallback.TLSConfig.Clone()
|
||||
}
|
||||
newConf.Fallbacks[i] = newFallback
|
||||
}
|
||||
}
|
||||
return newConf
|
||||
}
|
||||
|
||||
// FallbackConfig is additional settings to attempt a connection with when the primary Config fails to establish a
// network connection. It is used for TLS fallback such as sslmode=prefer and high availability (HA) connections.
type FallbackConfig struct {
	Host      string // host (e.g. localhost) or path to unix domain socket directory (e.g. /private/tmp)
	Port      uint16
	TLSConfig *tls.Config // nil disables TLS
}
|
||||
|
||||
// connectOneConfig is the configuration for a single attempt to connect to a single host.
type connectOneConfig struct {
	network          string      // "tcp" or "unix", as produced by NetworkAddress
	address          string      // dial address corresponding to network
	originalHostname string      // original hostname before resolving
	tlsConfig        *tls.Config // nil disables TLS
}
|
||||
|
||||
// isAbsolutePath checks if the provided value is an absolute path either
// beginning with a forward slash (as on Linux-based systems) or with a capital
// letter A-Z followed by a colon and a backslash, e.g., "C:\", (as on Windows).
func isAbsolutePath(path string) bool {
	if strings.HasPrefix(path, "/") {
		return true
	}
	// Windows form: uppercase drive letter, then ":\".
	return len(path) >= 3 &&
		path[0] >= 'A' && path[0] <= 'Z' &&
		path[1] == ':' &&
		path[2] == '\\'
}
|
||||
|
||||
// NetworkAddress converts a PostgreSQL host and port into network and address suitable for use with
|
||||
// net.Dial.
|
||||
func NetworkAddress(host string, port uint16) (network, address string) {
|
||||
if isAbsolutePath(host) {
|
||||
network = "unix"
|
||||
address = filepath.Join(host, ".s.PGSQL.") + strconv.FormatInt(int64(port), 10)
|
||||
} else {
|
||||
network = "tcp"
|
||||
address = net.JoinHostPort(host, strconv.Itoa(int(port)))
|
||||
}
|
||||
return network, address
|
||||
}
|
||||
|
||||
// ParseConfig builds a *Config from connString with similar behavior to the PostgreSQL standard C library libpq. It
|
||||
// uses the same defaults as libpq (e.g. port=5432) and understands most PG* environment variables. ParseConfig closely
|
||||
// matches the parsing behavior of libpq. connString may either be in URL format or keyword = value format. See
|
||||
// https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING for details. connString also may be empty
|
||||
// to only read from the environment. If a password is not supplied it will attempt to read the .pgpass file.
|
||||
//
|
||||
// # Example Keyword/Value
|
||||
// user=jack password=secret host=pg.example.com port=5432 dbname=mydb sslmode=verify-ca
|
||||
//
|
||||
// # Example URL
|
||||
// postgres://jack:secret@pg.example.com:5432/mydb?sslmode=verify-ca
|
||||
//
|
||||
// The returned *Config may be modified. However, it is strongly recommended that any configuration that can be done
|
||||
// through the connection string be done there. In particular the fields Host, Port, TLSConfig, and Fallbacks can be
|
||||
// interdependent (e.g. TLSConfig needs knowledge of the host to validate the server certificate). These fields should
|
||||
// not be modified individually. They should all be modified or all left unchanged.
|
||||
//
|
||||
// ParseConfig supports specifying multiple hosts in similar manner to libpq. Host and port may include comma separated
|
||||
// values that will be tried in order. This can be used as part of a high availability system. See
|
||||
// https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-MULTIPLE-HOSTS for more information.
|
||||
//
|
||||
// # Example URL
|
||||
// postgres://jack:secret@foo.example.com:5432,bar.example.com:5432/mydb
|
||||
//
|
||||
// ParseConfig currently recognizes the following environment variable and their parameter key word equivalents passed
|
||||
// via database URL or keyword/value:
|
||||
//
|
||||
// PGHOST
|
||||
// PGPORT
|
||||
// PGDATABASE
|
||||
// PGUSER
|
||||
// PGPASSWORD
|
||||
// PGPASSFILE
|
||||
// PGSERVICE
|
||||
// PGSERVICEFILE
|
||||
// PGSSLMODE
|
||||
// PGSSLCERT
|
||||
// PGSSLKEY
|
||||
// PGSSLROOTCERT
|
||||
// PGSSLPASSWORD
|
||||
// PGOPTIONS
|
||||
// PGAPPNAME
|
||||
// PGCONNECT_TIMEOUT
|
||||
// PGTARGETSESSIONATTRS
|
||||
// PGTZ
|
||||
//
|
||||
// See http://www.postgresql.org/docs/current/static/libpq-envars.html for details on the meaning of environment variables.
|
||||
//
|
||||
// See https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS for parameter key word names. They are
|
||||
// usually but not always the environment variable name downcased and without the "PG" prefix.
|
||||
//
|
||||
// Important Security Notes:
|
||||
//
|
||||
// ParseConfig tries to match libpq behavior with regard to PGSSLMODE. This includes defaulting to "prefer" behavior if
|
||||
// not set.
|
||||
//
|
||||
// See http://www.postgresql.org/docs/current/static/libpq-ssl.html#LIBPQ-SSL-PROTECTION for details on what level of
|
||||
// security each sslmode provides.
|
||||
//
|
||||
// The sslmode "prefer" (the default), sslmode "allow", and multiple hosts are implemented via the Fallbacks field of
|
||||
// the Config struct. If TLSConfig is manually changed it will not affect the fallbacks. For example, in the case of
|
||||
// sslmode "prefer" this means it will first try the main Config settings which use TLS, then it will try the fallback
|
||||
// which does not use TLS. This can lead to an unexpected unencrypted connection if the main TLS config is manually
|
||||
// changed later but the unencrypted fallback is present. Ensure there are no stale fallbacks when manually setting
|
||||
// TLSConfig.
|
||||
//
|
||||
// Other known differences with libpq:
|
||||
//
|
||||
// When multiple hosts are specified, libpq allows them to have different passwords set via the .pgpass file. pgconn
|
||||
// does not.
|
||||
//
|
||||
// In addition, ParseConfig accepts the following options:
|
||||
//
|
||||
// - servicefile.
|
||||
// libpq only reads servicefile from the PGSERVICEFILE environment variable. ParseConfig accepts servicefile as a
|
||||
// part of the connection string.
|
||||
func ParseConfig(connString string) (*Config, error) {
|
||||
var parseConfigOptions ParseConfigOptions
|
||||
return ParseConfigWithOptions(connString, parseConfigOptions)
|
||||
}
|
||||
|
||||
// ParseConfigWithOptions builds a *Config from connString and options with similar behavior to the PostgreSQL standard
// C library libpq. options contains settings that cannot be specified in a connString such as providing a function to
// get the SSL password.
func ParseConfigWithOptions(connString string, options ParseConfigOptions) (*Config, error) {
	defaultSettings := defaultSettings()
	envSettings := parseEnvSettings()

	connStringSettings := make(map[string]string)
	if connString != "" {
		var err error
		// connString may be a database URL or in PostgreSQL keyword/value format
		if strings.HasPrefix(connString, "postgres://") || strings.HasPrefix(connString, "postgresql://") {
			connStringSettings, err = parseURLSettings(connString)
			if err != nil {
				return nil, &ParseConfigError{ConnString: connString, msg: "failed to parse as URL", err: err}
			}
		} else {
			connStringSettings, err = parseKeywordValueSettings(connString)
			if err != nil {
				return nil, &ParseConfigError{ConnString: connString, msg: "failed to parse as keyword/value", err: err}
			}
		}
	}

	// Precedence (lowest to highest): built-in defaults, PG* environment
	// variables, then the connection string itself.
	settings := mergeSettings(defaultSettings, envSettings, connStringSettings)
	if service, present := settings["service"]; present {
		serviceSettings, err := parseServiceSettings(settings["servicefile"], service)
		if err != nil {
			return nil, &ParseConfigError{ConnString: connString, msg: "failed to read service", err: err}
		}

		// Service file settings sit between env vars and the explicit
		// connection string in precedence, so re-merge with them inserted.
		settings = mergeSettings(defaultSettings, envSettings, serviceSettings, connStringSettings)
	}

	config := &Config{
		createdByParseConfig: true,
		Database:             settings["database"],
		User:                 settings["user"],
		Password:             settings["password"],
		RuntimeParams:        make(map[string]string),
		BuildFrontend: func(r io.Reader, w io.Writer) *pgproto3.Frontend {
			return pgproto3.NewFrontend(r, w)
		},
		BuildContextWatcherHandler: func(pgConn *PgConn) ctxwatch.Handler {
			return &DeadlineContextWatcherHandler{Conn: pgConn.conn}
		},
		OnPgError: func(_ *PgConn, pgErr *PgError) bool {
			// we want to automatically close any fatal errors
			if strings.EqualFold(pgErr.Severity, "FATAL") {
				return false
			}
			return true
		},
	}

	if connectTimeoutSetting, present := settings["connect_timeout"]; present {
		connectTimeout, err := parseConnectTimeoutSetting(connectTimeoutSetting)
		if err != nil {
			return nil, &ParseConfigError{ConnString: connString, msg: "invalid connect_timeout", err: err}
		}
		config.ConnectTimeout = connectTimeout
		config.DialFunc = makeConnectTimeoutDialFunc(connectTimeout)
	} else {
		defaultDialer := makeDefaultDialer()
		config.DialFunc = defaultDialer.DialContext
	}

	config.LookupFunc = makeDefaultResolver().LookupHost

	// Settings consumed by pgconn itself; every other key becomes a server
	// run-time parameter (RuntimeParams) below.
	notRuntimeParams := map[string]struct{}{
		"host":                 {},
		"port":                 {},
		"database":             {},
		"user":                 {},
		"password":             {},
		"passfile":             {},
		"connect_timeout":      {},
		"sslmode":              {},
		"sslkey":               {},
		"sslcert":              {},
		"sslrootcert":          {},
		"sslnegotiation":       {},
		"sslpassword":          {},
		"sslsni":               {},
		"krbspn":               {},
		"krbsrvname":           {},
		"target_session_attrs": {},
		"service":              {},
		"servicefile":          {},
	}

	// Adding kerberos configuration
	if _, present := settings["krbsrvname"]; present {
		config.KerberosSrvName = settings["krbsrvname"]
	}
	if _, present := settings["krbspn"]; present {
		config.KerberosSpn = settings["krbspn"]
	}

	for k, v := range settings {
		if _, present := notRuntimeParams[k]; present {
			continue
		}
		config.RuntimeParams[k] = v
	}

	// Expand every host (and each host's TLS variants) into a fallback; the
	// first fallback becomes the primary config and the rest stay as Fallbacks.
	fallbacks := []*FallbackConfig{}

	hosts := strings.Split(settings["host"], ",")
	ports := strings.Split(settings["port"], ",")

	for i, host := range hosts {
		// If fewer ports than hosts were given, reuse the first port.
		var portStr string
		if i < len(ports) {
			portStr = ports[i]
		} else {
			portStr = ports[0]
		}

		port, err := parsePort(portStr)
		if err != nil {
			return nil, &ParseConfigError{ConnString: connString, msg: "invalid port", err: err}
		}

		var tlsConfigs []*tls.Config

		// Ignore TLS settings if Unix domain socket like libpq
		if network, _ := NetworkAddress(host, port); network == "unix" {
			tlsConfigs = append(tlsConfigs, nil)
		} else {
			var err error
			tlsConfigs, err = configTLS(settings, host, options)
			if err != nil {
				return nil, &ParseConfigError{ConnString: connString, msg: "failed to configure TLS", err: err}
			}
		}

		for _, tlsConfig := range tlsConfigs {
			fallbacks = append(fallbacks, &FallbackConfig{
				Host:      host,
				Port:      port,
				TLSConfig: tlsConfig,
			})
		}
	}

	config.Host = fallbacks[0].Host
	config.Port = fallbacks[0].Port
	config.TLSConfig = fallbacks[0].TLSConfig
	config.Fallbacks = fallbacks[1:]
	config.SSLNegotiation = settings["sslnegotiation"]

	// Reading .pgpass is best-effort like libpq: a read failure is ignored and
	// the passfile is only consulted when no password was supplied otherwise.
	passfile, err := pgpassfile.ReadPassfile(settings["passfile"])
	if err == nil {
		if config.Password == "" {
			host := config.Host
			if network, _ := NetworkAddress(config.Host, config.Port); network == "unix" {
				host = "localhost"
			}

			config.Password = passfile.FindPassword(host, strconv.Itoa(int(config.Port)), config.Database, config.User)
		}
	}

	// Map target_session_attrs to the matching ValidateConnect hook.
	switch tsa := settings["target_session_attrs"]; tsa {
	case "read-write":
		config.ValidateConnect = ValidateConnectTargetSessionAttrsReadWrite
	case "read-only":
		config.ValidateConnect = ValidateConnectTargetSessionAttrsReadOnly
	case "primary":
		config.ValidateConnect = ValidateConnectTargetSessionAttrsPrimary
	case "standby":
		config.ValidateConnect = ValidateConnectTargetSessionAttrsStandby
	case "prefer-standby":
		config.ValidateConnect = ValidateConnectTargetSessionAttrsPreferStandby
	case "any":
		// do nothing
	default:
		return nil, &ParseConfigError{ConnString: connString, msg: fmt.Sprintf("unknown target_session_attrs value: %v", tsa)}
	}

	return config, nil
}
|
||||
|
||||
// mergeSettings flattens the given setting maps into a single map. Maps are
// applied in argument order, so a key in a later map overrides the same key
// from an earlier one.
func mergeSettings(settingSets ...map[string]string) map[string]string {
	merged := make(map[string]string)
	for _, set := range settingSets {
		for key, value := range set {
			merged[key] = value
		}
	}
	return merged
}
|
||||
|
||||
// parseEnvSettings reads the libpq-style PG* environment variables and
// returns them as a settings map keyed by connection parameter name. Unset
// and empty variables are omitted.
func parseEnvSettings() map[string]string {
	envToSetting := map[string]string{
		"PGHOST":               "host",
		"PGPORT":               "port",
		"PGDATABASE":           "database",
		"PGUSER":               "user",
		"PGPASSWORD":           "password",
		"PGPASSFILE":           "passfile",
		"PGAPPNAME":            "application_name",
		"PGCONNECT_TIMEOUT":    "connect_timeout",
		"PGSSLMODE":            "sslmode",
		"PGSSLKEY":             "sslkey",
		"PGSSLCERT":            "sslcert",
		"PGSSLSNI":             "sslsni",
		"PGSSLROOTCERT":        "sslrootcert",
		"PGSSLPASSWORD":        "sslpassword",
		"PGSSLNEGOTIATION":     "sslnegotiation",
		"PGTARGETSESSIONATTRS": "target_session_attrs",
		"PGSERVICE":            "service",
		"PGSERVICEFILE":        "servicefile",
		"PGTZ":                 "timezone",
		"PGOPTIONS":            "options",
	}

	settings := make(map[string]string)
	for envName, settingName := range envToSetting {
		if value := os.Getenv(envName); value != "" {
			settings[settingName] = value
		}
	}

	return settings
}
|
||||
|
||||
// parseURLSettings parses a postgres:// or postgresql:// database URL into a
// settings map. It supports comma-separated multi-host URLs by re-joining the
// hosts and ports into comma-separated "host" and "port" settings, and maps
// the query parameter "dbname" to "database".
func parseURLSettings(connString string) (map[string]string, error) {
	settings := make(map[string]string)

	parsedURL, err := url.Parse(connString)
	if err != nil {
		// Unwrap *url.Error so the caller's ParseConfigError doesn't nest the URL twice.
		if urlErr := new(url.Error); errors.As(err, &urlErr) {
			return nil, urlErr.Err
		}
		return nil, err
	}

	if parsedURL.User != nil {
		settings["user"] = parsedURL.User.Username()
		if password, present := parsedURL.User.Password(); present {
			settings["password"] = password
		}
	}

	// Handle multiple host:port's in url.Host by splitting them into host,host,host and port,port,port.
	var hosts []string
	var ports []string
	for _, host := range strings.Split(parsedURL.Host, ",") {
		if host == "" {
			continue
		}
		if isIPOnly(host) {
			// Bare IP (possibly bracketed IPv6) with no port: strip brackets and keep as-is.
			hosts = append(hosts, strings.Trim(host, "[]"))
			continue
		}
		h, p, err := net.SplitHostPort(host)
		if err != nil {
			return nil, fmt.Errorf("failed to split host:port in '%s', err: %w", host, err)
		}
		if h != "" {
			hosts = append(hosts, h)
		}
		if p != "" {
			ports = append(ports, p)
		}
	}
	if len(hosts) > 0 {
		settings["host"] = strings.Join(hosts, ",")
	}
	if len(ports) > 0 {
		settings["port"] = strings.Join(ports, ",")
	}

	database := strings.TrimLeft(parsedURL.Path, "/")
	if database != "" {
		settings["database"] = database
	}

	// Alias URL query keys to canonical setting names.
	nameMap := map[string]string{
		"dbname": "database",
	}

	for k, v := range parsedURL.Query() {
		if k2, present := nameMap[k]; present {
			k = k2
		}

		// Only the first value of a repeated query parameter is used.
		settings[k] = v[0]
	}

	return settings, nil
}
|
||||
|
||||
// isIPOnly reports whether host is a bare IP address (optionally wrapped in
// IPv6 brackets) or any string containing no colon — i.e. a host with no
// port component attached.
func isIPOnly(host string) bool {
	if net.ParseIP(strings.Trim(host, "[]")) != nil {
		return true
	}
	return !strings.Contains(host, ":")
}
|
||||
|
||||
// asciiSpace is a 0/1 lookup table, indexed by byte, of the ASCII characters
// treated as whitespace when scanning keyword/value connection strings.
var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1}
|
||||
|
||||
func parseKeywordValueSettings(s string) (map[string]string, error) {
|
||||
settings := make(map[string]string)
|
||||
|
||||
nameMap := map[string]string{
|
||||
"dbname": "database",
|
||||
}
|
||||
|
||||
for len(s) > 0 {
|
||||
var key, val string
|
||||
eqIdx := strings.IndexRune(s, '=')
|
||||
if eqIdx < 0 {
|
||||
return nil, errors.New("invalid keyword/value")
|
||||
}
|
||||
|
||||
key = strings.Trim(s[:eqIdx], " \t\n\r\v\f")
|
||||
s = strings.TrimLeft(s[eqIdx+1:], " \t\n\r\v\f")
|
||||
if len(s) == 0 {
|
||||
} else if s[0] != '\'' {
|
||||
end := 0
|
||||
for ; end < len(s); end++ {
|
||||
if asciiSpace[s[end]] == 1 {
|
||||
break
|
||||
}
|
||||
if s[end] == '\\' {
|
||||
end++
|
||||
if end == len(s) {
|
||||
return nil, errors.New("invalid backslash")
|
||||
}
|
||||
}
|
||||
}
|
||||
val = strings.Replace(strings.Replace(s[:end], "\\\\", "\\", -1), "\\'", "'", -1)
|
||||
if end == len(s) {
|
||||
s = ""
|
||||
} else {
|
||||
s = s[end+1:]
|
||||
}
|
||||
} else { // quoted string
|
||||
s = s[1:]
|
||||
end := 0
|
||||
for ; end < len(s); end++ {
|
||||
if s[end] == '\'' {
|
||||
break
|
||||
}
|
||||
if s[end] == '\\' {
|
||||
end++
|
||||
}
|
||||
}
|
||||
if end == len(s) {
|
||||
return nil, errors.New("unterminated quoted string in connection info string")
|
||||
}
|
||||
val = strings.Replace(strings.Replace(s[:end], "\\\\", "\\", -1), "\\'", "'", -1)
|
||||
if end == len(s) {
|
||||
s = ""
|
||||
} else {
|
||||
s = s[end+1:]
|
||||
}
|
||||
}
|
||||
|
||||
if k, ok := nameMap[key]; ok {
|
||||
key = k
|
||||
}
|
||||
|
||||
if key == "" {
|
||||
return nil, errors.New("invalid keyword/value")
|
||||
}
|
||||
|
||||
settings[key] = val
|
||||
}
|
||||
|
||||
return settings, nil
|
||||
}
|
||||
|
||||
func parseServiceSettings(servicefilePath, serviceName string) (map[string]string, error) {
|
||||
servicefile, err := pgservicefile.ReadServicefile(servicefilePath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read service file: %v", servicefilePath)
|
||||
}
|
||||
|
||||
service, err := servicefile.GetService(serviceName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to find service: %v", serviceName)
|
||||
}
|
||||
|
||||
nameMap := map[string]string{
|
||||
"dbname": "database",
|
||||
}
|
||||
|
||||
settings := make(map[string]string, len(service.Settings))
|
||||
for k, v := range service.Settings {
|
||||
if k2, present := nameMap[k]; present {
|
||||
k = k2
|
||||
}
|
||||
settings[k] = v
|
||||
}
|
||||
|
||||
return settings, nil
|
||||
}
|
||||
|
||||
// configTLS uses libpq's TLS parameters to construct []*tls.Config. It is
|
||||
// necessary to allow returning multiple TLS configs as sslmode "allow" and
|
||||
// "prefer" allow fallback.
|
||||
func configTLS(settings map[string]string, thisHost string, parseConfigOptions ParseConfigOptions) ([]*tls.Config, error) {
|
||||
host := thisHost
|
||||
sslmode := settings["sslmode"]
|
||||
sslrootcert := settings["sslrootcert"]
|
||||
sslcert := settings["sslcert"]
|
||||
sslkey := settings["sslkey"]
|
||||
sslpassword := settings["sslpassword"]
|
||||
sslsni := settings["sslsni"]
|
||||
sslnegotiation := settings["sslnegotiation"]
|
||||
|
||||
// Match libpq default behavior
|
||||
if sslmode == "" {
|
||||
sslmode = "prefer"
|
||||
}
|
||||
if sslsni == "" {
|
||||
sslsni = "1"
|
||||
}
|
||||
|
||||
tlsConfig := &tls.Config{}
|
||||
|
||||
if sslnegotiation == "direct" {
|
||||
tlsConfig.NextProtos = []string{"postgresql"}
|
||||
if sslmode == "prefer" {
|
||||
sslmode = "require"
|
||||
}
|
||||
}
|
||||
|
||||
if sslrootcert != "" {
|
||||
var caCertPool *x509.CertPool
|
||||
|
||||
if sslrootcert == "system" {
|
||||
var err error
|
||||
|
||||
caCertPool, err = x509.SystemCertPool()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to load system certificate pool: %w", err)
|
||||
}
|
||||
|
||||
sslmode = "verify-full"
|
||||
} else {
|
||||
caCertPool = x509.NewCertPool()
|
||||
|
||||
caPath := sslrootcert
|
||||
caCert, err := os.ReadFile(caPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read CA file: %w", err)
|
||||
}
|
||||
|
||||
if !caCertPool.AppendCertsFromPEM(caCert) {
|
||||
return nil, errors.New("unable to add CA to cert pool")
|
||||
}
|
||||
}
|
||||
|
||||
tlsConfig.RootCAs = caCertPool
|
||||
tlsConfig.ClientCAs = caCertPool
|
||||
}
|
||||
|
||||
switch sslmode {
|
||||
case "disable":
|
||||
return []*tls.Config{nil}, nil
|
||||
case "allow", "prefer":
|
||||
tlsConfig.InsecureSkipVerify = true
|
||||
case "require":
|
||||
// According to PostgreSQL documentation, if a root CA file exists,
|
||||
// the behavior of sslmode=require should be the same as that of verify-ca
|
||||
//
|
||||
// See https://www.postgresql.org/docs/current/libpq-ssl.html
|
||||
if sslrootcert != "" {
|
||||
goto nextCase
|
||||
}
|
||||
tlsConfig.InsecureSkipVerify = true
|
||||
break
|
||||
nextCase:
|
||||
fallthrough
|
||||
case "verify-ca":
|
||||
// Don't perform the default certificate verification because it
|
||||
// will verify the hostname. Instead, verify the server's
|
||||
// certificate chain ourselves in VerifyPeerCertificate and
|
||||
// ignore the server name. This emulates libpq's verify-ca
|
||||
// behavior.
|
||||
//
|
||||
// See https://github.com/golang/go/issues/21971#issuecomment-332693931
|
||||
// and https://pkg.go.dev/crypto/tls?tab=doc#example-Config-VerifyPeerCertificate
|
||||
// for more info.
|
||||
tlsConfig.InsecureSkipVerify = true
|
||||
tlsConfig.VerifyPeerCertificate = func(certificates [][]byte, _ [][]*x509.Certificate) error {
|
||||
certs := make([]*x509.Certificate, len(certificates))
|
||||
for i, asn1Data := range certificates {
|
||||
cert, err := x509.ParseCertificate(asn1Data)
|
||||
if err != nil {
|
||||
return errors.New("failed to parse certificate from server: " + err.Error())
|
||||
}
|
||||
certs[i] = cert
|
||||
}
|
||||
|
||||
// Leave DNSName empty to skip hostname verification.
|
||||
opts := x509.VerifyOptions{
|
||||
Roots: tlsConfig.RootCAs,
|
||||
Intermediates: x509.NewCertPool(),
|
||||
}
|
||||
// Skip the first cert because it's the leaf. All others
|
||||
// are intermediates.
|
||||
for _, cert := range certs[1:] {
|
||||
opts.Intermediates.AddCert(cert)
|
||||
}
|
||||
_, err := certs[0].Verify(opts)
|
||||
return err
|
||||
}
|
||||
case "verify-full":
|
||||
tlsConfig.ServerName = host
|
||||
default:
|
||||
return nil, errors.New("sslmode is invalid")
|
||||
}
|
||||
|
||||
if (sslcert != "" && sslkey == "") || (sslcert == "" && sslkey != "") {
|
||||
return nil, errors.New(`both "sslcert" and "sslkey" are required`)
|
||||
}
|
||||
|
||||
if sslcert != "" && sslkey != "" {
|
||||
buf, err := os.ReadFile(sslkey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read sslkey: %w", err)
|
||||
}
|
||||
block, _ := pem.Decode(buf)
|
||||
if block == nil {
|
||||
return nil, errors.New("failed to decode sslkey")
|
||||
}
|
||||
var pemKey []byte
|
||||
var decryptedKey []byte
|
||||
var decryptedError error
|
||||
// If PEM is encrypted, attempt to decrypt using pass phrase
|
||||
if x509.IsEncryptedPEMBlock(block) {
|
||||
// Attempt decryption with pass phrase
|
||||
// NOTE: only supports RSA (PKCS#1)
|
||||
if sslpassword != "" {
|
||||
decryptedKey, decryptedError = x509.DecryptPEMBlock(block, []byte(sslpassword))
|
||||
}
|
||||
// if sslpassword not provided or has decryption error when use it
|
||||
// try to find sslpassword with callback function
|
||||
if sslpassword == "" || decryptedError != nil {
|
||||
if parseConfigOptions.GetSSLPassword != nil {
|
||||
sslpassword = parseConfigOptions.GetSSLPassword(context.Background())
|
||||
}
|
||||
if sslpassword == "" {
|
||||
return nil, fmt.Errorf("unable to find sslpassword")
|
||||
}
|
||||
}
|
||||
decryptedKey, decryptedError = x509.DecryptPEMBlock(block, []byte(sslpassword))
|
||||
// Should we also provide warning for PKCS#1 needed?
|
||||
if decryptedError != nil {
|
||||
return nil, fmt.Errorf("unable to decrypt key: %w", err)
|
||||
}
|
||||
|
||||
pemBytes := pem.Block{
|
||||
Type: "RSA PRIVATE KEY",
|
||||
Bytes: decryptedKey,
|
||||
}
|
||||
pemKey = pem.EncodeToMemory(&pemBytes)
|
||||
} else {
|
||||
pemKey = pem.EncodeToMemory(block)
|
||||
}
|
||||
certfile, err := os.ReadFile(sslcert)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read cert: %w", err)
|
||||
}
|
||||
cert, err := tls.X509KeyPair(certfile, pemKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to load cert: %w", err)
|
||||
}
|
||||
tlsConfig.Certificates = []tls.Certificate{cert}
|
||||
}
|
||||
|
||||
// Set Server Name Indication (SNI), if enabled by connection parameters.
|
||||
// Per RFC 6066, do not set it if the host is a literal IP address (IPv4
|
||||
// or IPv6).
|
||||
if sslsni == "1" && net.ParseIP(host) == nil {
|
||||
tlsConfig.ServerName = host
|
||||
}
|
||||
|
||||
switch sslmode {
|
||||
case "allow":
|
||||
return []*tls.Config{nil, tlsConfig}, nil
|
||||
case "prefer":
|
||||
return []*tls.Config{tlsConfig, nil}, nil
|
||||
case "require", "verify-ca", "verify-full":
|
||||
return []*tls.Config{tlsConfig}, nil
|
||||
default:
|
||||
panic("BUG: bad sslmode should already have been caught")
|
||||
}
|
||||
}
|
||||
|
||||
// parsePort parses s as a TCP port number, rejecting 0 and anything that does
// not fit in a uint16. The error from strconv is returned verbatim for
// malformed input.
func parsePort(s string) (uint16, error) {
	p, err := strconv.ParseUint(s, 10, 16)
	if err != nil {
		return 0, err
	}
	// ParseUint with bitSize 16 already rejects values above MaxUint16; the
	// explicit range check additionally rejects port 0.
	if p < 1 || p > math.MaxUint16 {
		return 0, errors.New("outside range")
	}
	return uint16(p), nil
}
|
||||
|
||||
// makeDefaultDialer returns a zero-valued net.Dialer, relying on Go's
// built-in keep-alive defaults.
func makeDefaultDialer() *net.Dialer {
	return new(net.Dialer)
}
|
||||
|
||||
// makeDefaultResolver returns the process-wide default DNS resolver.
func makeDefaultResolver() *net.Resolver {
	return net.DefaultResolver
}
|
||||
|
||||
// parseConnectTimeoutSetting converts a connect_timeout setting (whole
// seconds, as a decimal string) into a time.Duration. Negative values are
// rejected; parse errors from strconv are returned verbatim.
func parseConnectTimeoutSetting(s string) (time.Duration, error) {
	seconds, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return 0, err
	}
	if seconds < 0 {
		return 0, errors.New("negative timeout")
	}
	return time.Duration(seconds) * time.Second, nil
}
|
||||
|
||||
func makeConnectTimeoutDialFunc(timeout time.Duration) DialFunc {
|
||||
d := makeDefaultDialer()
|
||||
d.Timeout = timeout
|
||||
return d.DialContext
|
||||
}
|
||||
|
||||
// ValidateConnectTargetSessionAttrsReadWrite is a ValidateConnectFunc that implements libpq compatible
|
||||
// target_session_attrs=read-write.
|
||||
func ValidateConnectTargetSessionAttrsReadWrite(ctx context.Context, pgConn *PgConn) error {
|
||||
result, err := pgConn.Exec(ctx, "show transaction_read_only").ReadAll()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if string(result[0].Rows[0][0]) == "on" {
|
||||
return errors.New("read only connection")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateConnectTargetSessionAttrsReadOnly is a ValidateConnectFunc that implements libpq compatible
|
||||
// target_session_attrs=read-only.
|
||||
func ValidateConnectTargetSessionAttrsReadOnly(ctx context.Context, pgConn *PgConn) error {
|
||||
result, err := pgConn.Exec(ctx, "show transaction_read_only").ReadAll()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if string(result[0].Rows[0][0]) != "on" {
|
||||
return errors.New("connection is not read only")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateConnectTargetSessionAttrsStandby is a ValidateConnectFunc that implements libpq compatible
|
||||
// target_session_attrs=standby.
|
||||
func ValidateConnectTargetSessionAttrsStandby(ctx context.Context, pgConn *PgConn) error {
|
||||
result, err := pgConn.Exec(ctx, "select pg_is_in_recovery()").ReadAll()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if string(result[0].Rows[0][0]) != "t" {
|
||||
return errors.New("server is not in hot standby mode")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateConnectTargetSessionAttrsPrimary is a ValidateConnectFunc that implements libpq compatible
|
||||
// target_session_attrs=primary.
|
||||
func ValidateConnectTargetSessionAttrsPrimary(ctx context.Context, pgConn *PgConn) error {
|
||||
result, err := pgConn.Exec(ctx, "select pg_is_in_recovery()").ReadAll()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if string(result[0].Rows[0][0]) == "t" {
|
||||
return errors.New("server is in standby mode")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateConnectTargetSessionAttrsPreferStandby is a ValidateConnectFunc that implements libpq compatible
|
||||
// target_session_attrs=prefer-standby.
|
||||
func ValidateConnectTargetSessionAttrsPreferStandby(ctx context.Context, pgConn *PgConn) error {
|
||||
result, err := pgConn.Exec(ctx, "select pg_is_in_recovery()").ReadAll()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if string(result[0].Rows[0][0]) != "t" {
|
||||
return &NotPreferredError{err: errors.New("server is not in hot standby mode")}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
File diff suppressed because it is too large
Load Diff
@ -1,80 +0,0 @@
|
||||
package ctxwatch
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ContextWatcher watches a context and performs an action when the context is canceled. It can watch one context at a
// time.
type ContextWatcher struct {
	handler     Handler       // receives HandleCancel / HandleUnwatchAfterCancel callbacks
	unwatchChan chan struct{} // unbuffered; synchronizes Unwatch with the watch goroutine

	lock              sync.Mutex // serializes Watch and Unwatch
	watchInProgress   bool       // true while a cancelable context is being watched
	onCancelWasCalled bool       // set by the watch goroutine if HandleCancel ran for the current watch
}
|
||||
|
||||
// NewContextWatcher returns a ContextWatcher. onCancel will be called when a watched context is canceled.
|
||||
// OnUnwatchAfterCancel will be called when Unwatch is called and the watched context had already been canceled and
|
||||
// onCancel called.
|
||||
func NewContextWatcher(handler Handler) *ContextWatcher {
|
||||
cw := &ContextWatcher{
|
||||
handler: handler,
|
||||
unwatchChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
return cw
|
||||
}
|
||||
|
||||
// Watch starts watching ctx. If ctx is canceled then the onCancel function passed to NewContextWatcher will be called.
func (cw *ContextWatcher) Watch(ctx context.Context) {
	cw.lock.Lock()
	defer cw.lock.Unlock()

	if cw.watchInProgress {
		panic("Watch already in progress")
	}

	cw.onCancelWasCalled = false

	// ctx.Done() is nil for contexts that can never be canceled (e.g.
	// context.Background()); in that case there is nothing to watch and no
	// goroutine is started.
	if ctx.Done() != nil {
		cw.watchInProgress = true
		go func() {
			select {
			case <-ctx.Done():
				cw.handler.HandleCancel(ctx)
				cw.onCancelWasCalled = true
				// Block until Unwatch sends on unwatchChan. Because the channel
				// is unbuffered, this receive synchronizes with Unwatch's send,
				// making the write to onCancelWasCalled above visible to
				// Unwatch's subsequent read.
				<-cw.unwatchChan
			case <-cw.unwatchChan:
				// Unwatch was called before the context was canceled.
			}
		}()
	} else {
		cw.watchInProgress = false
	}
}
|
||||
|
||||
// Unwatch stops watching the previously watched context. If the onCancel function passed to NewContextWatcher was
|
||||
// called then onUnwatchAfterCancel will also be called.
|
||||
func (cw *ContextWatcher) Unwatch() {
|
||||
cw.lock.Lock()
|
||||
defer cw.lock.Unlock()
|
||||
|
||||
if cw.watchInProgress {
|
||||
cw.unwatchChan <- struct{}{}
|
||||
if cw.onCancelWasCalled {
|
||||
cw.handler.HandleUnwatchAfterCancel()
|
||||
}
|
||||
cw.watchInProgress = false
|
||||
}
|
||||
}
|
||||
|
||||
// Handler defines the callbacks a ContextWatcher invokes in response to
// cancellation of a watched context and to unwatching after a cancellation.
type Handler interface {
	// HandleCancel is called when the context that a ContextWatcher is currently watching is canceled. canceledCtx is the
	// context that was canceled.
	HandleCancel(canceledCtx context.Context)

	// HandleUnwatchAfterCancel is called when a ContextWatcher that called HandleCancel on this Handler is unwatched.
	HandleUnwatchAfterCancel()
}
|
@ -1,185 +0,0 @@
|
||||
package ctxwatch_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgconn/ctxwatch"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// testHandler adapts two plain func fields to the ctxwatch.Handler interface so
// each test can supply its cancel/cleanup behavior inline.
type testHandler struct {
	handleCancel             func(context.Context)
	handleUnwatchAfterCancel func()
}

func (h *testHandler) HandleCancel(ctx context.Context) {
	h.handleCancel(ctx)
}

func (h *testHandler) HandleUnwatchAfterCancel() {
	h.handleUnwatchAfterCancel()
}
|
||||
|
||||
// TestContextWatcherContextCancelled verifies that canceling a watched context
// invokes the cancel handler, and that a subsequent Unwatch invokes the
// unwatch-after-cancel handler.
func TestContextWatcherContextCancelled(t *testing.T) {
	canceledChan := make(chan struct{})
	cleanupCalled := false
	cw := ctxwatch.NewContextWatcher(&testHandler{
		handleCancel: func(context.Context) {
			canceledChan <- struct{}{}
		}, handleUnwatchAfterCancel: func() {
			cleanupCalled = true
		},
	})

	ctx, cancel := context.WithCancel(context.Background())
	cw.Watch(ctx)
	cancel()

	// The cancel handler runs on the watcher's goroutine; wait for it with a
	// timeout so a broken watcher fails the test instead of hanging it.
	select {
	case <-canceledChan:
	case <-time.NewTimer(time.Second).C:
		t.Fatal("Timed out waiting for cancel func to be called")
	}

	cw.Unwatch()

	require.True(t, cleanupCalled, "Cleanup func was not called")
}
|
||||
|
||||
func TestContextWatcherUnwatchedBeforeContextCancelled(t *testing.T) {
|
||||
cw := ctxwatch.NewContextWatcher(&testHandler{
|
||||
handleCancel: func(context.Context) {
|
||||
t.Error("cancel func should not have been called")
|
||||
}, handleUnwatchAfterCancel: func() {
|
||||
t.Error("cleanup func should not have been called")
|
||||
},
|
||||
})
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cw.Watch(ctx)
|
||||
cw.Unwatch()
|
||||
cancel()
|
||||
}
|
||||
|
||||
func TestContextWatcherMultipleWatchPanics(t *testing.T) {
|
||||
cw := ctxwatch.NewContextWatcher(&testHandler{handleCancel: func(context.Context) {}, handleUnwatchAfterCancel: func() {}})
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
cw.Watch(ctx)
|
||||
defer cw.Unwatch()
|
||||
|
||||
ctx2, cancel2 := context.WithCancel(context.Background())
|
||||
defer cancel2()
|
||||
require.Panics(t, func() { cw.Watch(ctx2) }, "Expected panic when Watch called multiple times")
|
||||
}
|
||||
|
||||
func TestContextWatcherUnwatchWhenNotWatchingIsSafe(t *testing.T) {
|
||||
cw := ctxwatch.NewContextWatcher(&testHandler{handleCancel: func(context.Context) {}, handleUnwatchAfterCancel: func() {}})
|
||||
cw.Unwatch() // unwatch when not / never watching
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
cw.Watch(ctx)
|
||||
cw.Unwatch()
|
||||
cw.Unwatch() // double unwatch
|
||||
}
|
||||
|
||||
// TestContextWatcherUnwatchIsConcurrencySafe verifies that concurrent Unwatch
// calls do not race or panic; meaningful only when run with -race.
func TestContextWatcherUnwatchIsConcurrencySafe(t *testing.T) {
	cw := ctxwatch.NewContextWatcher(&testHandler{handleCancel: func(context.Context) {}, handleUnwatchAfterCancel: func() {}})

	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	cw.Watch(ctx)

	go cw.Unwatch()
	go cw.Unwatch()

	// Wait out the timeout so both goroutines have a chance to run before the
	// test returns.
	<-ctx.Done()
}
|
||||
|
||||
func TestContextWatcherStress(t *testing.T) {
|
||||
var cancelFuncCalls int64
|
||||
var cleanupFuncCalls int64
|
||||
|
||||
cw := ctxwatch.NewContextWatcher(&testHandler{
|
||||
handleCancel: func(context.Context) {
|
||||
atomic.AddInt64(&cancelFuncCalls, 1)
|
||||
}, handleUnwatchAfterCancel: func() {
|
||||
atomic.AddInt64(&cleanupFuncCalls, 1)
|
||||
},
|
||||
})
|
||||
|
||||
cycleCount := 100000
|
||||
|
||||
for i := 0; i < cycleCount; i++ {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cw.Watch(ctx)
|
||||
if i%2 == 0 {
|
||||
cancel()
|
||||
}
|
||||
|
||||
// Without time.Sleep, cw.Unwatch will almost always run before the cancel func which means cancel will never happen. This gives us a better mix.
|
||||
if i%333 == 0 {
|
||||
// on Windows Sleep takes more time than expected so we try to get here less frequently to avoid
|
||||
// the CI takes a long time
|
||||
time.Sleep(time.Nanosecond)
|
||||
}
|
||||
|
||||
cw.Unwatch()
|
||||
if i%2 == 1 {
|
||||
cancel()
|
||||
}
|
||||
}
|
||||
|
||||
actualCancelFuncCalls := atomic.LoadInt64(&cancelFuncCalls)
|
||||
actualCleanupFuncCalls := atomic.LoadInt64(&cleanupFuncCalls)
|
||||
|
||||
if actualCancelFuncCalls == 0 {
|
||||
t.Fatal("actualCancelFuncCalls == 0")
|
||||
}
|
||||
|
||||
maxCancelFuncCalls := int64(cycleCount) / 2
|
||||
if actualCancelFuncCalls > maxCancelFuncCalls {
|
||||
t.Errorf("cancel func calls should be no more than %d but was %d", actualCancelFuncCalls, maxCancelFuncCalls)
|
||||
}
|
||||
|
||||
if actualCancelFuncCalls != actualCleanupFuncCalls {
|
||||
t.Errorf("cancel func calls (%d) should be equal to cleanup func calls (%d) but was not", actualCancelFuncCalls, actualCleanupFuncCalls)
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkContextWatcherUncancellable measures Watch/Unwatch overhead when the
// context can never be canceled (ctx.Done() == nil, so no goroutine is spawned).
func BenchmarkContextWatcherUncancellable(b *testing.B) {
	cw := ctxwatch.NewContextWatcher(&testHandler{handleCancel: func(context.Context) {}, handleUnwatchAfterCancel: func() {}})

	for i := 0; i < b.N; i++ {
		cw.Watch(context.Background())
		cw.Unwatch()
	}
}
|
||||
|
||||
// BenchmarkContextWatcherCancelled measures a full Watch/cancel/Unwatch cycle
// with a fresh cancelable context each iteration.
func BenchmarkContextWatcherCancelled(b *testing.B) {
	cw := ctxwatch.NewContextWatcher(&testHandler{handleCancel: func(context.Context) {}, handleUnwatchAfterCancel: func() {}})

	for i := 0; i < b.N; i++ {
		ctx, cancel := context.WithCancel(context.Background())
		cw.Watch(ctx)
		cancel()
		cw.Unwatch()
	}
}
|
||||
|
||||
// BenchmarkContextWatcherCancellable measures Watch/Unwatch overhead for a
// cancelable context that is never actually canceled (one shared ctx).
func BenchmarkContextWatcherCancellable(b *testing.B) {
	cw := ctxwatch.NewContextWatcher(&testHandler{handleCancel: func(context.Context) {}, handleUnwatchAfterCancel: func() {}})

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	for i := 0; i < b.N; i++ {
		cw.Watch(ctx)
		cw.Unwatch()
	}
}
|
@ -1,63 +0,0 @@
|
||||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
package pgconn
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// defaultSettings returns the built-in connection defaults for unix-like
// systems: host/port, the OS user name, and the conventional libpq file
// locations under the home directory (.pgpass, .pg_service.conf,
// .postgresql/*) when those files exist on disk.
func defaultSettings() map[string]string {
	settings := make(map[string]string)

	settings["host"] = defaultHost()
	settings["port"] = "5432"

	// Default to the OS user name. Purposely ignoring err getting user name from
	// OS. The client application will simply have to specify the user in that
	// case (which they typically will be doing anyway).
	user, err := user.Current()
	if err == nil {
		settings["user"] = user.Username
		settings["passfile"] = filepath.Join(user.HomeDir, ".pgpass")
		settings["servicefile"] = filepath.Join(user.HomeDir, ".pg_service.conf")
		sslcert := filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt")
		sslkey := filepath.Join(user.HomeDir, ".postgresql", "postgresql.key")
		if _, err := os.Stat(sslcert); err == nil {
			if _, err := os.Stat(sslkey); err == nil {
				// Both the cert and key must be present to use them, or do not use either
				settings["sslcert"] = sslcert
				settings["sslkey"] = sslkey
			}
		}
		sslrootcert := filepath.Join(user.HomeDir, ".postgresql", "root.crt")
		if _, err := os.Stat(sslrootcert); err == nil {
			settings["sslrootcert"] = sslrootcert
		}
	}

	settings["target_session_attrs"] = "any"

	return settings
}
|
||||
|
||||
// defaultHost attempts to mimic libpq's default host. libpq uses the default unix socket location on *nix and localhost
// on Windows. The default socket location is compiled into libpq. Since pgx does not have access to that default it
// checks the existence of common locations.
func defaultHost() string {
	for _, dir := range []string{
		"/var/run/postgresql", // Debian
		"/private/tmp",        // OSX - homebrew
		"/tmp",                // standard PostgreSQL
	} {
		if _, err := os.Stat(dir); err == nil {
			return dir
		}
	}

	return "localhost"
}
|
@ -1,57 +0,0 @@
|
||||
package pgconn
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// defaultSettings returns the built-in connection defaults for Windows:
// host/port, the OS user name with the DOMAIN\ prefix stripped, and the
// conventional libpq file locations under %APPDATA%\postgresql when those
// files exist on disk.
func defaultSettings() map[string]string {
	settings := make(map[string]string)

	settings["host"] = defaultHost()
	settings["port"] = "5432"

	// Default to the OS user name. Purposely ignoring err getting user name from
	// OS. The client application will simply have to specify the user in that
	// case (which they typically will be doing anyway).
	user, err := user.Current()
	appData := os.Getenv("APPDATA")
	if err == nil {
		// Windows gives us the username here as `DOMAIN\user` or `LOCALPCNAME\user`,
		// but the libpq default is just the `user` portion, so we strip off the first part.
		username := user.Username
		if strings.Contains(username, "\\") {
			username = username[strings.LastIndex(username, "\\")+1:]
		}

		settings["user"] = username
		settings["passfile"] = filepath.Join(appData, "postgresql", "pgpass.conf")
		settings["servicefile"] = filepath.Join(user.HomeDir, ".pg_service.conf")
		sslcert := filepath.Join(appData, "postgresql", "postgresql.crt")
		sslkey := filepath.Join(appData, "postgresql", "postgresql.key")
		if _, err := os.Stat(sslcert); err == nil {
			if _, err := os.Stat(sslkey); err == nil {
				// Both the cert and key must be present to use them, or do not use either
				settings["sslcert"] = sslcert
				settings["sslkey"] = sslkey
			}
		}
		sslrootcert := filepath.Join(appData, "postgresql", "root.crt")
		if _, err := os.Stat(sslrootcert); err == nil {
			settings["sslrootcert"] = sslrootcert
		}
	}

	settings["target_session_attrs"] = "any"

	return settings
}
|
||||
|
||||
// defaultHost attempts to mimic libpq's default host. libpq uses the default unix socket location on *nix and localhost
// on Windows. The default socket location is compiled into libpq. Since pgx does not have access to that default it
// checks the existence of common locations.
func defaultHost() string {
	// There are no unix sockets to probe on Windows; libpq always defaults to
	// localhost here.
	return "localhost"
}
|
@ -1,38 +0,0 @@
|
||||
// Package pgconn is a low-level PostgreSQL database driver.
|
||||
/*
|
||||
pgconn provides lower level access to a PostgreSQL connection than a database/sql or pgx connection. It operates at
|
||||
nearly the same level is the C library libpq.
|
||||
|
||||
Establishing a Connection
|
||||
|
||||
Use Connect to establish a connection. It accepts a connection string in URL or keyword/value format and will read the
|
||||
environment for libpq style environment variables.
|
||||
|
||||
Executing a Query
|
||||
|
||||
ExecParams and ExecPrepared execute a single query. They return readers that iterate over each row. The Read method
|
||||
reads all rows into memory.
|
||||
|
||||
Executing Multiple Queries in a Single Round Trip
|
||||
|
||||
Exec and ExecBatch can execute multiple queries in a single round trip. They return readers that iterate over each query
|
||||
result. The ReadAll method reads all query results into memory.
|
||||
|
||||
Pipeline Mode
|
||||
|
||||
Pipeline mode allows sending queries without having read the results of previously sent queries. It allows control of
|
||||
exactly how many and when network round trips occur.
|
||||
|
||||
Context Support
|
||||
|
||||
All potentially blocking operations take a context.Context. The default behavior when a context is canceled is for the
|
||||
method to immediately return. In most circumstances, this will also close the underlying connection. This behavior can
|
||||
be customized by using BuildContextWatcherHandler on the Config to create a ctxwatch.Handler with different behavior.
|
||||
This can be especially useful when queries that are frequently canceled and the overhead of creating new connections is
|
||||
a problem. DeadlineContextWatcherHandler and CancelRequestContextWatcherHandler can be used to introduce a delay before
|
||||
interrupting the query in such a way as to close the connection.
|
||||
|
||||
The CancelRequest method may be used to request the PostgreSQL server cancel an in-progress query without forcing the
|
||||
client to abort.
|
||||
*/
|
||||
package pgconn
|
256
pgconn/errors.go
256
pgconn/errors.go
@ -1,256 +0,0 @@
|
||||
package pgconn
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// SafeToRetry checks if the err is guaranteed to have occurred before sending any data to the server.
|
||||
func SafeToRetry(err error) bool {
|
||||
var retryableErr interface{ SafeToRetry() bool }
|
||||
if errors.As(err, &retryableErr) {
|
||||
return retryableErr.SafeToRetry()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Timeout checks if err was caused by a timeout. To be specific, it is true if err was caused within pgconn by a
|
||||
// context.DeadlineExceeded or an implementer of net.Error where Timeout() is true.
|
||||
func Timeout(err error) bool {
|
||||
var timeoutErr *errTimeout
|
||||
return errors.As(err, &timeoutErr)
|
||||
}
|
||||
|
||||
// PgError represents an error reported by the PostgreSQL server. See
// http://www.postgresql.org/docs/current/static/protocol-error-fields.html for
// detailed field description.
type PgError struct {
	Severity            string
	SeverityUnlocalized string
	Code                string // SQLSTATE code; also exposed via SQLState()
	Message             string
	Detail              string
	Hint                string
	Position            int32
	InternalPosition    int32
	InternalQuery       string
	Where               string
	SchemaName          string
	TableName           string
	ColumnName          string
	DataTypeName        string
	ConstraintName      string
	File                string
	Line                int32
	Routine             string
}

// Error formats the error as "SEVERITY: message (SQLSTATE code)".
func (pe *PgError) Error() string {
	return fmt.Sprintf("%s: %s (SQLSTATE %s)", pe.Severity, pe.Message, pe.Code)
}

// SQLState returns the SQLState of the error.
func (pe *PgError) SQLState() string {
	return pe.Code
}
|
||||
|
||||
// ConnectError is the error returned when a connection attempt fails.
|
||||
type ConnectError struct {
|
||||
Config *Config // The configuration that was used in the connection attempt.
|
||||
err error
|
||||
}
|
||||
|
||||
func (e *ConnectError) Error() string {
|
||||
prefix := fmt.Sprintf("failed to connect to `user=%s database=%s`:", e.Config.User, e.Config.Database)
|
||||
details := e.err.Error()
|
||||
if strings.Contains(details, "\n") {
|
||||
return prefix + "\n\t" + strings.ReplaceAll(details, "\n", "\n\t")
|
||||
} else {
|
||||
return prefix + " " + details
|
||||
}
|
||||
}
|
||||
|
||||
func (e *ConnectError) Unwrap() error {
|
||||
return e.err
|
||||
}
|
||||
|
||||
type perDialConnectError struct {
|
||||
address string
|
||||
originalHostname string
|
||||
err error
|
||||
}
|
||||
|
||||
func (e *perDialConnectError) Error() string {
|
||||
return fmt.Sprintf("%s (%s): %s", e.address, e.originalHostname, e.err.Error())
|
||||
}
|
||||
|
||||
func (e *perDialConnectError) Unwrap() error {
|
||||
return e.err
|
||||
}
|
||||
|
||||
// connLockError reports a failure to acquire the connection for use; status
// describes the connection's current state.
type connLockError struct {
	status string
}

func (e *connLockError) Error() string {
	return e.status
}

// SafeToRetry always reports true: a lock failure by definition happens before
// the connection is used.
func (e *connLockError) SafeToRetry() bool {
	return true
}
|
||||
|
||||
// ParseConfigError is the error returned when a connection string cannot be parsed.
|
||||
type ParseConfigError struct {
|
||||
ConnString string // The connection string that could not be parsed.
|
||||
msg string
|
||||
err error
|
||||
}
|
||||
|
||||
func NewParseConfigError(conn, msg string, err error) error {
|
||||
return &ParseConfigError{
|
||||
ConnString: conn,
|
||||
msg: msg,
|
||||
err: err,
|
||||
}
|
||||
}
|
||||
|
||||
func (e *ParseConfigError) Error() string {
|
||||
// Now that ParseConfigError is public and ConnString is available to the developer, perhaps it would be better only
|
||||
// return a static string. That would ensure that the error message cannot leak a password. The ConnString field would
|
||||
// allow access to the original string if desired and Unwrap would allow access to the underlying error.
|
||||
connString := redactPW(e.ConnString)
|
||||
if e.err == nil {
|
||||
return fmt.Sprintf("cannot parse `%s`: %s", connString, e.msg)
|
||||
}
|
||||
return fmt.Sprintf("cannot parse `%s`: %s (%s)", connString, e.msg, e.err.Error())
|
||||
}
|
||||
|
||||
func (e *ParseConfigError) Unwrap() error {
|
||||
return e.err
|
||||
}
|
||||
|
||||
func normalizeTimeoutError(ctx context.Context, err error) error {
|
||||
var netErr net.Error
|
||||
if errors.As(err, &netErr) && netErr.Timeout() {
|
||||
if ctx.Err() == context.Canceled {
|
||||
// Since the timeout was caused by a context cancellation, the actual error is context.Canceled not the timeout error.
|
||||
return context.Canceled
|
||||
} else if ctx.Err() == context.DeadlineExceeded {
|
||||
return &errTimeout{err: ctx.Err()}
|
||||
} else {
|
||||
return &errTimeout{err: netErr}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
type pgconnError struct {
|
||||
msg string
|
||||
err error
|
||||
safeToRetry bool
|
||||
}
|
||||
|
||||
func (e *pgconnError) Error() string {
|
||||
if e.msg == "" {
|
||||
return e.err.Error()
|
||||
}
|
||||
if e.err == nil {
|
||||
return e.msg
|
||||
}
|
||||
return fmt.Sprintf("%s: %s", e.msg, e.err.Error())
|
||||
}
|
||||
|
||||
func (e *pgconnError) SafeToRetry() bool {
|
||||
return e.safeToRetry
|
||||
}
|
||||
|
||||
func (e *pgconnError) Unwrap() error {
|
||||
return e.err
|
||||
}
|
||||
|
||||
// errTimeout occurs when an error was caused by a timeout. Specifically, it wraps an error which is
|
||||
// context.Canceled, context.DeadlineExceeded, or an implementer of net.Error where Timeout() is true.
|
||||
type errTimeout struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (e *errTimeout) Error() string {
|
||||
return fmt.Sprintf("timeout: %s", e.err.Error())
|
||||
}
|
||||
|
||||
func (e *errTimeout) SafeToRetry() bool {
|
||||
return SafeToRetry(e.err)
|
||||
}
|
||||
|
||||
func (e *errTimeout) Unwrap() error {
|
||||
return e.err
|
||||
}
|
||||
|
||||
type contextAlreadyDoneError struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (e *contextAlreadyDoneError) Error() string {
|
||||
return fmt.Sprintf("context already done: %s", e.err.Error())
|
||||
}
|
||||
|
||||
func (e *contextAlreadyDoneError) SafeToRetry() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e *contextAlreadyDoneError) Unwrap() error {
|
||||
return e.err
|
||||
}
|
||||
|
||||
// newContextAlreadyDoneError double-wraps a context error in `contextAlreadyDoneError` and `errTimeout`.
|
||||
func newContextAlreadyDoneError(ctx context.Context) (err error) {
|
||||
return &errTimeout{&contextAlreadyDoneError{err: ctx.Err()}}
|
||||
}
|
||||
|
||||
// Regexps used by redactPW. Compiled once at package init instead of on every
// call, per the standard regexp.MustCompile idiom.
var (
	redactQuotedPasswordRE = regexp.MustCompile(`password='[^']*'`)
	redactPlainPasswordRE  = regexp.MustCompile(`password=[^ ]*`)
	redactBrokenURLPassRE  = regexp.MustCompile(`:[^:@]+?@`)
)

// redactPW returns connString with any password replaced by a placeholder so
// the string can be safely included in error messages. URL-style strings are
// redacted via url.Parse/redactURL; keyword/value strings (and URLs that fail
// to parse) are redacted with the regexps above.
func redactPW(connString string) string {
	if strings.HasPrefix(connString, "postgres://") || strings.HasPrefix(connString, "postgresql://") {
		if u, err := url.Parse(connString); err == nil {
			return redactURL(u)
		}
	}
	connString = redactQuotedPasswordRE.ReplaceAllLiteralString(connString, "password=xxxxx")
	connString = redactPlainPasswordRE.ReplaceAllLiteralString(connString, "password=xxxxx")
	connString = redactBrokenURLPassRE.ReplaceAllLiteralString(connString, ":xxxxxx@")
	return connString
}

// redactURL replaces the password component of u (if set) with a placeholder
// and returns the resulting string. A nil URL yields "".
func redactURL(u *url.URL) string {
	if u == nil {
		return ""
	}
	if _, pwSet := u.User.Password(); pwSet {
		u.User = url.UserPassword(u.User.Username(), "xxxxx")
	}
	return u.String()
}
|
||||
|
||||
type NotPreferredError struct {
|
||||
err error
|
||||
safeToRetry bool
|
||||
}
|
||||
|
||||
func (e *NotPreferredError) Error() string {
|
||||
return fmt.Sprintf("standby server not found: %s", e.err.Error())
|
||||
}
|
||||
|
||||
func (e *NotPreferredError) SafeToRetry() bool {
|
||||
return e.safeToRetry
|
||||
}
|
||||
|
||||
func (e *NotPreferredError) Unwrap() error {
|
||||
return e.err
|
||||
}
|
@ -1,54 +0,0 @@
|
||||
package pgconn_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestConfigError verifies that ParseConfigError redacts passwords in every
// connection-string form (URL, keyword/value quoted and unquoted, and
// malformed URLs) before they can leak into error messages.
func TestConfigError(t *testing.T) {
	tests := []struct {
		name        string
		err         error
		expectedMsg string
	}{
		{
			name:        "url with password",
			err:         pgconn.NewParseConfigError("postgresql://foo:password@host", "msg", nil),
			expectedMsg: "cannot parse `postgresql://foo:xxxxx@host`: msg",
		},
		{
			name:        "keyword/value with password unquoted",
			err:         pgconn.NewParseConfigError("host=host password=password user=user", "msg", nil),
			expectedMsg: "cannot parse `host=host password=xxxxx user=user`: msg",
		},
		{
			name:        "keyword/value with password quoted",
			err:         pgconn.NewParseConfigError("host=host password='pass word' user=user", "msg", nil),
			expectedMsg: "cannot parse `host=host password=xxxxx user=user`: msg",
		},
		{
			name:        "weird url",
			err:         pgconn.NewParseConfigError("postgresql://foo::password@host:1:", "msg", nil),
			expectedMsg: "cannot parse `postgresql://foo:xxxxx@host:1:`: msg",
		},
		{
			name:        "weird url with slash in password",
			err:         pgconn.NewParseConfigError("postgres://user:pass/word@host:5432/db_name", "msg", nil),
			expectedMsg: "cannot parse `postgres://user:xxxxxx@host:5432/db_name`: msg",
		},
		{
			name:        "url without password",
			err:         pgconn.NewParseConfigError("postgresql://other@host/db", "msg", nil),
			expectedMsg: "cannot parse `postgresql://other@host/db`: msg",
		},
	}
	for _, tt := range tests {
		// Shadow the loop variable so each parallel subtest captures its own copy
		// (required before Go 1.22 per-iteration loop variables).
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			assert.EqualError(t, tt.err, tt.expectedMsg)
		})
	}
}
|
@ -1,3 +0,0 @@
|
||||
// File export_test exports some methods for better testing.
|
||||
|
||||
package pgconn
|
@ -1,36 +0,0 @@
|
||||
package pgconn_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func closeConn(t testing.TB, conn *pgconn.PgConn) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
require.NoError(t, conn.Close(ctx))
|
||||
select {
|
||||
case <-conn.CleanupDone():
|
||||
case <-time.After(30 * time.Second):
|
||||
t.Fatal("Connection cleanup exceeded maximum time")
|
||||
}
|
||||
}
|
||||
|
||||
// Do a simple query to ensure the connection is still usable
|
||||
func ensureConnValid(t *testing.T, pgConn *pgconn.PgConn) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
result := pgConn.ExecParams(ctx, "select generate_series(1,$1)", [][]byte{[]byte("3")}, nil, nil, nil).Read()
|
||||
cancel()
|
||||
|
||||
require.Nil(t, result.Err)
|
||||
assert.Equal(t, 3, len(result.Rows))
|
||||
assert.Equal(t, "1", string(result.Rows[0][0]))
|
||||
assert.Equal(t, "2", string(result.Rows[1][0]))
|
||||
assert.Equal(t, "3", string(result.Rows[2][0]))
|
||||
}
|
@ -1,139 +0,0 @@
|
||||
// Package bgreader provides a io.Reader that can optionally buffer reads in the background.
|
||||
package bgreader
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/jackc/pgx/v5/internal/iobufpool"
|
||||
)
|
||||
|
||||
// Background reader lifecycle states, stored in BGReader.status.
const (
	StatusStopped  = iota // no background goroutine is running
	StatusRunning         // background goroutine is reading
	StatusStopping        // stop requested; goroutine exits after its current read
)

// BGReader is an io.Reader that can optionally buffer reads in the background. It is safe for concurrent use.
type BGReader struct {
	r io.Reader // underlying reader; read by the worker goroutine, or by Read when stopped

	cond        *sync.Cond   // guards status and readResults; Broadcast signals new results
	status      int32        // one of the Status* constants above
	readResults []readResult // background reads not yet consumed by Read, oldest first
}

// readResult is one completed background read: a pooled buffer holding the
// bytes plus whatever error the underlying reader returned with them.
type readResult struct {
	buf *[]byte
	err error
}
|
||||
|
||||
// Start starts the backgrounder reader. If the background reader is already running this is a no-op. The background
|
||||
// reader will stop automatically when the underlying reader returns an error.
|
||||
func (r *BGReader) Start() {
|
||||
r.cond.L.Lock()
|
||||
defer r.cond.L.Unlock()
|
||||
|
||||
switch r.status {
|
||||
case StatusStopped:
|
||||
r.status = StatusRunning
|
||||
go r.bgRead()
|
||||
case StatusRunning:
|
||||
// no-op
|
||||
case StatusStopping:
|
||||
r.status = StatusRunning
|
||||
}
|
||||
}
|
||||
|
||||
// Stop tells the background reader to stop after the in progress Read returns. It is safe to call Stop when the
|
||||
// background reader is not running.
|
||||
func (r *BGReader) Stop() {
|
||||
r.cond.L.Lock()
|
||||
defer r.cond.L.Unlock()
|
||||
|
||||
switch r.status {
|
||||
case StatusStopped:
|
||||
// no-op
|
||||
case StatusRunning:
|
||||
r.status = StatusStopping
|
||||
case StatusStopping:
|
||||
// no-op
|
||||
}
|
||||
}
|
||||
|
||||
// Status returns the current status of the background reader.
|
||||
func (r *BGReader) Status() int32 {
|
||||
r.cond.L.Lock()
|
||||
defer r.cond.L.Unlock()
|
||||
return r.status
|
||||
}
|
||||
|
||||
// bgRead is the background worker goroutine. It repeatedly reads from the
// underlying reader into pooled buffers and appends each result to
// r.readResults, broadcasting after every read so a blocked Read can wake.
// It exits — marking the reader stopped — once a stop has been requested or
// the underlying reader returns an error.
func (r *BGReader) bgRead() {
	keepReading := true
	for keepReading {
		buf := iobufpool.Get(8192)
		n, err := r.r.Read(*buf)
		*buf = (*buf)[:n]

		r.cond.L.Lock()
		// The error (including io.EOF) is stored with the data so Read can
		// surface it after the bytes are consumed.
		r.readResults = append(r.readResults, readResult{buf: buf, err: err})
		if r.status == StatusStopping || err != nil {
			r.status = StatusStopped
			keepReading = false
		}
		r.cond.L.Unlock()
		// Broadcast outside the critical section; waiters re-acquire the lock
		// in cond.Wait before re-checking readResults.
		r.cond.Broadcast()
	}
}
|
||||
|
||||
// Read implements the io.Reader interface.
//
// Buffered background results are served first. If none are buffered and the
// worker is stopped, Read goes straight to the underlying reader; otherwise
// it blocks until the worker delivers a result.
func (r *BGReader) Read(p []byte) (int, error) {
	r.cond.L.Lock()
	defer r.cond.L.Unlock()

	if len(r.readResults) > 0 {
		return r.readFromReadResults(p)
	}

	// There are no unread background read results and the background reader is stopped.
	if r.status == StatusStopped {
		// Safe to read directly: with the worker stopped and the lock held,
		// nothing else is touching r.r.
		return r.r.Read(p)
	}

	// Wait for results from the background reader
	for len(r.readResults) == 0 {
		r.cond.Wait()
	}
	return r.readFromReadResults(p)
}
|
||||
|
||||
// readFromReadResults reads a result previously read by the background reader. r.cond.L must be held.
//
// It copies from the oldest buffered result into p. The result's error is
// surfaced only once its buffer is fully consumed; a partially consumed
// buffer is trimmed in place and left at the head of the queue.
func (r *BGReader) readFromReadResults(p []byte) (int, error) {
	buf := r.readResults[0].buf
	var err error

	n := copy(p, *buf)
	if n == len(*buf) {
		// Buffer fully drained: take its error, return it to the pool, and
		// pop it from the queue.
		err = r.readResults[0].err
		iobufpool.Put(buf)
		if len(r.readResults) == 1 {
			r.readResults = nil
		} else {
			r.readResults = r.readResults[1:]
		}
	} else {
		// Partial read: keep the unread tail for the next call.
		*buf = (*buf)[n:]
		r.readResults[0].buf = buf
	}

	return n, err
}
|
||||
|
||||
func New(r io.Reader) *BGReader {
|
||||
return &BGReader{
|
||||
r: r,
|
||||
cond: &sync.Cond{
|
||||
L: &sync.Mutex{},
|
||||
},
|
||||
}
|
||||
}
|
@ -1,140 +0,0 @@
|
||||
package bgreader_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgconn/internal/bgreader"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestBGReaderReadWhenStopped(t *testing.T) {
|
||||
r := bytes.NewReader([]byte("foo bar baz"))
|
||||
bgr := bgreader.New(r)
|
||||
buf, err := io.ReadAll(bgr)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("foo bar baz"), buf)
|
||||
}
|
||||
|
||||
func TestBGReaderReadWhenStarted(t *testing.T) {
|
||||
r := bytes.NewReader([]byte("foo bar baz"))
|
||||
bgr := bgreader.New(r)
|
||||
bgr.Start()
|
||||
buf, err := io.ReadAll(bgr)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("foo bar baz"), buf)
|
||||
}
|
||||
|
||||
type mockReadFunc func(p []byte) (int, error)
|
||||
|
||||
type mockReader struct {
|
||||
readFuncs []mockReadFunc
|
||||
}
|
||||
|
||||
func (r *mockReader) Read(p []byte) (int, error) {
|
||||
if len(r.readFuncs) == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
fn := r.readFuncs[0]
|
||||
r.readFuncs = r.readFuncs[1:]
|
||||
|
||||
return fn(p)
|
||||
}
|
||||
|
||||
// TestBGReaderReadWaitsForBackgroundRead checks that a Read issued while the
// background worker is mid-read blocks for that result rather than reading
// the underlying reader directly. The 1s sleep in the first scripted read
// gives Read time to observe the worker as running and block on it.
func TestBGReaderReadWaitsForBackgroundRead(t *testing.T) {
	rr := &mockReader{
		readFuncs: []mockReadFunc{
			func(p []byte) (int, error) { time.Sleep(1 * time.Second); return copy(p, []byte("foo")), nil },
			func(p []byte) (int, error) { return copy(p, []byte("bar")), nil },
			func(p []byte) (int, error) { return copy(p, []byte("baz")), nil },
		},
	}
	bgr := bgreader.New(rr)
	bgr.Start()
	buf := make([]byte, 3)
	n, err := bgr.Read(buf)
	require.NoError(t, err)
	require.EqualValues(t, 3, n)
	require.Equal(t, []byte("foo"), buf)
}
|
||||
|
||||
func TestBGReaderErrorWhenStarted(t *testing.T) {
|
||||
rr := &mockReader{
|
||||
readFuncs: []mockReadFunc{
|
||||
func(p []byte) (int, error) { return copy(p, []byte("foo")), nil },
|
||||
func(p []byte) (int, error) { return copy(p, []byte("bar")), nil },
|
||||
func(p []byte) (int, error) { return copy(p, []byte("baz")), errors.New("oops") },
|
||||
},
|
||||
}
|
||||
|
||||
bgr := bgreader.New(rr)
|
||||
bgr.Start()
|
||||
buf, err := io.ReadAll(bgr)
|
||||
require.Equal(t, []byte("foobarbaz"), buf)
|
||||
require.EqualError(t, err, "oops")
|
||||
}
|
||||
|
||||
func TestBGReaderErrorWhenStopped(t *testing.T) {
|
||||
rr := &mockReader{
|
||||
readFuncs: []mockReadFunc{
|
||||
func(p []byte) (int, error) { return copy(p, []byte("foo")), nil },
|
||||
func(p []byte) (int, error) { return copy(p, []byte("bar")), nil },
|
||||
func(p []byte) (int, error) { return copy(p, []byte("baz")), errors.New("oops") },
|
||||
},
|
||||
}
|
||||
|
||||
bgr := bgreader.New(rr)
|
||||
buf, err := io.ReadAll(bgr)
|
||||
require.Equal(t, []byte("foobarbaz"), buf)
|
||||
require.EqualError(t, err, "oops")
|
||||
}
|
||||
|
||||
type numberReader struct {
|
||||
v uint8
|
||||
rng *rand.Rand
|
||||
}
|
||||
|
||||
func (nr *numberReader) Read(p []byte) (int, error) {
|
||||
n := nr.rng.Intn(len(p))
|
||||
for i := 0; i < n; i++ {
|
||||
p[i] = nr.v
|
||||
nr.v++
|
||||
}
|
||||
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// TestBGReaderStress stress tests BGReader by reading a lot of bytes in random sizes while randomly starting and
// stopping the background worker from other goroutines.
func TestBGReaderStress(t *testing.T) {
	nr := &numberReader{rng: rand.New(rand.NewSource(0))}
	bgr := bgreader.New(nr)

	bytesRead := 0
	var expected uint8 // next expected byte value; wraps along with the source
	buf := make([]byte, 10_000)
	rng := rand.New(rand.NewSource(0))

	for bytesRead < 1_000_000 {
		randomNumber := rng.Intn(100)
		switch {
		case randomNumber < 10:
			// ~10%: flip the worker on from another goroutine.
			go bgr.Start()
		case randomNumber < 20:
			// ~10%: request a stop from another goroutine.
			go bgr.Stop()
		default:
			// ~80%: read and verify the byte sequence is unbroken regardless
			// of which path (direct or background) served it.
			n, err := bgr.Read(buf)
			require.NoError(t, err)
			for i := 0; i < n; i++ {
				require.Equal(t, expected, buf[i])
				expected++
			}
			bytesRead += n
		}
	}
}
|
100
pgconn/krb5.go
100
pgconn/krb5.go
@ -1,100 +0,0 @@
|
||||
package pgconn
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgproto3"
|
||||
)
|
||||
|
||||
// NewGSSFunc creates a GSS authentication provider, for use with
// RegisterGSSProvider.
type NewGSSFunc func() (GSS, error)

// newGSS is the registered provider constructor; nil until
// RegisterGSSProvider is called.
// NOTE(review): access is not synchronized — registration appears intended to
// happen from init/main before any connection is attempted; confirm.
var newGSS NewGSSFunc

// RegisterGSSProvider registers a GSS authentication provider. For example, if
// you need to use Kerberos to authenticate with your server, add this to your
// main package:
//
// import "github.com/otan/gopgkrb5"
//
// func init() {
// 	pgconn.RegisterGSSProvider(func() (pgconn.GSS, error) { return gopgkrb5.NewGSS() })
// }
func RegisterGSSProvider(newGSSArg NewGSSFunc) {
	newGSS = newGSSArg
}
|
||||
|
||||
// GSS provides GSSAPI authentication (e.g., Kerberos).
type GSS interface {
	// GetInitToken produces the initial client token for the given host and
	// service.
	GetInitToken(host, service string) ([]byte, error)
	// GetInitTokenFromSPN produces the initial client token for an explicit
	// service principal name.
	GetInitTokenFromSPN(spn string) ([]byte, error)
	// Continue processes a token from the server; done reports whether the
	// handshake has completed, and outToken (if any) is sent back.
	Continue(inToken []byte) (done bool, outToken []byte, err error)
}
|
||||
|
||||
// gssAuth performs the GSSAPI (e.g. Kerberos) authentication exchange with
// the backend using the provider registered via RegisterGSSProvider. It
// returns an error if no provider has been registered.
func (c *PgConn) gssAuth() error {
	if newGSS == nil {
		return errors.New("kerberos error: no GSSAPI provider registered, see https://github.com/otan/gopgkrb5")
	}
	cli, err := newGSS()
	if err != nil {
		return err
	}

	// Obtain the initial token: from an explicitly configured SPN when set,
	// otherwise from the host plus service name.
	var nextData []byte
	if c.config.KerberosSpn != "" {
		// Use the supplied SPN if provided.
		nextData, err = cli.GetInitTokenFromSPN(c.config.KerberosSpn)
	} else {
		// Allow the kerberos service name to be overridden
		service := "postgres"
		if c.config.KerberosSrvName != "" {
			service = c.config.KerberosSrvName
		}
		nextData, err = cli.GetInitToken(c.config.Host, service)
	}
	if err != nil {
		return err
	}

	// Exchange tokens with the server until the GSS library reports the
	// handshake complete.
	for {
		gssResponse := &pgproto3.GSSResponse{
			Data: nextData,
		}
		c.frontend.Send(gssResponse)
		err = c.flushWithPotentialWriteReadDeadlock()
		if err != nil {
			return err
		}
		resp, err := c.rxGSSContinue()
		if err != nil {
			return err
		}
		var done bool
		done, nextData, err = cli.Continue(resp.Data)
		if err != nil {
			return err
		}
		if done {
			break
		}
	}
	return nil
}
|
||||
|
||||
func (c *PgConn) rxGSSContinue() (*pgproto3.AuthenticationGSSContinue, error) {
|
||||
msg, err := c.receiveMessage()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch m := msg.(type) {
|
||||
case *pgproto3.AuthenticationGSSContinue:
|
||||
return m, nil
|
||||
case *pgproto3.ErrorResponse:
|
||||
return nil, ErrorResponseToPgError(m)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("expected AuthenticationGSSContinue message but received unexpected message %T", msg)
|
||||
}
|
2504
pgconn/pgconn.go
2504
pgconn/pgconn.go
File diff suppressed because it is too large
Load Diff
@ -1,41 +0,0 @@
|
||||
package pgconn
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestCommandTag(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
commandTag CommandTag
|
||||
rowsAffected int64
|
||||
isInsert bool
|
||||
isUpdate bool
|
||||
isDelete bool
|
||||
isSelect bool
|
||||
}{
|
||||
{commandTag: CommandTag{s: "INSERT 0 5"}, rowsAffected: 5, isInsert: true},
|
||||
{commandTag: CommandTag{s: "UPDATE 0"}, rowsAffected: 0, isUpdate: true},
|
||||
{commandTag: CommandTag{s: "UPDATE 1"}, rowsAffected: 1, isUpdate: true},
|
||||
{commandTag: CommandTag{s: "DELETE 0"}, rowsAffected: 0, isDelete: true},
|
||||
{commandTag: CommandTag{s: "DELETE 1"}, rowsAffected: 1, isDelete: true},
|
||||
{commandTag: CommandTag{s: "DELETE 1234567890"}, rowsAffected: 1234567890, isDelete: true},
|
||||
{commandTag: CommandTag{s: "SELECT 1"}, rowsAffected: 1, isSelect: true},
|
||||
{commandTag: CommandTag{s: "SELECT 99999999999"}, rowsAffected: 99999999999, isSelect: true},
|
||||
{commandTag: CommandTag{s: "CREATE TABLE"}, rowsAffected: 0},
|
||||
{commandTag: CommandTag{s: "ALTER TABLE"}, rowsAffected: 0},
|
||||
{commandTag: CommandTag{s: "DROP TABLE"}, rowsAffected: 0},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
ct := tt.commandTag
|
||||
assert.Equalf(t, tt.rowsAffected, ct.RowsAffected(), "%d. %v", i, tt.commandTag)
|
||||
assert.Equalf(t, tt.isInsert, ct.Insert(), "%d. %v", i, tt.commandTag)
|
||||
assert.Equalf(t, tt.isUpdate, ct.Update(), "%d. %v", i, tt.commandTag)
|
||||
assert.Equalf(t, tt.isDelete, ct.Delete(), "%d. %v", i, tt.commandTag)
|
||||
assert.Equalf(t, tt.isSelect, ct.Select(), "%d. %v", i, tt.commandTag)
|
||||
}
|
||||
}
|
@ -1,90 +0,0 @@
|
||||
package pgconn_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/rand"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestConnStress hammers a single connection with a random mix of query
// styles and then checks that goroutines were not leaked. Connects using the
// PGX_TEST_DATABASE environment variable; PGX_TEST_STRESS_FACTOR multiplies
// the workload.
func TestConnStress(t *testing.T) {
	pgConn, err := pgconn.Connect(context.Background(), os.Getenv("PGX_TEST_DATABASE"))
	require.NoError(t, err)
	defer closeConn(t, pgConn)

	actionCount := 10000
	if s := os.Getenv("PGX_TEST_STRESS_FACTOR"); s != "" {
		stressFactor, err := strconv.ParseInt(s, 10, 64)
		require.Nil(t, err, "Failed to parse PGX_TEST_STRESS_FACTOR")
		actionCount *= int(stressFactor)
	}

	setupStressDB(t, pgConn)

	// The three exercised query paths; one is picked at random per iteration.
	actions := []struct {
		name string
		fn   func(*pgconn.PgConn) error
	}{
		{"Exec Select", stressExecSelect},
		{"ExecParams Select", stressExecParamsSelect},
		{"Batch", stressBatch},
	}

	for i := 0; i < actionCount; i++ {
		action := actions[rand.Intn(len(actions))]
		err := action.fn(pgConn)
		require.Nilf(t, err, "%d: %s", i, action.name)
	}

	// Each call with a context starts a goroutine. Ensure they are cleaned up when context is not canceled.
	numGoroutine := runtime.NumGoroutine()
	require.Truef(t, numGoroutine < 1000, "goroutines appear to be orphaned: %d in process", numGoroutine)
}
|
||||
|
||||
// setupStressDB creates and seeds the temporary widgets table used by the
// stress actions. Being temporary, it is dropped automatically when the
// connection closes.
func setupStressDB(t *testing.T, pgConn *pgconn.PgConn) {
	_, err := pgConn.Exec(context.Background(), `
		create temporary table widgets(
			id serial primary key,
			name varchar not null,
			description text,
			creation_time timestamptz default now()
		);

		insert into widgets(name, description) values
			('Foo', 'bar'),
			('baz', 'Something really long Something really long Something really long Something really long Something really long'),
			('a', 'b')`).ReadAll()
	require.NoError(t, err)
}
|
||||
|
||||
func stressExecSelect(pgConn *pgconn.PgConn) error {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
_, err := pgConn.Exec(ctx, "select * from widgets").ReadAll()
|
||||
return err
|
||||
}
|
||||
|
||||
func stressExecParamsSelect(pgConn *pgconn.PgConn) error {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
result := pgConn.ExecParams(ctx, "select * from widgets where id < $1", [][]byte{[]byte("10")}, nil, nil, nil).Read()
|
||||
return result.Err
|
||||
}
|
||||
|
||||
func stressBatch(pgConn *pgconn.PgConn) error {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
batch := &pgconn.Batch{}
|
||||
|
||||
batch.ExecParams("select * from widgets", nil, nil, nil, nil)
|
||||
batch.ExecParams("select * from widgets where id < $1", [][]byte{[]byte("10")}, nil, nil, nil)
|
||||
_, err := pgConn.ExecBatch(ctx, batch).ReadAll()
|
||||
return err
|
||||
}
|
File diff suppressed because it is too large
Load Diff
@ -1,7 +0,0 @@
|
||||
# pgproto3
|
||||
|
||||
Package pgproto3 is an encoder and decoder of the PostgreSQL wire protocol version 3.
|
||||
|
||||
pgproto3 can be used as a foundation for PostgreSQL drivers, proxies, mock servers, load balancers and more.
|
||||
|
||||
See example/pgfortune for a playful example of a fake PostgreSQL server.
|
@ -1,50 +0,0 @@
|
||||
package pgproto3
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
|
||||
"github.com/jackc/pgx/v5/internal/pgio"
|
||||
)
|
||||
|
||||
// AuthenticationCleartextPassword is a message sent from the backend indicating that a clear-text password is required.
type AuthenticationCleartextPassword struct{}

// Backend identifies this message as sendable by the PostgreSQL backend.
func (*AuthenticationCleartextPassword) Backend() {}

// AuthenticationResponse identifies this message as an authentication response.
func (*AuthenticationCleartextPassword) AuthenticationResponse() {}

// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
// type identifier and 4 byte message length.
func (dst *AuthenticationCleartextPassword) Decode(src []byte) error {
	// The body is exactly the 4-byte authentication type code.
	if len(src) != 4 {
		return errors.New("bad authentication message size")
	}

	authType := binary.BigEndian.Uint32(src)

	if authType != AuthTypeCleartextPassword {
		return errors.New("bad auth type")
	}

	return nil
}

// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
func (src *AuthenticationCleartextPassword) Encode(dst []byte) ([]byte, error) {
	dst, sp := beginMessage(dst, 'R')
	dst = pgio.AppendUint32(dst, AuthTypeCleartextPassword)
	return finishMessage(dst, sp)
}

// MarshalJSON implements encoding/json.Marshaler.
func (src AuthenticationCleartextPassword) MarshalJSON() ([]byte, error) {
	return json.Marshal(struct {
		Type string
	}{
		Type: "AuthenticationCleartextPassword",
	})
}
|
@ -1,58 +0,0 @@
|
||||
package pgproto3
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
|
||||
"github.com/jackc/pgx/v5/internal/pgio"
|
||||
)
|
||||
|
||||
// AuthenticationGSS is a message sent from the backend requesting GSSAPI
// authentication.
type AuthenticationGSS struct{}

// Backend identifies this message as sendable by the PostgreSQL backend.
func (a *AuthenticationGSS) Backend() {}

// AuthenticationResponse identifies this message as an authentication response.
func (a *AuthenticationGSS) AuthenticationResponse() {}

// Decode decodes src into a. src must contain the complete message body
// (without the 1 byte type identifier and 4 byte length).
func (a *AuthenticationGSS) Decode(src []byte) error {
	if len(src) < 4 {
		return errors.New("authentication message too short")
	}

	authType := binary.BigEndian.Uint32(src)

	if authType != AuthTypeGSS {
		return errors.New("bad auth type")
	}
	return nil
}

// Encode encodes a into dst, including the 1 byte message type identifier
// and the 4 byte message length.
func (a *AuthenticationGSS) Encode(dst []byte) ([]byte, error) {
	dst, sp := beginMessage(dst, 'R')
	dst = pgio.AppendUint32(dst, AuthTypeGSS)
	return finishMessage(dst, sp)
}

// MarshalJSON implements encoding/json.Marshaler.
// NOTE(review): the anonymous struct carries a Data field that is never set
// and has no counterpart on AuthenticationGSS, so it always marshals as
// null — confirm whether it is intentional.
func (a *AuthenticationGSS) MarshalJSON() ([]byte, error) {
	return json.Marshal(struct {
		Type string
		Data []byte
	}{
		Type: "AuthenticationGSS",
	})
}

// UnmarshalJSON implements encoding/json.Unmarshaler. The type carries no
// state, so only JSON validity (and the null convention) is checked.
func (a *AuthenticationGSS) UnmarshalJSON(data []byte) error {
	// Ignore null, like in the main JSON package.
	if string(data) == "null" {
		return nil
	}

	var msg struct {
		Type string
	}
	if err := json.Unmarshal(data, &msg); err != nil {
		return err
	}
	return nil
}
|
@ -1,67 +0,0 @@
|
||||
package pgproto3
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
|
||||
"github.com/jackc/pgx/v5/internal/pgio"
|
||||
)
|
||||
|
||||
// AuthenticationGSSContinue is a backend message carrying a GSSAPI token for
// the client to process during the GSS handshake.
type AuthenticationGSSContinue struct {
	Data []byte
}

// Backend identifies this message as sendable by the PostgreSQL backend.
func (a *AuthenticationGSSContinue) Backend() {}

// AuthenticationResponse identifies this message as an authentication response.
func (a *AuthenticationGSSContinue) AuthenticationResponse() {}

// Decode decodes src into a. src must contain the complete message body
// (without the 1 byte type identifier and 4 byte length).
func (a *AuthenticationGSSContinue) Decode(src []byte) error {
	if len(src) < 4 {
		return errors.New("authentication message too short")
	}

	authType := binary.BigEndian.Uint32(src)

	if authType != AuthTypeGSSCont {
		return errors.New("bad auth type")
	}

	// Everything after the auth type code is the opaque GSS token.
	a.Data = src[4:]
	return nil
}

// Encode encodes a into dst, including the 1 byte message type identifier
// and the 4 byte message length.
func (a *AuthenticationGSSContinue) Encode(dst []byte) ([]byte, error) {
	dst, sp := beginMessage(dst, 'R')
	dst = pgio.AppendUint32(dst, AuthTypeGSSCont)
	dst = append(dst, a.Data...)
	return finishMessage(dst, sp)
}

// MarshalJSON implements encoding/json.Marshaler.
func (a *AuthenticationGSSContinue) MarshalJSON() ([]byte, error) {
	return json.Marshal(struct {
		Type string
		Data []byte
	}{
		Type: "AuthenticationGSSContinue",
		Data: a.Data,
	})
}

// UnmarshalJSON implements encoding/json.Unmarshaler.
func (a *AuthenticationGSSContinue) UnmarshalJSON(data []byte) error {
	// Ignore null, like in the main JSON package.
	if string(data) == "null" {
		return nil
	}

	var msg struct {
		Type string
		Data []byte
	}
	if err := json.Unmarshal(data, &msg); err != nil {
		return err
	}

	a.Data = msg.Data
	return nil
}
|
@ -1,76 +0,0 @@
|
||||
package pgproto3
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
|
||||
"github.com/jackc/pgx/v5/internal/pgio"
|
||||
)
|
||||
|
||||
// AuthenticationMD5Password is a message sent from the backend indicating that an MD5 hashed password is required.
type AuthenticationMD5Password struct {
	Salt [4]byte
}

// Backend identifies this message as sendable by the PostgreSQL backend.
func (*AuthenticationMD5Password) Backend() {}

// AuthenticationResponse identifies this message as an authentication response.
func (*AuthenticationMD5Password) AuthenticationResponse() {}

// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
// type identifier and 4 byte message length.
func (dst *AuthenticationMD5Password) Decode(src []byte) error {
	// Body is the 4-byte auth type code followed by the 4-byte salt.
	if len(src) != 8 {
		return errors.New("bad authentication message size")
	}

	authType := binary.BigEndian.Uint32(src)

	if authType != AuthTypeMD5Password {
		return errors.New("bad auth type")
	}

	copy(dst.Salt[:], src[4:8])

	return nil
}

// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
func (src *AuthenticationMD5Password) Encode(dst []byte) ([]byte, error) {
	dst, sp := beginMessage(dst, 'R')
	dst = pgio.AppendUint32(dst, AuthTypeMD5Password)
	dst = append(dst, src.Salt[:]...)
	return finishMessage(dst, sp)
}

// MarshalJSON implements encoding/json.Marshaler.
func (src AuthenticationMD5Password) MarshalJSON() ([]byte, error) {
	return json.Marshal(struct {
		Type string
		Salt [4]byte
	}{
		Type: "AuthenticationMD5Password",
		Salt: src.Salt,
	})
}

// UnmarshalJSON implements encoding/json.Unmarshaler.
func (dst *AuthenticationMD5Password) UnmarshalJSON(data []byte) error {
	// Ignore null, like in the main JSON package.
	if string(data) == "null" {
		return nil
	}

	var msg struct {
		Type string
		Salt [4]byte
	}
	if err := json.Unmarshal(data, &msg); err != nil {
		return err
	}

	dst.Salt = msg.Salt
	return nil
}
|
@ -1,50 +0,0 @@
|
||||
package pgproto3
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
|
||||
"github.com/jackc/pgx/v5/internal/pgio"
|
||||
)
|
||||
|
||||
// AuthenticationOk is a message sent from the backend indicating that authentication was successful.
|
||||
type AuthenticationOk struct{}
|
||||
|
||||
// Backend identifies this message as sendable by the PostgreSQL backend.
|
||||
func (*AuthenticationOk) Backend() {}
|
||||
|
||||
// Backend identifies this message as an authentication response.
|
||||
func (*AuthenticationOk) AuthenticationResponse() {}
|
||||
|
||||
// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
|
||||
// type identifier and 4 byte message length.
|
||||
func (dst *AuthenticationOk) Decode(src []byte) error {
|
||||
if len(src) != 4 {
|
||||
return errors.New("bad authentication message size")
|
||||
}
|
||||
|
||||
authType := binary.BigEndian.Uint32(src)
|
||||
|
||||
if authType != AuthTypeOk {
|
||||
return errors.New("bad auth type")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
|
||||
func (src *AuthenticationOk) Encode(dst []byte) ([]byte, error) {
|
||||
dst, sp := beginMessage(dst, 'R')
|
||||
dst = pgio.AppendUint32(dst, AuthTypeOk)
|
||||
return finishMessage(dst, sp)
|
||||
}
|
||||
|
||||
// MarshalJSON implements encoding/json.Marshaler.
|
||||
func (src AuthenticationOk) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Type string
|
||||
}{
|
||||
Type: "AuthenticationOK",
|
||||
})
|
||||
}
|
@ -1,72 +0,0 @@
|
||||
package pgproto3
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
|
||||
"github.com/jackc/pgx/v5/internal/pgio"
|
||||
)
|
||||
|
||||
// AuthenticationSASL is a message sent from the backend indicating that SASL authentication is required.
type AuthenticationSASL struct {
	AuthMechanisms []string
}

// Backend identifies this message as sendable by the PostgreSQL backend.
func (*AuthenticationSASL) Backend() {}

// AuthenticationResponse identifies this message as an authentication response.
func (*AuthenticationSASL) AuthenticationResponse() {}

// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
// type identifier and 4 byte message length.
func (dst *AuthenticationSASL) Decode(src []byte) error {
	if len(src) < 4 {
		return errors.New("authentication message too short")
	}

	authType := binary.BigEndian.Uint32(src)

	if authType != AuthTypeSASL {
		return errors.New("bad auth type")
	}

	// The body is a NUL-terminated list of NUL-terminated mechanism names.
	// The `> 1` bound leaves the list's final extra NUL terminator unparsed.
	authMechanisms := src[4:]
	for len(authMechanisms) > 1 {
		idx := bytes.IndexByte(authMechanisms, 0)
		if idx == -1 {
			return &invalidMessageFormatErr{messageType: "AuthenticationSASL", details: "unterminated string"}
		}
		dst.AuthMechanisms = append(dst.AuthMechanisms, string(authMechanisms[:idx]))
		authMechanisms = authMechanisms[idx+1:]
	}

	return nil
}

// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
func (src *AuthenticationSASL) Encode(dst []byte) ([]byte, error) {
	dst, sp := beginMessage(dst, 'R')
	dst = pgio.AppendUint32(dst, AuthTypeSASL)

	for _, s := range src.AuthMechanisms {
		dst = append(dst, []byte(s)...)
		dst = append(dst, 0)
	}
	// Trailing NUL terminates the mechanism list.
	dst = append(dst, 0)

	return finishMessage(dst, sp)
}

// MarshalJSON implements encoding/json.Marshaler.
func (src AuthenticationSASL) MarshalJSON() ([]byte, error) {
	return json.Marshal(struct {
		Type           string
		AuthMechanisms []string
	}{
		Type:           "AuthenticationSASL",
		AuthMechanisms: src.AuthMechanisms,
	})
}
|
@ -1,75 +0,0 @@
|
||||
package pgproto3
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
|
||||
"github.com/jackc/pgx/v5/internal/pgio"
|
||||
)
|
||||
|
||||
// AuthenticationSASLContinue is a message sent from the backend containing a SASL challenge.
type AuthenticationSASLContinue struct {
	Data []byte
}

// Backend identifies this message as sendable by the PostgreSQL backend.
func (*AuthenticationSASLContinue) Backend() {}

// AuthenticationResponse identifies this message as an authentication response.
func (*AuthenticationSASLContinue) AuthenticationResponse() {}

// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
// type identifier and 4 byte message length.
func (dst *AuthenticationSASLContinue) Decode(src []byte) error {
	if len(src) < 4 {
		return errors.New("authentication message too short")
	}

	authType := binary.BigEndian.Uint32(src)

	if authType != AuthTypeSASLContinue {
		return errors.New("bad auth type")
	}

	// Everything after the auth type code is the SASL challenge payload.
	dst.Data = src[4:]

	return nil
}

// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
func (src *AuthenticationSASLContinue) Encode(dst []byte) ([]byte, error) {
	dst, sp := beginMessage(dst, 'R')
	dst = pgio.AppendUint32(dst, AuthTypeSASLContinue)
	dst = append(dst, src.Data...)
	return finishMessage(dst, sp)
}

// MarshalJSON implements encoding/json.Marshaler.
func (src AuthenticationSASLContinue) MarshalJSON() ([]byte, error) {
	return json.Marshal(struct {
		Type string
		Data string
	}{
		Type: "AuthenticationSASLContinue",
		Data: string(src.Data),
	})
}

// UnmarshalJSON implements encoding/json.Unmarshaler.
func (dst *AuthenticationSASLContinue) UnmarshalJSON(data []byte) error {
	// Ignore null, like in the main JSON package.
	if string(data) == "null" {
		return nil
	}

	var msg struct {
		Data string
	}
	if err := json.Unmarshal(data, &msg); err != nil {
		return err
	}

	dst.Data = []byte(msg.Data)
	return nil
}
|
@ -1,75 +0,0 @@
|
||||
package pgproto3
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
|
||||
"github.com/jackc/pgx/v5/internal/pgio"
|
||||
)
|
||||
|
||||
// AuthenticationSASLFinal is a message sent from the backend indicating a SASL authentication has completed.
type AuthenticationSASLFinal struct {
	// Data is the raw message body following the 4 byte authentication type
	// code (see Decode).
	Data []byte
}

// Backend identifies this message as sendable by the PostgreSQL backend.
func (*AuthenticationSASLFinal) Backend() {}

// AuthenticationResponse identifies this message as an authentication response.
func (*AuthenticationSASLFinal) AuthenticationResponse() {}
||||
|
||||
// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
|
||||
// type identifier and 4 byte message length.
|
||||
func (dst *AuthenticationSASLFinal) Decode(src []byte) error {
|
||||
if len(src) < 4 {
|
||||
return errors.New("authentication message too short")
|
||||
}
|
||||
|
||||
authType := binary.BigEndian.Uint32(src)
|
||||
|
||||
if authType != AuthTypeSASLFinal {
|
||||
return errors.New("bad auth type")
|
||||
}
|
||||
|
||||
dst.Data = src[4:]
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
|
||||
func (src *AuthenticationSASLFinal) Encode(dst []byte) ([]byte, error) {
|
||||
dst, sp := beginMessage(dst, 'R')
|
||||
dst = pgio.AppendUint32(dst, AuthTypeSASLFinal)
|
||||
dst = append(dst, src.Data...)
|
||||
return finishMessage(dst, sp)
|
||||
}
|
||||
|
||||
// MarshalJSON implements encoding/json.Marshaler.
func (src AuthenticationSASLFinal) MarshalJSON() ([]byte, error) {
	return json.Marshal(struct {
		Type string
		Data string
	}{
		Type: "AuthenticationSASLFinal",
		Data: string(src.Data),
	})
}
|
||||
|
||||
// UnmarshalJSON implements encoding/json.Unmarshaler.
|
||||
func (dst *AuthenticationSASLFinal) UnmarshalJSON(data []byte) error {
|
||||
// Ignore null, like in the main JSON package.
|
||||
if string(data) == "null" {
|
||||
return nil
|
||||
}
|
||||
|
||||
var msg struct {
|
||||
Data string
|
||||
}
|
||||
if err := json.Unmarshal(data, &msg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dst.Data = []byte(msg.Data)
|
||||
return nil
|
||||
}
|
@ -1,299 +0,0 @@
|
||||
package pgproto3
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Backend acts as a server for the PostgreSQL wire protocol version 3.
type Backend struct {
	cr *chunkReader // reader over the connection from the frontend
	w  io.Writer    // writer for the connection to the frontend

	// tracer is used to trace messages when Send or Receive is called. This means an outbound message is traced
	// before it is actually transmitted (i.e. before Flush).
	tracer *tracer

	wbuf        []byte // encoded outbound messages buffered until Flush
	encodeError error  // first Encode failure from Send; reported and cleared by Flush

	// Frontend message flyweights. These are reused across calls to Receive,
	// which is why a received message is only valid until the next Receive.
	bind           Bind
	cancelRequest  CancelRequest
	_close         Close
	copyFail       CopyFail
	copyData       CopyData
	copyDone       CopyDone
	describe       Describe
	execute        Execute
	flush          Flush
	functionCall   FunctionCall
	gssEncRequest  GSSEncRequest
	parse          Parse
	query          Query
	sslRequest     SSLRequest
	startupMessage StartupMessage
	sync           Sync
	terminate      Terminate

	bodyLen    int  // body length in octets of the message currently being read
	maxBodyLen int  // maxBodyLen is the maximum length of a message body in octets. If a message body exceeds this length, Receive will return an error.
	msgType    byte // type byte of the message currently being read
	partialMsg bool // true when a header has been read but its body has not yet been consumed
	authType   uint32 // most recent auth type recorded via SetAuthType; disambiguates 'p' messages in Receive
}
|
||||
|
||||
const (
|
||||
minStartupPacketLen = 4 // minStartupPacketLen is a single 32-bit int version or code.
|
||||
maxStartupPacketLen = 10000 // maxStartupPacketLen is MAX_STARTUP_PACKET_LENGTH from PG source.
|
||||
)
|
||||
|
||||
// NewBackend creates a new Backend.
|
||||
func NewBackend(r io.Reader, w io.Writer) *Backend {
|
||||
cr := newChunkReader(r, 0)
|
||||
return &Backend{cr: cr, w: w}
|
||||
}
|
||||
|
||||
// Send sends a message to the frontend (i.e. the client). The message is buffered until Flush is called. Any error
|
||||
// encountered will be returned from Flush.
|
||||
func (b *Backend) Send(msg BackendMessage) {
|
||||
if b.encodeError != nil {
|
||||
return
|
||||
}
|
||||
|
||||
prevLen := len(b.wbuf)
|
||||
newBuf, err := msg.Encode(b.wbuf)
|
||||
if err != nil {
|
||||
b.encodeError = err
|
||||
return
|
||||
}
|
||||
b.wbuf = newBuf
|
||||
|
||||
if b.tracer != nil {
|
||||
b.tracer.traceMessage('B', int32(len(b.wbuf)-prevLen), msg)
|
||||
}
|
||||
}
|
||||
|
||||
// Flush writes any pending messages to the frontend (i.e. the client).
func (b *Backend) Flush() error {
	// A prior Send failed to encode. Nothing from the failing message was
	// buffered, so the caller may safely retry.
	if err := b.encodeError; err != nil {
		b.encodeError = nil
		b.wbuf = b.wbuf[:0]
		return &writeError{err: err, safeToRetry: true}
	}

	n, err := b.w.Write(b.wbuf)

	// Shrink an oversized buffer back to a bounded capacity so one large
	// message does not pin memory for the life of the connection.
	const maxLen = 1024
	if len(b.wbuf) > maxLen {
		b.wbuf = make([]byte, 0, maxLen)
	} else {
		b.wbuf = b.wbuf[:0]
	}

	if err != nil {
		// Retrying is only safe if nothing was actually written.
		return &writeError{err: err, safeToRetry: n == 0}
	}

	return nil
}
|
||||
|
||||
// Trace starts tracing the message traffic to w. It writes in a similar format to that produced by the libpq function
|
||||
// PQtrace.
|
||||
func (b *Backend) Trace(w io.Writer, options TracerOptions) {
|
||||
b.tracer = &tracer{
|
||||
w: w,
|
||||
buf: &bytes.Buffer{},
|
||||
TracerOptions: options,
|
||||
}
|
||||
}
|
||||
|
||||
// Untrace stops tracing message traffic previously started with Trace.
func (b *Backend) Untrace() {
	b.tracer = nil
}
|
||||
|
||||
// ReceiveStartupMessage receives the initial connection message. This method is used instead of the normal Receive
// method because the initial connection message is "special" and does not include the message type as the first byte.
// This will return either a StartupMessage, SSLRequest, GSSEncRequest, or CancelRequest.
func (b *Backend) ReceiveStartupMessage() (FrontendMessage, error) {
	// The first 4 bytes are the length of the whole packet, including the
	// length field itself.
	buf, err := b.cr.Next(4)
	if err != nil {
		return nil, err
	}
	msgSize := int(binary.BigEndian.Uint32(buf) - 4)

	if msgSize < minStartupPacketLen || msgSize > maxStartupPacketLen {
		return nil, fmt.Errorf("invalid length of startup packet: %d", msgSize)
	}

	buf, err = b.cr.Next(msgSize)
	if err != nil {
		return nil, translateEOFtoErrUnexpectedEOF(err)
	}

	// The first 4 bytes of the body identify which kind of startup packet this is.
	code := binary.BigEndian.Uint32(buf)

	switch code {
	case ProtocolVersionNumber:
		err = b.startupMessage.Decode(buf)
		if err != nil {
			return nil, err
		}
		return &b.startupMessage, nil
	case sslRequestNumber:
		err = b.sslRequest.Decode(buf)
		if err != nil {
			return nil, err
		}
		return &b.sslRequest, nil
	case cancelRequestCode:
		err = b.cancelRequest.Decode(buf)
		if err != nil {
			return nil, err
		}
		return &b.cancelRequest, nil
	case gssEncReqNumber:
		err = b.gssEncRequest.Decode(buf)
		if err != nil {
			return nil, err
		}
		return &b.gssEncRequest, nil
	default:
		return nil, fmt.Errorf("unknown startup message code: %d", code)
	}
}
|
||||
|
||||
// Receive receives a message from the frontend. The returned message is only valid until the next call to Receive.
func (b *Backend) Receive() (FrontendMessage, error) {
	if !b.partialMsg {
		// Read the 5 byte header: 1 byte message type and 4 byte message length.
		header, err := b.cr.Next(5)
		if err != nil {
			return nil, translateEOFtoErrUnexpectedEOF(err)
		}

		b.msgType = header[0]

		msgLength := int(binary.BigEndian.Uint32(header[1:]))
		if msgLength < 4 {
			return nil, fmt.Errorf("invalid message length: %d", msgLength)
		}

		// The declared length includes the 4 length bytes themselves.
		b.bodyLen = msgLength - 4
		if b.maxBodyLen > 0 && b.bodyLen > b.maxBodyLen {
			return nil, &ExceededMaxBodyLenErr{b.maxBodyLen, b.bodyLen}
		}
		// Remember the header has been consumed so an interrupted body read
		// can be resumed by a later call to Receive.
		b.partialMsg = true
	}

	// Select the flyweight (or, for authentication responses, a fresh value)
	// matching the message type byte.
	var msg FrontendMessage
	switch b.msgType {
	case 'B':
		msg = &b.bind
	case 'C':
		msg = &b._close
	case 'D':
		msg = &b.describe
	case 'E':
		msg = &b.execute
	case 'F':
		msg = &b.functionCall
	case 'f':
		msg = &b.copyFail
	case 'd':
		msg = &b.copyData
	case 'c':
		msg = &b.copyDone
	case 'H':
		msg = &b.flush
	case 'P':
		msg = &b.parse
	case 'p':
		// 'p' is shared by several authentication response messages. The
		// concrete type depends on the auth request most recently recorded
		// via SetAuthType.
		switch b.authType {
		case AuthTypeSASL:
			msg = &SASLInitialResponse{}
		case AuthTypeSASLContinue:
			msg = &SASLResponse{}
		case AuthTypeSASLFinal:
			msg = &SASLResponse{}
		case AuthTypeGSS, AuthTypeGSSCont:
			msg = &GSSResponse{}
		case AuthTypeCleartextPassword, AuthTypeMD5Password:
			fallthrough
		default:
			// to maintain backwards compatibility
			msg = &PasswordMessage{}
		}
	case 'Q':
		msg = &b.query
	case 'S':
		msg = &b.sync
	case 'X':
		msg = &b.terminate
	default:
		return nil, fmt.Errorf("unknown message type: %c", b.msgType)
	}

	msgBody, err := b.cr.Next(b.bodyLen)
	if err != nil {
		return nil, translateEOFtoErrUnexpectedEOF(err)
	}

	b.partialMsg = false

	err = msg.Decode(msgBody)
	if err != nil {
		return nil, err
	}

	if b.tracer != nil {
		b.tracer.traceMessage('F', int32(5+len(msgBody)), msg)
	}

	return msg, nil
}
|
||||
|
||||
// SetAuthType sets the authentication type in the backend.
|
||||
// Since multiple message types can start with 'p', SetAuthType allows
|
||||
// contextual identification of FrontendMessages. For example, in the
|
||||
// PG message flow documentation for PasswordMessage:
|
||||
//
|
||||
// Byte1('p')
|
||||
//
|
||||
// Identifies the message as a password response. Note that this is also used for
|
||||
// GSSAPI, SSPI and SASL response messages. The exact message type can be deduced from
|
||||
// the context.
|
||||
//
|
||||
// Since the Frontend does not know about the state of a backend, it is important
|
||||
// to call SetAuthType() after an authentication request is received by the Frontend.
|
||||
func (b *Backend) SetAuthType(authType uint32) error {
|
||||
switch authType {
|
||||
case AuthTypeOk,
|
||||
AuthTypeCleartextPassword,
|
||||
AuthTypeMD5Password,
|
||||
AuthTypeSCMCreds,
|
||||
AuthTypeGSS,
|
||||
AuthTypeGSSCont,
|
||||
AuthTypeSSPI,
|
||||
AuthTypeSASL,
|
||||
AuthTypeSASLContinue,
|
||||
AuthTypeSASLFinal:
|
||||
b.authType = authType
|
||||
default:
|
||||
return fmt.Errorf("authType not recognized: %d", authType)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetMaxBodyLen sets the maximum length of a message body in octets.
// If a message body exceeds this length, Receive will return an error.
// This is useful for protecting against malicious clients that send
// large messages with the intent of causing memory exhaustion.
// The default value is 0, which means no maximum is enforced.
func (b *Backend) SetMaxBodyLen(maxBodyLen int) {
	b.maxBodyLen = maxBodyLen
}
|
@ -1,50 +0,0 @@
|
||||
package pgproto3
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/jackc/pgx/v5/internal/pgio"
|
||||
)
|
||||
|
||||
type BackendKeyData struct {
|
||||
ProcessID uint32
|
||||
SecretKey uint32
|
||||
}
|
||||
|
||||
// Backend identifies this message as sendable by the PostgreSQL backend.
|
||||
func (*BackendKeyData) Backend() {}
|
||||
|
||||
// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
|
||||
// type identifier and 4 byte message length.
|
||||
func (dst *BackendKeyData) Decode(src []byte) error {
|
||||
if len(src) != 8 {
|
||||
return &invalidMessageLenErr{messageType: "BackendKeyData", expectedLen: 8, actualLen: len(src)}
|
||||
}
|
||||
|
||||
dst.ProcessID = binary.BigEndian.Uint32(src[:4])
|
||||
dst.SecretKey = binary.BigEndian.Uint32(src[4:])
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
|
||||
func (src *BackendKeyData) Encode(dst []byte) ([]byte, error) {
|
||||
dst, sp := beginMessage(dst, 'K')
|
||||
dst = pgio.AppendUint32(dst, src.ProcessID)
|
||||
dst = pgio.AppendUint32(dst, src.SecretKey)
|
||||
return finishMessage(dst, sp)
|
||||
}
|
||||
|
||||
// MarshalJSON implements encoding/json.Marshaler.
|
||||
func (src BackendKeyData) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Type string
|
||||
ProcessID uint32
|
||||
SecretKey uint32
|
||||
}{
|
||||
Type: "BackendKeyData",
|
||||
ProcessID: src.ProcessID,
|
||||
SecretKey: src.SecretKey,
|
||||
})
|
||||
}
|
@ -1,140 +0,0 @@
|
||||
package pgproto3_test
|
||||
|
||||
import (
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/jackc/pgx/v5/internal/pgio"
|
||||
"github.com/jackc/pgx/v5/pgproto3"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestBackendReceiveInterrupted verifies that Receive can resume reading a
// message whose body was not yet available on the first call.
func TestBackendReceiveInterrupted(t *testing.T) {
	t.Parallel()

	// Push only the 5 byte header ('Q' with a declared 2 byte body); the body
	// itself is not readable yet.
	server := &interruptReader{}
	server.push([]byte{'Q', 0, 0, 0, 6})

	backend := pgproto3.NewBackend(server, nil)

	msg, err := backend.Receive()
	if err == nil {
		t.Fatal("expected err")
	}
	if msg != nil {
		t.Fatalf("did not expect msg, but %v", msg)
	}

	// Now supply the 2 byte body and retry; Receive should pick up where it
	// left off and return the complete Query message.
	server.push([]byte{'I', 0})

	msg, err = backend.Receive()
	if err != nil {
		t.Fatal(err)
	}
	if msg, ok := msg.(*pgproto3.Query); !ok || msg.String != "I" {
		t.Fatalf("unexpected msg: %v", msg)
	}
}
|
||||
|
||||
// TestBackendReceiveUnexpectedEOF verifies that truncated input surfaces as
// io.ErrUnexpectedEOF from both Receive and ReceiveStartupMessage.
func TestBackendReceiveUnexpectedEOF(t *testing.T) {
	t.Parallel()

	server := &interruptReader{}
	server.push([]byte{'Q', 0, 0, 0, 6})

	backend := pgproto3.NewBackend(server, nil)

	// Receive regular msg: the header promises a body that never arrives.
	msg, err := backend.Receive()
	assert.Nil(t, msg)
	assert.Equal(t, io.ErrUnexpectedEOF, err)

	// Receive StartupMessage msg
	dst := []byte{}
	dst = pgio.AppendUint32(dst, 1000) // tell the backend we expect 1000 bytes to be read
	dst = pgio.AppendUint32(dst, 1)    // only send 1 byte
	server.push(dst)

	msg, err = backend.ReceiveStartupMessage()
	assert.Nil(t, msg)
	assert.Equal(t, io.ErrUnexpectedEOF, err)
}
|
||||
|
||||
// TestStartupMessage covers ReceiveStartupMessage for a well-formed
// StartupMessage round trip and for out-of-range packet lengths.
func TestStartupMessage(t *testing.T) {
	t.Parallel()

	t.Run("valid StartupMessage", func(t *testing.T) {
		// Encode a StartupMessage and confirm it decodes back to an equal value.
		want := &pgproto3.StartupMessage{
			ProtocolVersion: pgproto3.ProtocolVersionNumber,
			Parameters: map[string]string{
				"username": "tester",
			},
		}
		dst, err := want.Encode([]byte{})
		require.NoError(t, err)

		server := &interruptReader{}
		server.push(dst)

		backend := pgproto3.NewBackend(server, nil)

		msg, err := backend.ReceiveStartupMessage()
		require.NoError(t, err)
		require.Equal(t, want, msg)
	})

	t.Run("invalid packet length", func(t *testing.T) {
		wantErr := "invalid length of startup packet"
		tests := []struct {
			name      string
			packetLen uint32
		}{
			{
				name: "large packet length",
				// Since the StartupMessage contains the "Length of message contents
				// in bytes, including self", the max startup packet length is actually
				// 10000+4. Therefore, let's go past the limit with 10005
				packetLen: 10005,
			},
			{
				name:      "short packet length",
				packetLen: 3,
			},
		}
		for _, tt := range tests {
			t.Run(tt.name, func(t *testing.T) {
				// Hand-build a packet whose declared length is out of range.
				server := &interruptReader{}
				dst := []byte{}
				dst = pgio.AppendUint32(dst, tt.packetLen)
				dst = pgio.AppendUint32(dst, pgproto3.ProtocolVersionNumber)
				server.push(dst)

				backend := pgproto3.NewBackend(server, nil)

				msg, err := backend.ReceiveStartupMessage()
				require.Error(t, err)
				require.Nil(t, msg)
				require.Contains(t, err.Error(), wantErr)
			})
		}
	})
}
|
||||
|
||||
// TestBackendReceiveExceededMaxBodyLen verifies that Receive rejects a message
// whose declared body length exceeds the limit set via SetMaxBodyLen.
func TestBackendReceiveExceededMaxBodyLen(t *testing.T) {
	t.Parallel()

	// Header declares a large body (length bytes 0,0,10,10).
	server := &interruptReader{}
	server.push([]byte{'Q', 0, 0, 10, 10})

	backend := pgproto3.NewBackend(server, nil)

	// Set max body len to 5
	backend.SetMaxBodyLen(5)

	// Receive regular msg
	msg, err := backend.Receive()
	assert.Nil(t, msg)
	var invalidBodyLenErr *pgproto3.ExceededMaxBodyLenErr
	assert.ErrorAs(t, err, &invalidBodyLenErr)
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user