Compare commits

...

2108 Commits

Author SHA1 Message Date
Jack Christensen
95fc31294f Add link to github.com/KoNekoD/pgx-colon-query-rewriter 2025-08-31 08:38:21 -05:00
Jack Christensen
5534fa9a02 Improve Rows docs
fixes https://github.com/jackc/pgx/issues/2373
2025-08-31 08:24:22 -05:00
Jack Christensen
a295d68811
Merge pull request #2368 from zeghong/zeronull-int-valuer
Implement Int64Valuer for zeronull int types
2025-08-30 18:51:03 -05:00
Jack Christensen
03f32c06bd
Merge branch 'master' into zeronull-int-valuer 2025-08-30 18:50:54 -05:00
Jack Christensen
82fbe49fec
Merge pull request #2372 from WGH-/improve-batch-doc
Add note about QueuedQuery.Exec and BatchResults.Close
2025-08-30 18:40:12 -05:00
Jack Christensen
594d9d65dc
Merge pull request #2367 from zeghong/zeronull-int-scanner
Ensure zeronull int types implement Int64Scanner
2025-08-30 18:40:05 -05:00
Jack Christensen
5a18241971
Merge branch 'master' into zeronull-int-scanner 2025-08-30 18:36:19 -05:00
Jack Christensen
cc34da5884
Merge pull request #2370 from Saurabh2402/improvement/setup-linters
Improvement/setup formatters
2025-08-30 18:24:13 -05:00
Jack Christensen
dd81f81e2f
Merge pull request #2369 from zeghong/go-doc-links
Add support for Go doc comment links
2025-08-30 18:00:27 -05:00
Jack Christensen
839acbaf18 Disable test on CRDB 2025-08-30 17:57:10 -05:00
Jack Christensen
d1a00a6cd4
Merge pull request #2361 from Jk1484/statementCacheBug
invalidate cache on batch error for batchResults and pipelineBatchResults
2025-08-30 17:54:41 -05:00
Jack Christensen
81c0db4f49 Skip TestConnCopyFromConnectionTerminated on CRDB 2025-08-30 17:44:07 -05:00
Jack Christensen
1516fb8125 Fix race condition in test 2025-08-30 17:26:34 -05:00
Jack Christensen
b1ef6d90c0 Merge commit 'cec5ebac' 2025-08-30 12:45:04 -05:00
Jack Christensen
59c73af6bb Add failing test for CopyFrom connection terminated
https://github.com/jackc/pgx/issues/2364
2025-08-30 12:39:12 -05:00
WGH
248afe61b1 Add note about QueuedQuery.Exec and BatchResults.Close
When reviewing my own old code, I noticed several monstrosities like
this:

    batchResult := tx.SendBatch(ctx, batch)
    defer batchResult.Close()
    for range batch.Len() {
            if _, err := batchResult.Exec(); err != nil {
                    return err
            }
    }
    return nil

All of them can be replaced with just this:

    return tx.SendBatch(ctx, batch).Close()

So I thought it might be a good idea to give a hint in the docs
explicitly. This trick is not so apparent, after all.
2025-08-20 09:41:06 +03:00
Saurabh2402
88500ac027 format code as per the configuration in .golangci-lint.yaml file 2025-08-16 02:05:40 +05:30
Saurabh2402
f1c8fcd5c2 add formatter configuration in golangci-lint 2025-08-16 02:04:39 +05:30
Li Zeghong
fc289cbbe8 Add support for Go doc comment links 2025-08-15 19:58:19 +08:00
Li Zeghong
5cb495fb94 Implement Int64Valuer for zeronull int types 2025-08-15 17:11:20 +08:00
Li Zeghong
562761a083 Ensure zeronull int types implement Int64Scanner
Also correct error string when comparing minimum value.
2025-08-15 16:04:45 +08:00
Muhammadali Nazarov
fce1a04dbf passing err by value 2025-08-10 21:45:32 -05:00
Jack Christensen
25cba15299
Merge pull request #2362 from christiankiely/shouldping
Support configurable ShouldPing func when acquiring conns
2025-08-09 09:20:46 -05:00
Michal Drausowski
cec5ebac5b Workaround panic crash when receiving terminate connection message while performing CopyFrom 2025-08-06 19:28:53 +02:00
Christian Kiely
f43091fc80
Support configurable ShouldPing func when acquiring conns 2025-08-06 16:06:03 +10:00
Jack Christensen
a11da9a629
Merge pull request #2357 from pkoutsovasilis/master
Add MarshalJSON and UnmarshalJSON for UINT32
2025-08-02 18:30:24 -05:00
Jack Christensen
2f77a63ce2
Merge pull request #2356 from philippgille/patch-1
Fix typo in Godoc
2025-08-02 18:28:21 -05:00
Jack Christensen
39b85ce8d1
Merge pull request #2352 from flimzy/fixGoDoc
Correction and further clarification of PrepareConn GoDoc comment
2025-08-02 08:21:47 -05:00
Muhammadali Nazarov
e04a6de072 invalidate cache on batch error for batchResults and pipelineBatchResults 2025-07-31 05:27:02 -05:00
Panos Koutsovasilis
c39a0608a3
Add MarshalJSON and UnmarshalJSON for UINT32 2025-07-28 09:09:03 +03:00
Philipp Gillé
f661c47dc8
Fix typo in Godoc 2025-07-22 20:18:33 +02:00
Jack Christensen
e68ff102de
Merge pull request #2349 from dominiquelefevre/fewer-allocations-in-QueryContext
Make fewer allocations in QueryContext().
2025-07-12 15:40:16 -05:00
Jonathan Hall
30ff631878 Correction and further clarification of PrepareConn GoDoc comment 2025-07-08 11:45:25 -04:00
Dominique Lefevre
69934dcd95 Make fewer allocations in QueryContext().
For a simple query like `SELECT * FROM t WHERE id = $1`,
QueryContext() used to make 6 allocations on average.
Reduce that to 4 by avoiding two allocations that
are immediately discarded:

  // the variadic arguments of c.conn.Query() escape,
  // so args is heap-allocated
  args := []any{databaseSQLResultFormats}
  // append() reallocates args to add enough space +
  // an array returned by namedValueToInterface()
  // is immediately discarded
  args = append(args, namedValueToInterface(argsV)...)
2025-07-04 14:11:46 +03:00
Jack Christensen
ecc9203ef4
Merge pull request #2336 from pashagolub/fix-docs-links
fix documentation links
2025-06-25 19:44:25 -05:00
Pavlo Golub
33163eefca fix documentation links 2025-06-12 16:01:14 +02:00
Jack Christensen
d2ee7464e8
Merge pull request #2335 from flimzy/acquireBailOut
Attempt to acquire a connection MaxConns + 1 times before aborting
2025-06-03 17:17:42 -05:00
Jack Christensen
1320d13f8a
Merge pull request #2333 from costela/patch-1
chore(docs): add mention to otelpgx lib
2025-06-03 17:14:33 -05:00
Jonathan Hall
f118bb6033 Attempt to acquire a connection MaxConns + 1 times before aborting 2025-06-03 15:09:37 -04:00
Leo Antunes
b0572f79e6
chore(docs): add mention to otelpgx lib 2025-06-03 09:05:22 +02:00
Jack Christensen
4015a0c123
Merge pull request #2329 from flimzy/prepareConn
Add PrepareConn hook, which extends BeforeAcquire's behavior to allow…
2025-05-31 09:55:27 -05:00
Jonathan Hall
48d27a9fff Add PrepareConn hook, which extends BeforeAcquire's behavior to allow canceling the instigating query
- BeforeAcquire now marked as deprecated, and re-implemented in terms of PrepareConn
- PrepareConn now takes precedence over BeforeAcquire if both are provided
- New tests added, so both old and new behavior are tested
- One niggle: AcquireAllIdle does not return an error, so the only recourse
  that seems reasonable when PrepareConn returns an error in that context,
  is to destroy the connection.  This more or less retains the spirit of
  existing functionality, without changing the public API of that method.
  Although maybe an error-returning variant would be a useful addition as
  well.
2025-05-21 11:41:04 -04:00
Jack Christensen
fc334e4c75
Merge pull request #2322 from Nomlsbad/better-parse-config-errors
Use `ParseConfigError` in `pgx.ParseConfig` and `pgxpool.ParseConfig`
2025-05-18 09:03:38 -05:00
Yurasov Ilia
3f5509fe98 fix: remove fmt import from the pgxpool 2025-05-18 02:28:24 +03:00
Jack Christensen
15bca4a4e1 Release v5.7.5 2025-05-17 17:14:14 -05:00
Jack Christensen
1d557f9116 Remove PlanScan memoization
Previously, PlanScan used a cache to improve performance. However, the
cache could get confused in certain cases. For example, the following
would fail:

m := pgtype.NewMap()
var err error

var tags any
err = m.Scan(pgtype.TextArrayOID, pgx.TextFormatCode, []byte("{foo,bar,baz}"), &tags)
require.NoError(t, err)

var cells [][]string
err = m.Scan(pgtype.TextArrayOID, pgx.TextFormatCode, []byte("{{foo,bar},{baz,quz}}"), &cells)
require.NoError(t, err)

This commit removes the memoization and adds a test to ensure that this
case works.

The benchmarks were also updated to include an array of strings to
ensure this path is benchmarked. As it turned out, there was next to no
performance difference between the cached and non-cached versions.

It's possible there may be a performance impact in certain complicated
cases, but I have not encountered any. If there are any performance
issues, we can optimize the narrower case rather than adding memoization
everywhere.
2025-05-17 16:34:01 -05:00
Jack Christensen
de7fe81d78 Use reflect.TypeFor instead of reflect.TypeOf
Simplified function became available in Go 1.22.
2025-05-17 09:11:31 -05:00
Jack Christensen
d9eb089bd7 Remove unused function 2025-05-17 09:09:07 -05:00
Jack Christensen
6be24eb08d Fix comment typo 2025-05-17 09:01:55 -05:00
Jack Christensen
07871c0a34 Zero internal baseRows references to allow GC earlier
See https://github.com/jackc/pgx/pull/2269
2025-05-17 08:39:54 -05:00
Yurasov Ilia
de806a11e7 fix: return pgconn.ParseConfigError inside ParseConfig functions 2025-05-14 03:50:09 +03:00
Yurasov Ilia
ce13266e90 fix: move pgconn.NewParseConfigError to errors.go
Move pgconn.NewParseConfigError function to errors.go from
export_test.go for creating pgconn.ParseConfigError inside the pgx and
pgxpool packages.
2025-05-14 03:43:21 +03:00
Jack Christensen
777e7e5cdf
Merge pull request #2313 from stampy88/tracelog_pool_additions
Implement AcquireTracer and ReleaseTracer for TraceLog
2025-05-10 11:30:57 -05:00
dave sinclair
151bd026ec Switched to LogLevelDebug 2025-05-10 12:28:26 -04:00
Jack Christensen
540fcaa9b9 Add support for PGOPTIONS environment variable
Match libpq behavior for PGOPTIONS environment variable. See
https://www.postgresql.org/docs/current/libpq-envars.html
2025-05-10 11:09:39 -05:00
Jack Christensen
3a248e3822 Add support for PGTZ environment variable
Match libpq behavior for PGTZ environment variable. See
https://www.postgresql.org/docs/current/libpq-envars.html
2025-05-10 10:58:35 -05:00
dave sinclair
baca2d848a Implement AcquireTracer and ReleaseTracer for TraceLog
- `TraceLog` now implements the `pgxpool.AcquireTracer` and `pgxpool.ReleaseTracer` interfaces to log connection pool interactions.
2025-05-08 16:28:21 -04:00
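The commit above makes TraceLog usable for pool-level tracing. A rough sketch of how that might be wired up follows; the LoggerFunc adapter, connection-string handling, and log destination are assumptions for illustration, not taken from the commit.

```go
package example

import (
	"context"
	"log"

	"github.com/jackc/pgx/v5/pgxpool"
	"github.com/jackc/pgx/v5/tracelog"
)

// newTracedPool attaches TraceLog to a pool config. Because TraceLog now also
// implements pgxpool's AcquireTracer and ReleaseTracer, acquire/release events
// are logged alongside queries. This is a sketch, not the commit's own code.
func newTracedPool(ctx context.Context, connString string) (*pgxpool.Pool, error) {
	cfg, err := pgxpool.ParseConfig(connString)
	if err != nil {
		return nil, err
	}
	cfg.ConnConfig.Tracer = &tracelog.TraceLog{
		Logger: tracelog.LoggerFunc(func(ctx context.Context, level tracelog.LogLevel, msg string, data map[string]any) {
			log.Printf("%s %s %v", level, msg, data) // assumed log destination
		}),
		LogLevel: tracelog.LogLevelDebug,
	}
	return pgxpool.NewWithConfig(ctx, cfg)
}
```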
Jack Christensen
c911d86cff
Merge pull request #2309 from dzherb/fix_doc
chore: fix typo in doc
2025-05-03 13:46:46 -05:00
dzherb
2bac99e2ae chore: fix typo 2025-04-28 20:58:11 +03:00
Jack Christensen
c92d0a9045 Update golang.org/x/crypto to v0.37.0
This required bumping the minimum Go version to 1.23.0.
2025-04-26 10:09:29 -05:00
Jack Christensen
e9aad0fb0b Add test for tracer within transaction
https://github.com/jackc/pgx/issues/2304
2025-04-26 09:55:31 -05:00
Jack Christensen
9e7f38cd50
Merge pull request #2302 from usernameisnull/pgconn-error
chore: should be pgconn.PgError
2025-04-17 08:52:47 -05:00
bing.ma
e779a5c072 chore: should be pgconn.PgError 2025-04-17 18:30:06 +08:00
Jack Christensen
ff9c26d851 Make OpenDBFromPool docs explicit about closing the *sql.DB
https://github.com/jackc/pgx/issues/2295
2025-04-05 09:01:11 -05:00
Jack Christensen
0f77a2d028
Merge pull request #2293 from divyam234/master
feat: add support for direct sslnegotiation
2025-03-31 08:13:19 -05:00
divyam234
ddd966f09f
update 2025-03-31 15:06:55 +02:00
divyam234
924834b5b4
add pgmock tests 2025-03-31 15:02:07 +02:00
divyam234
9b15554c51
respect sslmode set by user 2025-03-30 16:35:43 +02:00
divyam234
037e4cf9a2
feat: add support for direct sslnegotiation 2025-03-30 16:21:52 +02:00
Jack Christensen
04bcc0219d Add v5.7.4 to changelog 2025-03-24 20:04:45 -05:00
Jack Christensen
0e0a7d8344
Merge pull request #2288 from felix-roehrich/fr/fix-plan-scan
Revert change in `if` from #2236.
2025-03-24 19:46:22 -05:00
Felix Röhrich
63422c7d6c revert change in if 2025-03-24 15:01:50 +01:00
Jack Christensen
5c1fbf4806 Update changelog for v5.7.3 2025-03-21 21:02:03 -05:00
Jack Christensen
05fe5f8b05 Explain seemingly redundant rows.Close() in CollectOneRow
fixes https://github.com/jackc/pgx/issues/2282
2025-03-21 20:33:32 -05:00
Jack Christensen
70c9a147a2
Merge pull request #2279 from djahandarie/min-idle-conns
Add MinIdleConns
2025-03-21 20:25:19 -05:00
Darius Jahandarie
6603ddfbe4
add MinIdleConns 2025-03-15 19:14:26 +09:00
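A minimal sketch of how the new option might sit next to the existing pool size settings; the values are placeholders and the field's typing is assumed to mirror MinConns/MaxConns.

```go
package example

import "github.com/jackc/pgx/v5/pgxpool"

// configureMinIdle sets the new MinIdleConns option alongside the existing
// pool size knobs. Values here are placeholders, not recommendations.
func configureMinIdle(cfg *pgxpool.Config) {
	cfg.MaxConns = 16
	cfg.MinConns = 2
	cfg.MinIdleConns = 2 // keep at least two idle connections warm
}
```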
Jack Christensen
70f7cad222 Add link to https://github.com/Arlandaren/pgxWrappy 2025-02-28 20:59:28 -06:00
Jack Christensen
6bf1b0b1b9 Add database/sql to overview of scanning 2025-02-22 08:42:26 -06:00
Jack Christensen
14bda65a0c Correct pgtype docs 2025-02-22 08:34:31 -06:00
Jack Christensen
9e3c4fb40f
Merge pull request #2257 from felix-roehrich/fr/change-connect-logic
Change connection logic to be more forgiving
2025-02-19 07:36:35 -06:00
Felix Röhrich
05e72a5ab1 make connection logic more forgiving 2025-02-17 21:24:38 +01:00
Jack Christensen
47d631e34b Added missed change to v5.7.2 changelog 2025-02-08 10:21:39 -06:00
Jack Christensen
58b05f567c Add https://github.com/nikolayk812/pgx-outbox to README.md
fixes https://github.com/jackc/pgx/issues/2239
2025-01-25 08:59:52 -06:00
Jack Christensen
dcb7193669
Merge pull request #2236 from felix-roehrich/fr/fix-plan-scan
Alternative implementation for JSONCodec.PlanScan
2025-01-25 08:56:38 -06:00
Jack Christensen
1abf7d9050
Merge pull request #2240 from bonnefoa/fix-watch-panic
Unwatch and close connection on a batch write error
2025-01-25 08:38:33 -06:00
Jack Christensen
b5efc90a32
Merge pull request #2028 from jackc/enable-composite-tests-on-cockroachdb
Enable composite tests on cockroachdb
2025-01-25 08:22:32 -06:00
Jack Christensen
a26c93551f Skip TestCompositeCodecTranscodeStructWrapperForTable 2025-01-25 08:15:40 -06:00
Jack Christensen
2100e1da46 Use latest version of CockroachDB for CI 2025-01-25 08:04:42 -06:00
Jack Christensen
2d21a2b80d
Merge pull request #2228 from jackc/fix-xml-decode-value
XMLCodec: fix DecodeValue to return a []byte
2025-01-25 07:24:30 -06:00
Jack Christensen
5f33ee5f07 Call out []byte in QueryExecModeSimpleProtocol documentation
https://github.com/jackc/pgx/issues/2231
2025-01-25 07:15:02 -06:00
Anthonin Bonnefoy
228cfffc20 Unwatch and close connection on a batch write error
Previously, a conn.Write error would simply unlock pgconn, leaving the
connection as Idle and reusable while the multiResultReader would be
closed. From this state, calling multiResultReader.Close won't try to
receiveMessage and thus won't unwatch and close the connection since it
is already closed. This leaves the connection "open" and the next time
it's used, a "Watch already in progress" panic could be triggered.

This patch fixes the issue by unwatching and closing the connection on a
batch write error. The same was done on Sync.Encode error even if the
path is unreachable as Sync.Error never returns an error.
2025-01-24 08:49:07 +01:00
Felix Röhrich
a5353af354 rework JSONCodec.PlanScan 2025-01-22 22:35:35 +01:00
Jack Christensen
0bc29e3000
Merge pull request #2225 from logicbomb/improve-error-message
Include the field name in error messages when scanning structs
2025-01-18 10:41:13 -06:00
Jack Christensen
9cce05944a
Merge pull request #2216 from pconstantinou/master
Timestamp incorrectly adds 'Z' when serializing from JSON to indicate GMT, fixes bug #2215
2025-01-18 10:17:43 -06:00
Jason Turim
9c0ad690a9 Include the field name in error messages when scanning structs 2025-01-11 14:31:24 -05:00
Jack Christensen
03f08abda3 Fix in Unmarshal function rather than DecodeValue
This preserves backwards compatibility in the unlikely event someone is
using an alternative XML unmarshaler that does support unmarshalling
into *any.
2025-01-11 11:26:46 -06:00
Jack Christensen
2c1b1c389a
Merge pull request #2200 from zenkovev/flush_request_in_pipeline
add flush request in pipeline
2025-01-11 11:15:36 -06:00
Jack Christensen
329cb45913 XMLCodec: fix DecodeValue to return a []byte
Previously, DecodeValue would always return nil with the default
Unmarshal function.

fixes https://github.com/jackc/pgx/issues/2227
2025-01-11 10:55:48 -06:00
zenkovev
c96a55f8c0 private const for pipelineRequestType 2025-01-11 19:54:18 +03:00
Jack Christensen
e87760682f Update oldest supported Go version to 1.22 2025-01-11 07:49:50 -06:00
Jack Christensen
f681632c68 Drop PG 12 support and add PG 17 to CI 2025-01-11 07:49:26 -06:00
Phil Constantinou
3c640a44b6 Making the tests a little cleaner and clear 2025-01-06 09:24:55 -08:00
zenkovev
de3f868c1d pipeline queue for client requests 2025-01-06 13:54:48 +03:00
Phil Constantinou
5424d3c873 Return error and make sure they are unit tested 2025-01-05 19:45:45 -08:00
Phil Constantinou
42d3d00734 Parse as a UTC time 2025-01-05 19:19:17 -08:00
Phil Constantinou
cdc672cf3f Make JSON output conform to ISO8601 timestamp without a timezone 2025-01-05 13:05:51 -08:00
Phil Constantinou
52e2858629 Added unit test and fixed typo 2025-01-02 13:36:33 -08:00
Phil Constantinou
e352784fed Add Z only if needed. 2025-01-02 12:50:29 -08:00
Jack Christensen
c2175fe46e
Merge pull request #2213 from moukoublen/fix_2204
Fix #2204
2024-12-30 20:35:41 -06:00
Jack Christensen
659823f8f3 Add link to github.com/amirsalarsafaei/sqlc-pgx-monitoring
fixes https://github.com/jackc/pgx/issues/2212
2024-12-30 20:27:10 -06:00
Jack Christensen
ca04098fab
Merge pull request #2136 from ninedraft/optimize-sanitize
Reduce SQL sanitizer allocations
2024-12-30 20:24:13 -06:00
Jack Christensen
4ff0a454e0
Merge pull request #2211 from EinoPlasma/master
Fixes for Method Comment and Typo in Test Function Name
2024-12-30 20:12:22 -06:00
Jack Christensen
00b86ca3db
Merge pull request #2208 from vamshiaruru/feat/expose_empty_acquire_wait_time_from_puddle
Expose puddle.Pool's EmptyAcquireWaitTime in pgxpool's Stats
2024-12-30 20:03:51 -06:00
Kostas Stamatakis
61a0227241
simplify test 2024-12-30 23:15:46 +02:00
Kostas Stamatakis
2190a8e0d1
cleanup and add test for json codec 2024-12-30 23:09:19 +02:00
Kostas Stamatakis
6e9fa42fef
fix #2204 2024-12-30 22:54:42 +02:00
EinoPlasma
6d9e6a726e Fix typo in test function name 2024-12-29 21:03:38 +08:00
EinoPlasma
02e387ea64 Fix method comment in PasswordMessage 2024-12-29 20:59:24 +08:00
merlin
e452f80b1d
TestErrNoRows: remove bad test case 2024-12-28 13:39:01 +02:00
merlin
da0315d1a4
optimisations of quote functions by @sean- 2024-12-28 13:31:09 +02:00
merlin
120c89fe0d
fix preallocations of quoted string 2024-12-28 13:31:09 +02:00
merlin
057937db27
add prefix to quoters tests 2024-12-28 13:31:09 +02:00
merlin
47cbd8edb8
drop too large values from memory pools 2024-12-28 13:31:09 +02:00
merlin
90a77b13b2
add docs to sanitize tests 2024-12-28 13:31:08 +02:00
merlin
59d6aa87b9
rework QuoteString and QuoteBytes as append-style 2024-12-28 13:31:08 +02:00
merlin
39ffc8b7a4
add lexer and query pools
use lexer pool
2024-12-28 13:31:08 +02:00
merlin
c4c1076d28
add FuzzQuoteString and FuzzQuoteBytes 2024-12-28 13:31:08 +02:00
merlin
4293b25262
decrease number of samples in go benchmark 2024-12-28 13:31:08 +02:00
merlin
ea1e13a660
quoteString 2024-12-28 13:31:08 +02:00
merlin
58d4c0c94f
quoteBytes
check new quoteBytes
2024-12-28 13:31:08 +02:00
merlin
1752f7b4c1
docs 2024-12-28 13:31:08 +02:00
merlin
ee718a110d
append AvailableBuffer 2024-12-28 13:31:08 +02:00
merlin
546ad2f4e2
shared bytestring 2024-12-28 13:31:08 +02:00
merlin
efc2c9ff44
buf pool 2024-12-28 13:31:08 +02:00
merlin
aabed18db8
add benchmark tool
fix benchmmark script

fix benchmark script
2024-12-28 13:31:08 +02:00
merlin
afa974fb05
base case
make benchmark more extensive

add quote to string

add BenchmarkSanitizeSQL
2024-12-28 13:31:08 +02:00
Vamshi Aruru
12b37f3218 Expose puddle.Pool's EmptyAcquireWaitTime in pgxpool's Stats
Addresses: https://github.com/jackc/pgx/issues/2205
2024-12-26 13:46:49 +05:30
Jack Christensen
bcf3fbd780
Merge pull request #2206 from alexandear/refactor-impossible-cond
Refactor Conn.LoadTypes by removing redundant check
2024-12-24 11:14:17 -06:00
Jack Christensen
f7c3d190ad
Merge pull request #2203 from martinyonatann/chore/check-array-and-remove-imposible-condition
check array just using `len` and remove `impossible condition`
2024-12-24 11:10:45 -06:00
Jack Christensen
473a241b96
Merge pull request #2202 from martinyonatann/chore/remove-unused-parameter
remove unused func and parameter
2024-12-24 09:32:07 -06:00
Oleksandr Redko
311f72afdc Refactor Conn.LoadTypes by removing redundant check 2024-12-24 12:58:15 +02:00
martinpasaribu
877111ceeb
check array just using len and remove impossible condition 2024-12-22 23:57:28 +07:00
martinpasaribu
dc3aea06b5
remove unused func and parameter 2024-12-22 23:48:08 +07:00
Jack Christensen
e5d321f920
Merge pull request #2197 from alexandear/fix-generated-hdr
Update comments in generated code to align with Go standards
2024-12-21 12:40:23 -06:00
Oleksandr Redko
17cd36818c Update comments in generated code to align with Go standards 2024-12-21 20:21:32 +02:00
Jack Christensen
24fbe353ed Create changelog for v5.7.2 2024-12-21 09:25:36 -06:00
Jack Christensen
3a1593b25b
Merge pull request #2198 from alexandear/fix-nilness
Handle errors in generate_certs
2024-12-21 08:27:55 -06:00
Jack Christensen
9d851d7c98 Fix integration benchmarks 2024-12-21 08:22:12 -06:00
Jack Christensen
dacffdc7e2
Merge pull request #2196 from alexandear/docs-improve-links
Improve links in README
2024-12-21 08:13:57 -06:00
Jack Christensen
bc7c840770
Merge pull request #2195 from LucasHild/master
Add CommitQuery to transaction options
2024-12-21 08:12:58 -06:00
Oleksandr Redko
043685147f Handle errors in generate_certs 2024-12-18 02:31:56 +02:00
Oleksandr Redko
25329273da Improve links in README 2024-12-18 02:02:06 +02:00
Jack Christensen
ad87d47089
Merge pull request #2194 from alexandear/refactor/pgconn-tests
Simplify pgconn tests by using T.TempDir
2024-12-17 17:45:16 -06:00
Oleksandr Redko
7cf7bc6054 Simplify pgconn tests by using T.TempDir 2024-12-17 16:09:32 +02:00
zenkovev
76593f37f7 add flush request in pipeline 2024-12-17 11:49:13 +03:00
Jack Christensen
3e6c719698
Merge pull request #2189 from pankona/update-crypto
Update golang.org/x/crypto v0.27.0 => v0.31.0 to fix vulnerability
2024-12-13 07:54:25 -06:00
Yosuke Akatsuka
5ee33320c6 update golang.org/x/crypto v0.27.0 => v0.31.0 2024-12-12 12:58:14 +00:00
Jack Christensen
ac0b46f2f9 Warn not to create table and use it in the same batch
fixes https://github.com/jackc/pgx/issues/2182
2024-12-05 16:30:48 -06:00
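To make the warning concrete, here is a hedged sketch of the pattern being warned against, assuming the default extended-protocol query modes where queued statements are prepared/described before earlier statements in the batch have executed; the table and column names are hypothetical.

```go
package example

import (
	"context"

	"github.com/jackc/pgx/v5"
)

// createAndUseInOneBatch illustrates the anti-pattern the docs warn about:
// the insert references a table created earlier in the same batch.
func createAndUseInOneBatch(ctx context.Context, conn *pgx.Conn) error {
	batch := &pgx.Batch{}
	batch.Queue("create table widgets (id int primary key, name text)")
	// With the extended protocol this statement is prepared/described before the
	// CREATE TABLE above has executed, so it can fail with "relation does not exist".
	batch.Queue("insert into widgets (id, name) values ($1, $2)", 1, "a")
	return conn.SendBatch(ctx, batch).Close()
}
```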
Jack Christensen
e3c81cc153
Merge pull request #2169 from thedolphin/master
Switch from ExecParams to Exec in ValidateConnectTargetSessionAttrs
2024-11-28 18:09:29 -06:00
Alexander Rumyantsev
4b7e9942b2 Switch from ExecParams to Exec in ValidateConnectTargetSessionAttrs functions 2024-11-19 19:42:49 +03:00
Jack Christensen
b9e2b20fb1
Merge pull request #2162 from evellior/patch-1
Update pgxpool.ParseConfig documentation
2024-11-05 18:14:46 -06:00
Peyton Foley
06a0abb75e
Update pgxpool.ParseConfig documentation
Added default values and example of valid duration string to inline documentation.
2024-11-05 03:09:43 +08:00
Jack Christensen
c76a650f75 Improve documentation for QueryExecModes
https://github.com/jackc/pgx/issues/2157
2024-10-29 19:36:33 -05:00
Jack Christensen
f57b2854f8
Merge pull request #2151 from ludusrusso/fix-2146
handling double pointer on sql.Scanner interface when scanning rows
2024-10-22 18:52:46 -05:00
Ludovico Russo
5c9b565116 fix: #2146
[![Open Source Saturday](https://img.shields.io/badge/%E2%9D%A4%EF%B8%8F-open%20source%20saturday-F64060.svg)](https://www.meetup.com/it-IT/Open-Source-Saturday-Milano/)

Co-authored-by: Alessio Izzo <alessio.izzo86@gmail.com>
2024-10-19 15:43:56 +02:00
Jack Christensen
2ec900454b
Merge pull request #2145 from grachevko/string
Implement pgtype.UUID.String()
2024-10-09 08:46:03 -05:00
Konstantin Grachev
8723855d95
Implement pgtype.UUID.String() 2024-10-09 14:22:10 +03:00
Jack Christensen
3f84e891de
Merge pull request #2142 from jackc/add-xid8
Add xid8 type
2024-10-08 19:04:07 -05:00
Jack Christensen
cc05954369
Merge pull request #2138 from zenkovev/message_body_size_limit
add message body size limits in frontend and backend
2024-10-05 12:35:36 -05:00
Jack Christensen
123b59a57e Ensure planning encodes and scans cannot infinitely recurse
https://github.com/jackc/pgx/issues/2141
2024-10-05 12:20:50 -05:00
zene
10e11952bd changed style of two comments 2024-10-05 19:54:02 +03:00
Jack Christensen
32a6b1b200 Skip xid8 test on PG < 13 and CRDB 2024-10-05 10:44:13 -05:00
Jack Christensen
f0783c6fbe Add xid8 type
https://github.com/jackc/pgx/discussions/2137
2024-10-05 10:16:42 -05:00
zene
0290507ff2 remove global atomics 2024-10-04 09:26:37 +03:00
zene
8f8470edaf add message body size limits in frontend and backend 2024-09-27 15:17:47 +03:00
Jack Christensen
a95cfbb433
Merge pull request #2129 from s-montigny-desautels/fix/timestamp-json-unmarshal
Fix pgtype.Timestamp json unmarshal
2024-09-24 17:47:28 -05:00
Shean de Montigny-Desautels
7803ec3661
Fix pgtype.Timestamp json unmarshal
Add the missing 'Z' at the end of the timestamp string, so it can be
parsed as timestamp in the RFC3339 format.
2024-09-23 18:12:32 -04:00
Lucas Hild
64ca07e31b Add commit query to tx options 2024-09-23 16:46:58 +02:00
Jack Christensen
fd0c65478e Fix prepared statement already exists on batch prepare failure
When a batch successfully prepared some statements, but then failed to
prepare others, the prepared statements that were successfully prepared
were not properly cleaned up. This could lead to a "prepared statement
already exists" error on subsequent attempts to prepare the same
statement.

https://github.com/jackc/pgx/issues/1847#issuecomment-2347858887
2024-09-13 08:03:37 -05:00
Jack Christensen
672c4a3a24 Release v5.7.1 2024-09-10 07:25:07 -05:00
Jack Christensen
f8a5a5c9e3 Update golang.org/x/crypto and golang.org/x/text 2024-09-10 07:17:03 -05:00
Jack Christensen
ab36c2c0dd Upgrade puddle to v2.2.2
This removes the import of nanotime via linkname.
2024-09-10 07:11:44 -05:00
Jack Christensen
ce66b1dae4 Fix data race with TraceLog.Config initialization
https://github.com/jackc/pgx/pull/2120
2024-09-10 07:06:39 -05:00
Jack Christensen
d1205a6dbc Release v5.7.0 2024-09-07 10:23:34 -05:00
Jack Christensen
97d20ccfad
Merge pull request #2115 from ninedraft/sql-err-no-rows
Use sql.ErrNoRows as value for pgx.ErrNoRows
2024-08-26 07:40:46 -05:00
Jack Christensen
e9bd382c51
Merge pull request #2114 from jennifersp/master
add byte length check to uint32
2024-08-26 07:28:47 -05:00
Jack Christensen
603f2337d6
Merge pull request #2113 from mateuszkowalke/master
Add comment for pgtype.Interval struct
2024-08-26 07:28:29 -05:00
merlin
035bbbe0cb
Use sql.ErrNoRows as value for pgx.ErrNoRows 2024-08-26 14:01:37 +03:00
jennifersp
73bbced270 add byte length check to uint32 2024-08-23 16:17:07 -07:00
mateuszkowalke
4171f554d4 Add additional info for nullable pgtype types
The additional information warns about nullable types being
used as query parameters with Valid set to false.
2024-08-23 15:15:40 +02:00
Jack Christensen
b197994b1f
Merge pull request #2112 from jennifersp/master
support text scanner for binary format for uint32
2024-08-23 07:05:15 -05:00
jennifersp
57fd684068 update struct name 2024-08-22 16:51:42 -07:00
jennifersp
926913ad66 rm bound check 2024-08-21 15:12:36 -07:00
jennifersp
b9f77cb1b3 fix typo 2024-08-21 12:27:23 -07:00
jennifersp
218c15a4eb support text scanner for binary format for uint32 2024-08-21 12:04:54 -07:00
Jack Christensen
4f7e19d67d
Merge pull request #2108 from jackc/ci-tests-go-1.23
CI tests Go 1.23
2024-08-15 18:41:48 -05:00
Jack Christensen
0cbc5db39d CI tests Go 1.23 2024-08-15 18:33:43 -05:00
Jack Christensen
5747f37d9c Fix: Scan and encode types with underlying types of arrays
Rather than special case the reported issue with UUID and [16]byte, this
commit allows the system to find the underlying type of any type that is
an array.

fixes https://github.com/jackc/pgx/issues/2107
2024-08-15 18:20:07 -05:00
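As an illustration of what this fix enables, a sketch using a hypothetical named type whose underlying type is an array, similar to the UUID / [16]byte case mentioned above; the query and type name are assumptions.

```go
package example

import (
	"context"

	"github.com/jackc/pgx/v5"
)

// id is a named type whose underlying type is an array, like the
// UUID / [16]byte case referenced in the commit message.
type id [16]byte

// roundTrip encodes an id as a parameter and scans it back, relying on pgx
// finding the underlying array type. Hypothetical example, not from the repo.
func roundTrip(ctx context.Context, conn *pgx.Conn, v id) (id, error) {
	var out id
	err := conn.QueryRow(ctx, "select $1::uuid", v).Scan(&out)
	return out, err
}
```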
Jack Christensen
d6fc8b02b4
Merge pull request #2098 from stringintech/tracelog-time-key
add TraceLogConfig with customizable TimeKey and ensureConfig method for default initialization
2024-08-13 18:39:21 -05:00
Jack Christensen
c457de62c9 Fix doc discrepancies between Tx interface and pgxpool implementation
The error is not wrapped at the moment, but document that it may be.

fixes https://github.com/jackc/pgx/issues/2104
2024-08-07 08:03:41 -05:00
stringintech
216049c62b add TraceLogConfig with customizable TimeKey and ensureConfig method for default initialization 2024-07-26 22:58:14 +03:30
Jack Christensen
a68e14fe5a Explicitly disclaim support for time with time zone 2024-07-23 17:27:05 -05:00
Jack Christensen
ea9610f672
Merge pull request #2084 from EpicStep/multiple-tracing
Implement 'MultiTracer'
2024-07-15 08:22:27 -05:00
Stepan Rabotkin
7af618e423
feat: add pool tracing to 'MultiTracer' & move it to another package 2024-07-13 17:04:04 +03:00
Stepan Rabotkin
3f270eec7d
feat: add 'MultiTracer' to go doc & cover it 2024-07-13 02:02:22 +03:00
Stepan Rabotkin
8e46d2117c
refac: export 'MultiTracer' fields 2024-07-13 01:40:46 +03:00
Jack Christensen
9530aea47b
Merge pull request #2083 from sodahealth/xml-codec
V1 XMLCodec supports encoding + scanning XML column type
2024-07-12 16:49:08 -05:00
nickcruess-soda
a8aaa37363 fix(test): skip CockroachDB since it doesn't support XML 2024-07-12 09:56:59 -05:00
Jack Christensen
67aa0e5a65
Merge pull request #2085 from nolandseigler/rows-snake-case
RowToStructByName Snake Case Collision
2024-07-12 09:00:27 -05:00
Jack Christensen
96791c88cd
Merge pull request #2082 from heavycrystal/url-parse-err-fix
don't print URL when url.Parse returns an error
2024-07-12 08:52:09 -05:00
nolandseigler
71a8e53574
use normalized equality or strict equality check in rows.go fieldPosByName 2024-07-12 08:50:54 -04:00
Kevin Biju
13e212430d
address review feedback 2024-07-12 18:11:09 +05:30
nolandseigler
b25d092d20
formatting 2024-07-11 23:30:28 -04:00
nolandseigler
7fceb64dee
in rows.go 'fieldPosByName' use boolean to replace '_' and only execute replacements when there are no db tags present 2024-07-11 23:28:21 -04:00
nolandseigler
7a35585143
example test case that demonstrates snake case collision in db tags caused by rows.go 'fieldPosByName' 2024-07-11 22:39:29 -04:00
Stepan Rabotkin
a787630988
feat: implement 'MultiTracer' 2024-07-11 23:41:17 +03:00
nickcruess-soda
37681a4f48 chore: remove unused JSONCodec code, correct typo 2024-07-11 15:18:20 -05:00
nickcruess-soda
c7b9dc0e00 feat: add pgtype.XMLCodec based on pgtype.JSONCodec 2024-07-11 15:17:55 -05:00
Kevin Biju
f007d84675
don't print url when url.Parse returns an error 2024-07-10 22:46:32 +05:30
Jack Christensen
3563a2b048
Merge pull request #2077 from nicois/nicois/pgtype-other-schema-fix
Fix bug relating to reuse of types
2024-07-08 17:29:44 -05:00
Nick Farrell
b770252a3b
Fix bug relating to reuse of types
When `LoadTypes` is called, it does not include the
namespace-qualified types in its result. While these namespaces are
visible to `LoadTypes` itself, `RegisterTypes` will not recognise this
form of the types, only allowing them to be used if they are on the
schema path and referred to without their namespace component.
2024-07-07 11:26:19 +10:00
Jack Christensen
c64fa0f0f2 Document that batched queries should not contain multiple statements 2024-07-03 22:49:02 -05:00
Jack Christensen
dced53f796 Better error message when reading past end of batch
https://github.com/jackc/pgx/issues/1801
2024-07-03 22:39:29 -05:00
Jack Christensen
161ce73ec1
Merge pull request #2046 from nicois/nicois/load-types
Faster/easier loading of types
2024-07-01 06:52:10 -05:00
Jack Christensen
fa57a20518 Update go.mod to require go 1.21
pgx now uses slices package.
2024-07-01 06:48:51 -05:00
Jack Christensen
dd71547340
Merge pull request #2066 from yuki0920/use-slices-contains
Fix: use `slices.Contains` according to the TODO comment
2024-07-01 06:47:06 -05:00
Nick Farrell
47977703e1
Load types using a single SQL query
When loading even a single type into pgx's type map, multiple SQL
queries are performed in series. Over a slow link, this is not ideal.
Worse, if multiple types are being registered, this is repeated multiple
times.

This commit adds LoadTypes, which can retrieve type
mapping information for multiple types in a single SQL call, including
recursive fetching of dependent types.
RegisterTypes performs the second stage of this operation.
2024-07-01 15:34:17 +10:00
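A rough usage sketch of the two-stage flow described above, assuming the LoadTypes / RegisterTypes signatures implied by the commit message; the type names are hypothetical.

```go
package example

import (
	"context"

	"github.com/jackc/pgx/v5"
)

// registerCustomTypes loads type mapping information for several user-defined
// types in a single SQL round trip and then registers them on the connection's
// type map. Type names are placeholders.
func registerCustomTypes(ctx context.Context, conn *pgx.Conn) error {
	types, err := conn.LoadTypes(ctx, []string{"my_enum", "my_composite"})
	if err != nil {
		return err
	}
	conn.TypeMap().RegisterTypes(types)
	return nil
}
```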
yuki0920
a764746906
Fix: use slices.Contains according to the TODO comment
I used `slices.Contains` according to the TODO comment.

```
// TODO replace by slices.Contains when experimental package will be merged to stdlib
// https://pkg.go.dev/golang.org/x/exp/slices#Contains
```
2024-06-30 07:58:56 +09:00
Jack Christensen
6b9ff972a4
Merge pull request #2061 from yann-soubeyrand/support-sslrootcert-system
Add support for sslrootcert=system
2024-06-29 06:32:18 -05:00
Yann Soubeyrand
c407c42692 Add support for sslrootcert=system 2024-06-25 11:15:40 +02:00
Jack Christensen
9907b874c2 Update pgservicefile
Fixes panic when parsing invalid file.
2024-06-06 07:12:26 -05:00
Jack Christensen
ec557e87d5
Merge pull request #2035 from exekias/fix-interval
Fix interval encoding to allow 0s and avoid extra spaces
2024-05-30 20:13:33 -05:00
Carlos Pérez-Aradros Herce
9f4a264f89 Fix interval encoding to allow 0s and avoid extra spaces
Fix bugs introduced by 01d649b; also add some tests
2024-05-30 09:48:53 +02:00
Jack Christensen
572d7fff32 Release v5.6.0 2024-05-25 11:35:25 -05:00
Jack Christensen
b4911f1da7
Merge pull request #2019 from jackc/fix-encode-driver-valuer-on-pointer
Fix encode driver.Valuer on pointer
2024-05-25 11:20:25 -05:00
Jack Christensen
29751194ef Test composites on CockroachDB 2024-05-25 07:49:00 -05:00
Jack Christensen
c1f4cbb5cd Upgrade CockroachDB on CI 2024-05-25 07:48:47 -05:00
Hans-Joachim Kliemeck
24c0a5e8ff remove keepalive and rely on the Go default (since Go 1.13 the default is 15s)
https://www.reddit.com/r/golang/comments/d7v7dn/psa_go_113_introduces_15_sec_server_tcp/
2024-05-21 10:37:13 -05:00
Jack Christensen
9ca9203afb Move typed nil handling to Map.Encode from anynil
The new logic checks for any type of nil at the beginning of Encode and
then either treats it as NULL or calls the driver.Valuer method if
appropriate.

This should preserve the existing nil normalization while restoring the
ability to encode nil driver.Valuer values.
2024-05-18 22:39:28 -05:00
Jack Christensen
79cab4640f Only use anynil inside of pgtype 2024-05-18 21:06:23 -05:00
Jack Christensen
6ea2d248a3 Remove anynil.NormalizeSlice
anynil.Is was already being called in all paths where
anynil.NormalizeSlice was used.
2024-05-18 21:01:34 -05:00
Jack Christensen
c1075bfff0 Remove some special casing for QueryExecModeExec 2024-05-18 20:59:01 -05:00
Jack Christensen
cf6074fe5c Remove unused anynil.Normalize 2024-05-18 20:37:25 -05:00
Jack Christensen
13beb380f5 Fix encode driver.Valuer on nil-able non-pointers
https://github.com/jackc/pgx/issues/1566
https://github.com/jackc/pgx/issues/1860
https://github.com/jackc/pgx/pull/2019#discussion_r1605806751
2024-05-18 17:17:46 -05:00
Jack Christensen
fec45c802b Refactor appendParamsForQueryExecModeExec
Extract logic for finding OID and converting argument to encodable
value. This is in preparation for a future change for better supporting
nil driver.Valuer values.
2024-05-18 17:00:41 -05:00
Jack Christensen
3b7fa4ce87 Use go 1.20 in go.mod
A future commit will use bytes.Clone, which was implemented in Go
1.20.

Also update README.md to reflect that minimum supported Go version is
1.21. But only requiring Go 1.20 in go.mod to avoid needlessly breaking
old Go when it still works.
2024-05-18 16:47:44 -05:00
Mitar
732889728f Add support for custom JSON marshal and unmarshal.
The Codec interface is now implemented by *pgtype.JSONCodec
and *pgtype.JSONBCodec instead of pgtype.JSONCodec and
pgtype.JSONBCodec, respectively. This is technically a breaking
change, but it is extremely unlikely that anyone is depending on this,
and if there is downstream breakage it is trivial to fix.

Fixes #2005.
2024-05-18 08:02:09 -05:00
Mitar
e1b90cf620 Add ltree extension requirement. 2024-05-18 07:56:47 -05:00
Jack Christensen
2a36a7032e Fix encode driver.Valuer on pointer
pgx v5 introduced nil normalization for typed nils. This means that
[]byte(nil) is normalized to nil at the edge of the encoding system.
This simplified encoding logic as nil could be encoded as NULL and type
specific handling was unneeded.

However, database/sql compatibility requires Value to be called on a
nil pointer that implements driver.Valuer. This was broken by
normalizing to nil.

This commit changes the normalization logic to not normalize pointers
that directly implement driver.Valuer to nil. It still normalizes
pointers that implement driver.Valuer through implicit dereference.

e.g.

type T struct{}

func (t *T) Value() (driver.Value, error) {
  return nil, nil
}

type S struct{}

func (s S) Value() (driver.Value, error) {
  return nil, nil
}

(*T)(nil) will not be normalized to nil but (*S)(nil) will be.

https://github.com/jackc/pgx/issues/1566
2024-05-18 07:41:10 -05:00
Jack Christensen
ded01c0cd9 Fix TestParseConfigEnvLibpq unsetting envars
This would cause tests to fail if PG* variables were used for the
default connection config for other tests.

Previously broken by 0080acf318d162a1128928bc32eadf45cef61fd2.
2024-05-17 09:19:36 -05:00
ngavinsir
532bf8f583 adjust test 2024-05-14 20:28:02 -05:00
ngavinsir
169067a364 remove ctx from release tracer 2024-05-14 20:28:02 -05:00
ngavinsir
659525c961 trace release 2024-05-14 20:28:02 -05:00
ngavinsir
4dd1810d8b persist ctx in pgxpool conn 2024-05-14 20:28:02 -05:00
ngavinsir
25914e21f3 add release tracer 2024-05-14 20:28:02 -05:00
ngavinsir
19fcb54564 add pool to trace acquire 2024-05-14 20:28:02 -05:00
ngavinsir
a39632db43 feat: pgx pool acquire tracer 2024-05-14 20:28:02 -05:00
Oleksandr Redko
c05cce7d41 Fix test asserts: reverse expected-actual 2024-05-14 20:07:10 -05:00
Oleksandr Redko
0080acf318 Simplify config tests by using T.Setenv, T.TempDir 2024-05-14 20:06:18 -05:00
Mitar
c81bba8690 Use pgtype.PreallocBytes in LargeObject's Read.
Fixes #1876.
2024-05-14 07:03:24 -05:00
Pavlo Golub
523411a3fb make QueuedQuery.Fn property public, closes #1878
This commit fixes an oversight in #1886, where the SQL and Arguments
properties were exposed
2024-05-12 09:03:47 -05:00
Jack Christensen
a966716860 Replace DSN with keyword/value in comments and documentation
The term DSN is not used in the PostgreSQL documentation. I'm not sure
why it was originally used. Use the correct PostgreSQL terminology.
2024-05-11 14:33:35 -05:00
Jack Christensen
cf50c60869 Fix error check on CI 2024-05-11 14:33:13 -05:00
Jack Christensen
8db971660e Failed connection attempts include all errors
A single Connect("connstring") may actually make multiple connection
requests due to TLS or HA configuration. Previously, when all attempts
failed only the last error was returned. This could be confusing.
Now details of all failed attempts are included.

For example, the following connection string:

host=localhost,127.0.0.1,foo.invalid port=1,2,3

Will now return an error like the following:

failed to connect to `user=postgres database=pgx_test`:
	lookup foo.invalid: no such host
	[::1]:1 (localhost): dial error: dial tcp [::1]:1: connect: connection refused
	127.0.0.1:1 (localhost): dial error: dial tcp 127.0.0.1:1: connect: connection refused
	127.0.0.1:2 (127.0.0.1): dial error: dial tcp 127.0.0.1:2: connect: connection refused

https://github.com/jackc/pgx/issues/1929
2024-05-11 14:25:03 -05:00
Jack Christensen
48cdd7bab0 Allow scanning time without time zone into string
https://github.com/jackc/pgx/issues/2002
2024-05-10 10:52:41 -05:00
Jack Christensen
579a320c1c pgconn.SafeToRetry checks for wrapped errors
Use errors.As instead of type assertion.

Port 4e2e7a040579c1999c0766642d836eb28c6e2018 to v5

Credit to tjasko
2024-05-09 17:59:16 -05:00
Carlos Pérez-Aradros Herce
01d649b2bf Do not encode interval microseconds when they are 0
This makes the encoding match what Postgres does
2024-05-09 17:29:13 -05:00
Jack Christensen
48ae1f4b2c Fix ResultReader.Read() to handle nil values
The ResultReader.Read() method was erroneously converting nil values
to []byte{}.

https://github.com/jackc/pgx/issues/1987
2024-05-09 17:13:26 -05:00
WGH
e4f72071f8 Document that generic helpers call rows.Close()
Existing generic helpers always call defer rows.Close().
Examples of their usage also omit an external defer rows.Close() call.

For clarity, state that explicitly, because it is another reason
one would want to switch to the generic helpers from a manually written
rows.Next() loop.
2024-05-09 15:54:48 -05:00
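For reference, a small sketch of the pattern this documentation change is about: the generic collect helpers close the rows themselves, so there is no separate defer rows.Close(). The struct, query, and table are hypothetical.

```go
package example

import (
	"context"

	"github.com/jackc/pgx/v5"
)

type user struct {
	ID   int64
	Name string
}

// listUsers relies on CollectRows closing rows internally; no explicit
// defer rows.Close() is needed. Query's error, if any, is surfaced by
// CollectRows via the rows' deferred error handling.
func listUsers(ctx context.Context, conn *pgx.Conn) ([]user, error) {
	rows, _ := conn.Query(ctx, "select id, name from users")
	return pgx.CollectRows(rows, pgx.RowToStructByName[user])
}
```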
Jack Christensen
6f0deff015 Add custom data to pgconn.PgConn
https://github.com/jackc/pgx/issues/1896
2024-05-09 15:39:28 -05:00
Jack Christensen
8649231bb3 Add ScanLocation to pgtype.TimestampCodec
If ScanLocation is set, the timestamps will be assumed to be in the
given location when scanning from the database.

The Codec interface is now implemented by *pgtype.TimestampCodec instead
of pgtype.TimestampCodec. This is technically a breaking change, but it
is extremely unlikely that anyone is depending on this, and if there is
downstream breakage it is trivial to fix.

https://github.com/jackc/pgx/issues/1195
https://github.com/jackc/pgx/issues/1945
2024-05-08 08:35:05 -05:00
Jack Christensen
33360ab479 Add ScanLocation to pgtype.TimestamptzCodec
If ScanLocation is set, it will be used to convert the time to the given
location when scanning from the database.

The Codec interface is now implemented by *pgtype.TimestamptzCodec
instead of pgtype.TimestamptzCodec. This is technically a breaking
change, but it is extremely unlikely that anyone is depending on this,
and if there is downstream breakage it is trivial to fix.

https://github.com/jackc/pgx/issues/1195
https://github.com/jackc/pgx/issues/1945
2024-05-08 08:35:05 -05:00
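A hedged sketch of how the two ScanLocation options above might be applied to a connection's type map; the registration pattern follows pgtype's usual RegisterType usage and the chosen location is arbitrary.

```go
package example

import (
	"time"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgtype"
)

// useScanLocation overrides the timestamp and timestamptz codecs so scanned
// values are interpreted in, or converted to, the given location.
func useScanLocation(conn *pgx.Conn, loc *time.Location) {
	m := conn.TypeMap()
	m.RegisterType(&pgtype.Type{
		Name:  "timestamp",
		OID:   pgtype.TimestampOID,
		Codec: &pgtype.TimestampCodec{ScanLocation: loc},
	})
	m.RegisterType(&pgtype.Type{
		Name:  "timestamptz",
		OID:   pgtype.TimestamptzOID,
		Codec: &pgtype.TimestamptzCodec{ScanLocation: loc},
	})
}
```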
Jack Christensen
c31619d08b Improve docs for customizing context cancellation 2024-05-08 08:08:21 -05:00
Jack Christensen
ec9bb2ace7 Improve flickering test on CI 2024-05-08 07:54:17 -05:00
Jack Christensen
93a579754b Add CancelRequestContextWatcherHandler
This allows a context to cancel a query by sending a cancel request to
the server before falling back to setting a deadline.
2024-05-08 07:41:02 -05:00
Jack Christensen
42c9e9070a Allow customizing context canceled behavior for pgconn
This feature made the ctxwatch package public.
2024-05-08 07:41:02 -05:00
Oleksandr Redko
60a01d044a Fix typos in doc comments 2024-04-17 12:00:02 -05:00
Zach Olstein
8f69e45a53 fixup! Cache reflection analysis in RowToStructBy... 2024-04-16 13:08:16 -05:00
Zach Olstein
ec98406207 Cache reflection analysis in RowToStructBy...
Modify the RowToStructByPos/Name functions to store the computed mapping
of columns to struct field locations in a cache to reuse between calls.
Because this computation can be expensive and the same few results will
frequently be reused, caching these results provides a significant
speedup.

For positional mappings, we can key the cache by just the struct-type.
However, for named mappings, the key must include a representation of
the columns, in order, since different columns produce different
mappings.
2024-04-16 13:08:16 -05:00
Jack Christensen
8db0f280fb Add benchmarks for RowToStructBy(Pos|Name) 2024-04-16 12:59:40 -05:00
Felix Röhrich
fc416d237a make parsing stricter and add corresponding test 2024-04-16 12:18:06 -05:00
Jack Christensen
a3d9120636 Add SeverityUnlocalized field to PgError / Notice
https://github.com/jackc/pgx/issues/1971
2024-04-07 08:58:10 -05:00
Carlos Pérez-Aradros Herce
78b22c3d2f fix tests 2024-03-20 18:21:11 -05:00
Carlos Pérez-Aradros Herce
221ad1b84c Add support for macaddr8 type
Postgres also has a `macaddr8` type; this PR adds support for it using
the same codec as `macaddr`
2024-03-20 18:21:11 -05:00
Tomas Zahradnicek
b6e5548341 StrictNamedArgs 2024-03-16 10:59:31 -05:00
Jack Christensen
1b6227af11 Remove verbose flag from go test command on CI
Interesting information is more often buried by the verbose output
than the verbose output is useful. It can be re-enabled later if
necessary.
2024-03-16 09:52:50 -05:00
Jack Christensen
c1fce377ee Test Go 1.22 and drop Go 1.20 from testing on CI 2024-03-16 09:44:23 -05:00
Jack Christensen
7fd6f2a4f5 Disable parallel testing on Github Actions CI
Tests were failing with:
Error: Process completed with exit code 143.

This appears to mean that Github Actions killed the runner.

See https://github.com/jackc/pgx/actions/runs/8216337993/job/22470808811
for an example.

It appears Github Actions kills runners based on resource usage. Running
tests one at a time reduces the resource usage and avoids the problem.

Or at least that's what I presume is happening. It sure is fun debugging
issues on cloud systems where you have limited visibility... :(

fixes https://github.com/jackc/pgx/issues/1934
2024-03-16 09:41:51 -05:00
Jack Christensen
78a0a2bf41 Fix spelling in changelog 2024-03-09 12:16:20 -06:00
Jack Christensen
a17f064492 Update changelog 2024-03-09 12:12:41 -06:00
Jack Christensen
49b6aad319 Use spaces instead of parentheses for SQL sanitization
This still solves the problem of negative numbers creating a line
comment, but this avoids breaking edge cases such as `set foo to $1`
where the substitution is taking place in a location where an arbitrary
expression is not allowed.

https://github.com/jackc/pgx/issues/1928
2024-03-09 12:09:42 -06:00
Felix
0cc4c14e62 Add test to validate CollectRows for empty Rows
https://github.com/jackc/pgx/issues/1924
https://github.com/jackc/pgx/issues/1925
2024-03-06 22:05:32 -06:00
Jack Christensen
da6f2c98f2 Update changelog 2024-03-04 09:12:06 -06:00
Jack Christensen
c543134753 SQL sanitizer wraps arguments in parentheses
pgx v5 was not vulnerable to CVE-2024-27289 due to how the sanitizer was
being called. But the sanitizer itself still had the underlying issue.
This commit ports the fix from pgx v4 to v5 to ensure that the issue
does not emerge if pgx uses the sanitizer differently in the future.
2024-03-04 09:09:42 -06:00
Jack Christensen
20344dfae8 Check for overflow on uint16 sizes in pgproto3 2024-03-04 09:09:29 -06:00
Jack Christensen
adbb38f298 Do not allow protocol messages larger than ~1GB
The PostgreSQL server will reject messages greater than ~1 GB anyway.
However, worse than that is that a message that is larger than 4 GB
could wrap the 32-bit integer message size and be interpreted by the
server as multiple messages. This could allow a malicious client to
inject arbitrary protocol messages.

https://github.com/jackc/pgx/security/advisories/GHSA-mrww-27vc-gghv
2024-03-04 09:09:29 -06:00
Felix
c1b0a01ca7 Fix behavior of CollectRows to return empty slice if Rows are empty
https://github.com/jackc/pgx/issues/1924
2024-03-03 07:52:18 -06:00
Jack Christensen
88dfc22ae4 Fix simple protocol encoding of json.RawMessage
The underlying type of json.RawMessage is a []byte so to avoid it being
considered binary data we need to handle it specifically. This is done
by registerDefaultPgTypeVariants. In addition, handle json.RawMessage in
the JSONCodec PlanEncode to avoid it being mutated by json.Marshal.

https://github.com/jackc/pgx/issues/1763
2024-03-02 15:12:20 -06:00
Jack Christensen
2e84dccaf5 *Pipeline.getResults should close pipeline on error
Otherwise, it might be possible to panic when closing the pipeline if it
tries to read a connection that should be closed but still has a fatal
error on the wire.

https://github.com/jackc/pgx/issues/1920
2024-02-29 18:44:01 -06:00
David Kurman
d149d3fe5c Fix panic in TryFindUnderlyingTypeScanPlan
Check if CanConvert before calling reflect.Value.Convert
2024-02-26 17:51:56 -06:00
Jack Christensen
046f497efb deallocateInvalidatedCachedStatements now runs in transactions
https://github.com/jackc/pgx/issues/1847
2024-02-24 10:16:18 -06:00
Jack Christensen
8896bd6977 Handle invalid sslkey file
https://github.com/jackc/pgx/issues/1915
2024-02-24 09:24:26 -06:00
Jack Christensen
85f15c4b3c Fix scan float4 into sql.Scanner
https://github.com/jackc/pgx/issues/1911
2024-02-23 18:18:03 -06:00
Jack Christensen
654dcab93e Fix: pgtype.Bits makes copy of data from read buffer
It was taking a reference. This would cause the data to be corrupted by
future reads.

fixes #1909
2024-02-23 17:40:11 -06:00
Tom Payne
5c63f646f8 Add link to github.com/twpayne/pgx-geos 2024-02-04 22:04:03 -06:00
Jack Christensen
6f8f6ede6c Update changelog for v5.5.3 2024-02-03 12:52:29 -06:00
Jack Christensen
576b6c88f6 Bump actions/setup-go version
This gets rid of some deprecation warnings on Github Actions.
2024-02-03 12:50:20 -06:00
Jack Christensen
7caa448ac8 Skip test on CockroachDB 2024-02-03 12:41:59 -06:00
Jack Christensen
832b4f9771 Fix: prepared statement already exists
When a conn is going to execute a query, the first thing it does is to
deallocate any invalidated prepared statements from the statement cache.
However, the statements were removed from the cache regardless of
whether the deallocation succeeded. This would cause subsequent calls of
the same SQL to fail with "prepared statement already exists" error.

This problem is easy to trigger by running a query with a context that
is already canceled.

This commit changes the deallocate invalidated cached statements logic
so that the statements are only removed from the cache if the
deallocation was successful on the server.

https://github.com/jackc/pgx/issues/1847
2024-02-03 12:33:17 -06:00
Jack Christensen
fd4411453f Improve Conn.LoadType documentation 2024-02-03 10:29:10 -06:00
Jack Christensen
34da2fed95 Improve CopyFrom auto-conversion of text-ish values
CopyFrom requires that all values are encoded in the binary format. It
already tried to parse strings to values that can then be encoded into
the binary format. But it didn't handle types that can be encoded as
text and then parsed and converted to binary. It now does.
2024-02-03 09:49:56 -06:00
Jack Christensen
7b5fcac465 Add timetz and []timetz OID constants
https://github.com/jackc/pgx/issues/1883
2024-01-27 18:55:59 -06:00
Jack Christensen
0819a17da8 Remove openssl from TLS test setup
TLS setup and tests were rather finicky. It seems that openssl 3
encrypts certificates differently than older openssl and it does it in
a way Go and/or pgx ssl handling code can't handle. It appears that
this is related to the use of a deprecated client certificate encryption
system.

This caused CI to be stuck on Ubuntu 20.04 and recently caused the
contributing guide to fail to work on MacOS.

Remove openssl from the test setup and replace it with a Go program
that generates the certificates.
2024-01-27 09:04:19 -06:00
Florent Viel
bf1c1d7848 create ltree extension in pg setup for tests 2024-01-26 09:06:13 -06:00
Florent Viel
0fa533386c add ltree pgtype support 2024-01-26 09:06:13 -06:00
Pavlo Golub
c90f82a4e3 make properties of QueuedQuery and Batch public, closes #1878 2024-01-25 18:03:59 -06:00
Edoardo Spadolini
a57bb8caea Add AppendRows helper 2024-01-23 17:14:24 -06:00
Kirill Malikov
517c654e2c feat: fast encodeUUID 2024-01-20 20:50:01 -06:00
Mitar
a4ca0917da Support large large objects.
Fixes #1865.
2024-01-15 08:50:55 -06:00
Mitar
0c35c9e630 Revert "Document max read and write sizes for large objects"
This reverts commit b99e2bb7e0818428092e955cb0ee9cff45504bfd.
2024-01-15 08:50:55 -06:00
Jack Christensen
b7de418d46 Release v5.5.2 2024-01-13 11:08:35 -06:00
Jack Christensen
b99e2bb7e0 Document max read and write sizes for large objects
https://github.com/jackc/pgx/issues/1865
2024-01-13 10:43:35 -06:00
Jack Christensen
52f2151422 Allow NamedArgs to start with underscore
fixes #1869
2024-01-13 10:20:25 -06:00
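For illustration, a tiny sketch of the now-allowed form; the query, table, and argument name are hypothetical.

```go
package example

import (
	"context"

	"github.com/jackc/pgx/v5"
)

// findByOwner uses a named argument that starts with an underscore, which
// this change permits. Table and column names are placeholders.
func findByOwner(ctx context.Context, conn *pgx.Conn, owner int64) (pgx.Rows, error) {
	return conn.Query(ctx, "select * from widgets where owner_id = @_owner_id",
		pgx.NamedArgs{"_owner_id": owner})
}
```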
Endre Kovács
dfb6489612 fix typo in doc.go 2024-01-13 09:40:00 -06:00
Chris Frank
9346d48035 fix OpenDBFromPool example 2024-01-13 09:39:16 -06:00
jeremy.spriet
1fdd17041a feat(pgproto3): expose MaxExpectedBodyLen and ActualBodyLen in ExceededMaxBodyLenErr struct 2024-01-12 18:21:07 -06:00
Jack Christensen
f654d61d79 Make note about possible parse config error message redaction change 2024-01-12 17:56:13 -06:00
Jack Christensen
5d26bbefd8 Make pgconn.ConnectError and pgconn.ParseConfigError public
fixes #1773
2024-01-12 17:52:25 -06:00
vahid-sohrabloo
44768b5a01 fix a typo in config_test.go
fix a typo in config_test.go
2024-01-12 17:36:43 -06:00
Jack Christensen
6f2ce92356 Upgrade golang.org/x/crypto to v0.17.0
pgx is unaffected by CVE-2023-48795 because it does not use SSH.
However, dependabot and other vulnerability scanners may complain so
bump the dependency anyway.
2023-12-29 18:14:09 -06:00
Tikhon Fedulov
4367ee0598 Update TestRowToStructByName with snake case support 2023-12-25 09:47:10 -06:00
Tikhon Fedulov
d2c9ebc2ef Use local variables in fieldPosByName and fix errors 2023-12-25 09:47:10 -06:00
Tikhon Fedulov
0c7acf9481 Add snake_case support to RowToStructByName 2023-12-25 09:47:10 -06:00
Jack Christensen
cbc5a7055f Fix: close conn on read failure in pipeline
Suggested by @jameshartig in https://github.com/jackc/pgx/issues/1847
2023-12-23 12:11:23 -06:00
James Hartig
4c14caae07 update description cache after exec prepare 2023-12-23 12:08:02 -06:00
James Hartig
22fe50149b pgconn: check if pipeline is closed in Sync/GetResults
Otherwise there will be a nil pointer exception accessing the conn
2023-12-23 12:04:21 -06:00
Ryan Fowler
dfd198003a Fix panic in Pipeline when PgConn is busy or closed 2023-12-23 10:30:59 -06:00
jeremy.spriet
603c8c1e90 feat(pgproto3/backend): add a SetMaxBodyLen to limit the max body length for the receive 2023-12-23 10:25:35 -06:00
Samuel Stauffer
9ab9e3c40b Unwrap errors in normalizeTimeoutError 2023-12-16 11:15:35 -06:00
Samuel Stauffer
2daeb8dc5f pgconn: normalize startTLS connection error
Normalize the error that is returned by startTLS in pgconn.connect. This
makes it possible to determine if the error was a context error.
2023-12-16 11:15:35 -06:00
Jack Christensen
df3c5f4df8 Use "Pg" instead of "PG" in new PgError related identifiers
Arguably, PGError might have been better. But since the precedent is
long since established it is better to be consistent.
2023-12-15 18:33:51 -06:00
James Hartig
b1631e8e35 pgconn: add OnPGError to Config for error handling
OnPGError is called on every error response received from Postgres and can
be used to close connections on specific errors. Defaults to closing on
FATAL-severity errors.

Fixes #1803
2023-12-15 18:29:32 -06:00
Jack Christensen
ba05097642 Release v5.5.1 2023-12-09 12:59:44 -06:00
Evan Jones
384fe7775c Batch.Queue: document always uses the conn's DefaultQueryExecMode
The only way to change the query mode used by Batch.Queue and
SendBatch is to use a connection with a different
DefaultQueryExecMode. Add this to the function documentation.

Conn.SendBatch: Move where mode is defined to make this clearer in
the code. I spent time looking for the option that does not exist.
2023-12-09 11:47:56 -06:00
Eshton Robateau
20bf953a17 pull out changes into new public function 2023-12-09 11:20:14 -06:00
Eshton Robateau
12582a0fd4 bitsize largest option is 64 2023-12-09 11:20:14 -06:00
Eshton Robateau
905f252667 uncomment tests 2023-12-09 11:20:14 -06:00
Eshton Robateau
9927e14bbf remove dead line 2023-12-09 11:20:14 -06:00
Eshton Robateau
95b2f85e60 support scientific notation big floats 2023-12-09 11:20:14 -06:00
Jack Christensen
913e4c8487 Update changelog 2023-12-02 09:36:03 -06:00
Jack Christensen
31321c2017 Add race detector to bug report template 2023-12-02 09:27:57 -06:00
maksymnevajdev
319c3172f2 fix panic in prepared sql 2023-12-01 18:34:41 -06:00
Simon Paredes
4678e69599 fix error message to print the unexpected rune 2023-12-01 18:23:23 -06:00
Simon Paredes
89d699c2e8 wrap errors instead of just formatting them 2023-12-01 18:23:23 -06:00
Jacopo
7ebced92b5 Fix issue with order of json encoding #1805 2023-11-24 19:01:48 -06:00
Sam Whited
94e56e61ba Fix usage of logger in stdlib docs
The documentation previously showed the old way of logging and not the
newer tracer adapter. This patch updates the example to build correctly
with pgx/v5.

Signed-off-by: Sam Whited <sam@samwhited.com>
2023-11-22 08:15:05 -06:00
Jack Christensen
9103457384 Improve docs 2023-11-18 07:44:24 -06:00
Jack Christensen
9782306287 Only remove statement from map if deallocate succeeds
https://github.com/jackc/pgx/pull/1795
2023-11-18 07:44:24 -06:00
Jack Christensen
7d5a3969d0 Improve docs and tests 2023-11-18 07:44:24 -06:00
Jack Christensen
e5015e2fac pgx.Conn.Deallocate uses PgConn.Deallocate
This uses the PostgreSQL protocol to deallocate a prepared statement
instead of a SQL statement. This allows it to work even in an aborted
transaction.
2023-11-18 07:44:24 -06:00
Jack Christensen
4dbd57a7ed Add PgConn.Deallocate method
This method uses the PostgreSQL protocol Close method to deallocate a
prepared statement. This means that it can succeed in an aborted
transaction.
2023-11-18 07:44:24 -06:00
Jack Christensen
0570b0e196 Better document PgConn.Prepare implementation 2023-11-18 07:44:24 -06:00
Jack Christensen
df5d00eb60 Remove PostgreSQL 11 from supported versions 2023-11-11 10:09:47 -06:00
robford
d38dd85756 Allowed nxtf to signal end of data by returning nil, nil
Added some tests
Improved documentation
2023-11-11 10:06:58 -06:00
robford
9b6d3809d6 added tests 2023-11-11 10:06:58 -06:00
robford
b4d72d4fce copyFromFunc 2023-11-11 10:06:58 -06:00
robford
ccdd85a5eb added CopyFromCh 2023-11-11 10:06:58 -06:00
Jack Christensen
96f5f9cd95 Release v5.5.0 2023-11-04 10:27:32 -05:00
Kirill Mironov
d3fb6e00da implement json.Marshaler and json.Unmarshaler for Float4, Float8 2023-11-04 10:25:31 -05:00
Jack Christensen
cf6ef75f91 stdlib: Use Ping instead of CheckConn in ResetSession
CheckConn is deprecated. It doesn't detect all network outages. It
causes a 1ms delay while it tries to read the connection. Ping incurs a
round trip but that means it is a much stronger guarantee that the
connection is usable. In addition, if the application and the database
are on the same network it will actually be faster as round trip times
are typically a few hundred microseconds.
2023-10-26 20:41:44 -05:00
Jack Christensen
7a4bb7edb5
Add link to pgx presentation to README.md 2023-10-20 18:49:41 -05:00
Ivan Posazhennikov
6f7400f428 fix typo in the comment in the pgconn.go 2023-10-14 18:02:35 -05:00
Anton Levakin
304697de36 CancelRequest: Wait for the cancel request to be acknowledged by the server 2023-10-14 17:48:16 -05:00
Anton Levakin
5d0f904831 update TestConnContextCanceledCancelsRunningQueryOnServer
Check cancellation of the request for pgbouncer
2023-10-14 17:48:16 -05:00
Anton Levakin
6ca3d8ed4e Revert "CancelRequest: don't try to read the reply"
This reverts commit c861bce438ee5b96cc2dcc78718731dce6949060.
2023-10-14 17:48:16 -05:00
Jack Christensen
81ddcfdefb Fix spurious deadline exceeded error
stdlib_test.TestConnConcurrency had been flickering on CI deadline /
timeout errors. This was extremely confusing because the test deadline
was set for 2 minutes and the errors would occur much quicker.

The problem only manifested in an extremely specific and timing
sensitive situation.

1. The watchdog timer for deadlocked writes starts the goroutine to
   start the background reader
2. The background reader is stopped
3. The next operation is a read without a preceding write (AFAIK only
   CheckConn does this)
4. The deadline is set to interrupt the read
5. The goroutine from 1 actually starts the background reader
6. The background reader gets an error reading the connection with the
   deadline
7. The deadline is cleared
8. The next read on the connection will get the timeout error
2023-10-14 11:38:33 -05:00
Jack Christensen
45f807fdb4 Special case the underlying type of []byte
Underlying types were already tried. But []byte is not a normal
underlying type. It is a slice. But since it can be treated as a scalar
instead of an array / slice, we need to special case it.

https://github.com/jackc/pgx/issues/1763
2023-10-12 20:52:49 -05:00
Jack Christensen
8a09979417 Skip test on CockroachDB 2023-10-10 22:07:06 -05:00
Jack Christensen
7a2b93323c Prevent prematurely closing statements in database/sql
This error was introduced by 0f0d23659950bbf7a1677e50aac09b1e29ad7c60.
If the same statement was prepared multiple times then whenever Close
was called on one of the statements the underlying prepared statement
would be closed even if other statements were still using it.

https://github.com/jackc/pgx/issues/1754#issuecomment-1752004634
2023-10-10 21:56:26 -05:00
Nicola Murino
1484fec57f CI: add PostgreSQL 16 2023-10-10 20:54:54 -05:00
Nicola Murino
3957163808 Update supported Go versions and add 1.21 to CI 2023-10-10 20:54:54 -05:00
Jack Christensen
7fc908a5f2 Do not call t.Fatal in goroutine
require.Equal internally calls t.Fatal, which is not safe to call in a
goroutine.
2023-10-07 10:37:24 -05:00
Jack Christensen
0f0d236599 database/sql prepared statement names are deterministically generated
stdlib now uses the functionality introduced in
bbe2653bc51361e5d7607729356344ef979a9f5a for prepared statements. This
means that the prepared statement name is stable for a given query even
across connections and program executions.

It also makes tracing easier.

See https://github.com/jackc/pgx/issues/1754
2023-10-07 10:16:25 -05:00
Ville Skyttä
c6c50110db Spelling and grammar fixes 2023-10-07 09:26:23 -05:00
Jack Christensen
91530db629 Fix typo in string.Cut refactor 2023-10-07 09:20:28 -05:00
Ville Skyttä
24ed0e4257 Make use of strings.Cut 2023-10-04 20:41:55 +03:00
Jack Christensen
163eb68866 Normalize timeout error when receiving pipeline results
https://github.com/jackc/pgx/issues/1748#issuecomment-1740437138
2023-09-30 08:50:40 -05:00
Jack Christensen
a61517a83b SendBatch should pass ctx to StartPipeline
https://github.com/jackc/pgx/issues/1748
2023-09-28 20:00:02 -05:00
Vincent Le Goff
d93f31b8fa docs: GetPoolConnector 2023-09-25 08:51:12 -05:00
Jack Christensen
cf72a00f52 Skip test of unsupported operation on CockroachDB 2023-09-23 10:49:11 -05:00
Jack Christensen
c08cc72306 Improve QueryExecModeCacheDescribe and clarify documentation
QueryExecModeCacheDescribe actually is safe even when the schema or
search_path is modified. It may return an error on the first execution
but it should never silently encode or decode a value incorrectly. Add a
test to demonstrate and ensure this behavior.

Update documentation of QueryExecModeCacheDescribe to remove warning of
undetected result decoding errors.

Update documentation of QueryExecModeCacheStatement and
QueryExecModeCacheDescribe to indicate that the first execution of an
invalidated statement may fail.
2023-09-23 10:35:42 -05:00
Jack Christensen
7de53a958b stmtcache: Use deterministic, stable statement names
Statement names are now a function of the SQL. This may make database
diagnostics, monitoring, and profiling easier.
2023-09-23 09:55:05 -05:00
Jack Christensen
bbe2653bc5 Prepare chooses statement name based on sql if name == sql
This makes it easier to explicitly manage prepared statements.

refs #1716
2023-09-23 08:40:06 -05:00
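A minimal sketch of the behavior above (the query text is a hypothetical example): passing the SQL as both the name and the statement text lets pgx derive a deterministic name from the SQL itself, so the statement can be managed explicitly without inventing names.

```go
package main

import (
	"context"

	"github.com/jackc/pgx/v5"
)

// prepareBySQL prepares a statement whose name is derived from its SQL text.
func prepareBySQL(ctx context.Context, conn *pgx.Conn) error {
	sql := "select id, name from widgets where id = $1" // hypothetical query
	_, err := conn.Prepare(ctx, sql, sql)
	return err
}
```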
Mochammad Hanif R
4e7aa59d64 Fix typos in docs 2023-09-23 07:20:36 -05:00
Lev Zakharov
b301530a5f add doc for OpenDBFromPool 2023-09-09 08:13:56 -05:00
Lev Zakharov
f42824cab3 update docs 2023-09-09 08:13:56 -05:00
Lev Zakharov
18856482c4 remove before/after acquire hooks 2023-09-09 08:13:56 -05:00
Lev Zakharov
639691c0ab add test for stdlib.OpenDBFromPool 2023-09-09 08:13:56 -05:00
Lev Zakharov
3e716c4b06 add example to the doc 2023-09-09 08:13:56 -05:00
Lev Zakharov
51ade172e5 refactor to use the same connection implementation 2023-09-09 08:13:56 -05:00
Lev Zakharov
3d4540aa1b add *sql.DB construction from *pgxpool.Pool 2023-09-09 08:13:56 -05:00
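A sketch of the construction added in this group of commits, assuming a hypothetical connection string: one pgxpool.Pool can back both pgx-native code and code that needs a database/sql handle.

```go
package main

import (
	"context"
	"database/sql"

	"github.com/jackc/pgx/v5/pgxpool"
	"github.com/jackc/pgx/v5/stdlib"
)

// openBoth returns the pool and a *sql.DB that shares the pool's connections.
func openBoth(ctx context.Context, connString string) (*pgxpool.Pool, *sql.DB, error) {
	pool, err := pgxpool.New(ctx, connString)
	if err != nil {
		return nil, nil, err
	}
	db := stdlib.OpenDBFromPool(pool)
	return pool, db, nil
}
```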
Julien GOTTELAND
389931396e No data result on error 2023-08-19 18:31:41 -05:00
Julien GOTTELAND
9ee7d29cf9 Add CollectExactlyOneRow function 2023-08-19 18:31:41 -05:00
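A minimal sketch of CollectExactlyOneRow (the account type and table are hypothetical): it returns an error when the query yields zero rows or more than one row, rather than silently taking the first.

```go
package main

import (
	"context"

	"github.com/jackc/pgx/v5"
)

// account is a hypothetical row type used only for this sketch.
type account struct {
	ID   int64  `db:"id"`
	Name string `db:"name"`
}

// getAccount expects exactly one matching row.
func getAccount(ctx context.Context, conn *pgx.Conn, id int64) (account, error) {
	rows, _ := conn.Query(ctx, "select id, name from accounts where id = $1", id)
	return pgx.CollectExactlyOneRow(rows, pgx.RowToStructByName[account])
}
```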
Craig Ringer
a7375cc503 docs: Emphasise need to call rows.Err() after rows.Next() returns false
The Rows interface in pgx, like its ancestor in database/sql, is easy to
accidentally misuse in a way that can cause apps to misinterpret
database or connection errors as successful queries with empty or
truncated result-sets.

Update the docs to emphasise the need to call rows.Err() after
rows.Next() returns false, and direct users of the interface to the v5
API helpers that make writing correct code easier.

The docs on Conn.Query() already call this out, so only a small change
is needed to warn users and point them at the details on Query()

Per details in #1707
2023-08-10 17:19:15 -05:00
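For illustration, a sketch of the iteration pattern the docs emphasize (the items table is hypothetical):

```go
package main

import (
	"context"

	"github.com/jackc/pgx/v5"
)

// listItemNames iterates rows and checks rows.Err() after Next returns false.
func listItemNames(ctx context.Context, conn *pgx.Conn) ([]string, error) {
	rows, err := conn.Query(ctx, "select name from items")
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var names []string
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			return nil, err
		}
		names = append(names, name)
	}
	// Without this check, a connection error could be mistaken for a
	// successful query with an empty or truncated result set.
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return names, nil
}
```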
Jack Christensen
d43bd349c1 Add batch insert benchmarks 2023-08-08 18:44:31 -05:00
Jack Christensen
5c6cf62b53 Fix off by one error in benchmark 2023-08-08 18:38:34 -05:00
Jack Christensen
d17440d5c7 Add missed changelog entry and fix typo 2023-08-05 08:36:48 -05:00
Jack Christensen
4c60839c48 Release v5.4.3 2023-08-05 08:24:37 -05:00
Jack Christensen
e9087eacb8 Fix data race when pgproto3 trace is enabled during CopyFrom
https://github.com/jackc/pgx/issues/1703
2023-08-05 07:30:59 -05:00
Jack Christensen
d626dfe94e TestConnConcurrency has been failing on CI
This probably won't fix it, but at the very least we should not be
running assertions in a goroutine.
2023-07-28 18:13:31 -05:00
Jack Christensen
1a9b2a53a5 Fix staticcheck issues 2023-07-28 18:04:31 -05:00
Alexey Palazhchenko
8fb309c631 Use Go 1.20's link syntax for ParseConfig 2023-07-28 17:51:42 -05:00
horpto
f4533dc906 optimize parseNumericString 2023-07-25 19:25:23 -05:00
Jack Christensen
4091eedf03 Check out code before setting up Go
This allows for caching the Go dependencies.
2023-07-22 17:13:30 -05:00
Jack Christensen
87d771ef9c Prettier ci.yml 2023-07-22 17:12:56 -05:00
Jack Christensen
492283b90b zeronull.Timestamptz should use pgtype.Timestamptz
https://github.com/jackc/pgx/issues/1694
2023-07-22 08:35:32 -05:00
James Hartig
e665f74c99 fix TestPoolBackgroundChecksMinConns and NewConnsCount
Previously it was checking TotalConns but that includes ConstructingConns.
Instead it should directly check IdleConns so the next Acquire takes one of
those and doesn't make a 3rd connection. The check against the context was
also wrong which prevented this from timing out after 2 minutes.

This also fixes a bug where NewConnsCount was not correctly counting
connections created by Acquire directly.

Fixes #1690
2023-07-22 08:28:39 -05:00
Rafi Shamim
f90e86fd8d Unskip TestConnCopyFromLarge for CockroachDB
This test is passing now.
2023-07-22 07:11:47 -05:00
Jack Christensen
88b49d48f6 Disable TestPoolBackgroundChecksMinConns on Windows
https://github.com/jackc/pgx/issues/1690
2023-07-19 21:20:26 -05:00
Jack Christensen
2506cf3666 Make CI badge link 2023-07-19 21:12:49 -05:00
Jack Christensen
d58fe2d53c Fix json scan of non-string pointer to pointer
https://github.com/jackc/pgx/issues/1691
2023-07-19 20:54:05 -05:00
Jack Christensen
ef9e26a5d5 Check nil in defer
A panic might mean that pbr is nil.

https://github.com/jackc/pgx/issues/1689
2023-07-15 10:16:28 -05:00
Evan Jones
6703484a0d go.mod: run go mod tidy; removes golang.org/x/sys
I'm not sure exactly what commit removed the usage of this module,
but it seems worth simplifying the dependencies.
2023-07-15 10:12:11 -05:00
Jack Christensen
c513e2e435 Fix: pgxpool: background health check cannot overflow pool
It was previously possible for a connection to be created while the
background health check was running. The health check could create
connection(s) in excess of the maximum pool size in this case.

https://github.com/jackc/pgx/issues/1660
2023-07-15 10:09:53 -05:00
smaher-edb
f47f0cf823 connect_timeout is not obeyed for sslmode=allow|prefer
The connect_timeout given in the conn string was not obeyed if sslmode was not specified (the default is prefer) or was set to sslmode=allow|prefer. Connecting took twice the amount of time specified by connect_timeout. While this behavior is correct when multiple hosts are provided in the conn string, it is not correct for a single host, and it also did not match libpq.

The root cause is that to implement sslmode=allow|prefer, connections are tried twice: first with TLSConfig and, if that fails, without TLSConfig. The fix now uses the same context when the same host is being retried. This change does not affect the existing multi-host behavior.

The goal of this PR is to close issue [jackc/pgx/issues/1672](https://github.com/jackc/pgx/issues/1672)
2023-07-15 09:49:09 -05:00
Christoph Engelbert (noctarius)
bd3e0d422c Fixes #1684 QCharArrayOID being defined with the wrong OID 2023-07-15 09:44:48 -05:00
Jack Christensen
2f6fcf8eb0 RowTo(AddrOf)StructByPos ignores fields with "-" db tag
https://github.com/jackc/pgx/discussions/1682
2023-07-15 09:39:20 -05:00
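A small sketch of the behavior above (the report type and table are hypothetical): a field tagged `db:"-"` is skipped by RowToStructByPos, so only the remaining fields must line up with the selected columns.

```go
package main

import (
	"context"

	"github.com/jackc/pgx/v5"
)

// report has a Computed field that is never read from the database.
type report struct {
	ID       int64
	Title    string
	Computed string `db:"-"`
}

func listReports(ctx context.Context, conn *pgx.Conn) ([]report, error) {
	rows, _ := conn.Query(ctx, "select id, title from reports")
	return pgx.CollectRows(rows, pgx.RowToStructByPos[report])
}
```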
Jack Christensen
038fc448c1 Release v5.4.2 2023-07-11 21:29:54 -05:00
Jack Christensen
95aa87f2e8 exitPotentialWriteReadDeadlock stops bgReader
It's not enough to stop the slowWriteTimer, because the bgReader may
have been started.
2023-07-11 21:29:11 -05:00
Jack Christensen
f512b9688b Add PgConn.SyncConn
This provides a way to ensure it is safe to directly read or write to
the underlying net.Conn.

https://github.com/jackc/pgx/issues/1673
2023-07-11 21:29:11 -05:00
Jack Christensen
05440f9d3f Drastically increase allowed test times for potato CI
The context timeouts for tests are designed to give a better error
message when something hangs rather than the test just timing out.
Unfortunately, the potato CI frequently has some test or another
randomly take a long time. While the increased times are somewhat less
than optimal on a real computer, hopefully this will solve the
flickering CI.
2023-07-11 21:16:08 -05:00
Jack Christensen
e0c70201dc Skip json format test on CockroachDB 2023-07-11 20:51:22 -05:00
Jack Christensen
524f661136 Fix JSON encoding for pointer to structs implementing json.Marshaler
https://github.com/jackc/pgx/issues/1681
2023-07-11 20:28:36 -05:00
Dan McGee
507a9e9ad3 Remove some now unused pgtype code
Most of this is in conversion, and I assume it became unused with some
of the v5 changes and refactors to a codec-based approach.

There are likely a few more cleanups to be made, but these ones seemed
easy and safe to start with.
2023-07-10 20:23:42 -05:00
Dan McGee
0328d314ea Use bytes.Equal rather than bytes.Compare ==/!= 0
As recommended by go-staticcheck, but also might be a bit more efficient
for the compiler to implement, since we don't care about which slice of
bytes is greater than the other one.
2023-07-08 12:08:05 -05:00
Jack Christensen
cd46cdd450 Recreate the frontend in Construct with the new bgReader
https://github.com/jackc/pgx/pull/1629#discussion_r1251472215
2023-07-08 11:39:39 -05:00
Adrian-Stefan Mares
2bf5a61401 fix: Do not use infinite timers 2023-07-08 11:24:39 -05:00
Evan Jones
dc94db6b3d pgtype.Hstore: add a round-trip test for binary and text codecs
This ensures the output of Encode can pass through Scan and produce
the same input. This found two two minor problems with the text
codec. These are not bugs: These situations do not happen when using
pgx with Postgres. However, I think it is worth fixing to ensure the
code is internally consistent.

The problems with the text codec are:

* It did not correctly distinguish between nil and empty. This is not
  a problem with Postgres, since NULL values are marked separately,
  but the binary codec distinguishes between them, so it seems like
  the text codec should as well.
* It did not output spaces between keys. Postgres produces output in
  this format, and the parser now only strictly parses the Postgres
  format. This is not a bug, but seems like a good idea.
2023-06-29 17:25:47 -05:00
Gerasimos (Makis) Maropoulos
b68e7b2a68 README: Add kataras/pgx-golog to 3rd-party loggers 2023-06-24 18:23:15 -05:00
Brandon Kauffman
1dd69f86a1 Enable failover efforts when pg_hba.conf disallows non-ssl connections
Copy of https://github.com/jackc/pgconn/pull/133
2023-06-24 06:41:35 -05:00
Jack Christensen
8e6cf8f3a5 Add comment to test 2023-06-20 08:49:33 -05:00
Jack Christensen
91cba90e8d Fix: RowScanner errors are fatal to Rows
https://github.com/jackc/pgx/issues/1654
2023-06-20 08:48:06 -05:00
Jack Christensen
0d14b87140 Because CI runs on a potato 2023-06-20 08:43:06 -05:00
Nicola Murino
e79efdacf9 CI: run tests in verbose mode
It's helpful to have more detailed logs, such as how long it took to
run a single test
2023-06-19 17:06:21 -05:00
Nicola Murino
20a40120ed TestQueryEncodeError: crdb now returns the same error as postgres 2023-06-19 17:06:21 -05:00
Nicola Murino
aa263d4352 CockroachDB tests: use a more recent version 2023-06-19 17:06:21 -05:00
Nicola Murino
7fccc604af stdlib: add a concurrency test 2023-06-19 17:06:21 -05:00
Jack Christensen
34f17a6048 Allow more time for test on slow CI 2023-06-18 08:36:03 -05:00
Jack Christensen
74ab538d2a Release v5.4.1 2023-06-18 08:27:21 -05:00
Lev Zakharov
7c386112e3 fix concurrency bug in pgtype.defaultMap (#1650) 2023-06-18 08:23:56 -05:00
Jack Christensen
9a5ead9048 Add TxOptions.BeginQuery to allow overriding the default BEGIN query
https://github.com/jackc/pgx/issues/1643
2023-06-18 06:43:17 -05:00
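A minimal sketch of the option added here (the exact BEGIN text is an assumption chosen for illustration): BeginQuery replaces the generated BEGIN statement entirely.

```go
package main

import (
	"context"

	"github.com/jackc/pgx/v5"
)

// beginWithCustomQuery starts a transaction with a custom BEGIN statement.
func beginWithCustomQuery(ctx context.Context, conn *pgx.Conn) (pgx.Tx, error) {
	return conn.BeginTx(ctx, pgx.TxOptions{
		BeginQuery: "begin isolation level serializable",
	})
}
```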
Jack Christensen
737b5af236 Allow more time for test on slow CI 2023-06-17 19:03:15 -05:00
Jack Christensen
f20070650f Make TestPoolBackgroundChecksMinConns less timing sensitive for CI 2023-06-17 17:34:07 -05:00
Evan Jones
e5db6a0467 pgtype array: Fix encoding of vtab \v
Arrays with values that start or end with vtab ("\v") must be quoted.
Postgres's array parser skips leading and trailing whitespace with
the array_isspace() function, which is slightly different from the
scanner_isspace() function that was previously linked. Add a test
that reproduces this failure, and fix the definition of isSpace.

This also includes a change to use strings.EqualFold which should
really not matter, but does not require copying the string.
2023-06-17 17:15:58 -05:00
Jack Christensen
5b7cc8e215 Make TestConnCheckConn less timing sensitive for CI 2023-06-17 17:12:58 -05:00
Evan Jones
bc8b1ca320 remove the single backing string optimization
This is a bit slower than using this optimization, but ensures this
version does not change garbage collection behavior. It does still use
a single []string for all the *string value pointers because
that is what text parsing already does. This makes the two behave
similarly.

benchstat results of master versus this version:

                               │  orig.txt   │   new-binary-no-share-string.txt    │
                               │   sec/op    │   sec/op     vs base                │
HstoreScan/databasesql.Scan-10   82.11µ ± 1%   81.71µ ± 2%        ~ (p=0.280 n=10)
HstoreScan/text-10               83.30µ ± 1%   82.45µ ± 1%   -1.02% (p=0.000 n=10)
HstoreScan/binary-10             15.99µ ± 2%   10.12µ ± 1%  -36.67% (p=0.000 n=10)
geomean                          47.82µ        40.86µ       -14.56%

                               │   orig.txt   │   new-binary-no-share-string.txt    │
                               │     B/op     │     B/op      vs base               │
HstoreScan/databasesql.Scan-10   56.23Ki ± 0%   56.23Ki ± 0%       ~ (p=0.128 n=10)
HstoreScan/text-10               65.12Ki ± 0%   65.12Ki ± 0%       ~ (p=0.541 n=10)
HstoreScan/binary-10             21.09Ki ± 0%   19.87Ki ± 0%  -5.75% (p=0.000 n=10)
geomean                          42.58Ki        41.75Ki       -1.95%

                               │  orig.txt  │    new-binary-no-share-string.txt    │
                               │ allocs/op  │ allocs/op   vs base                  │
HstoreScan/databasesql.Scan-10   744.0 ± 0%   744.0 ± 0%        ~ (p=1.000 n=10) ¹
HstoreScan/text-10               743.0 ± 0%   743.0 ± 0%        ~ (p=1.000 n=10) ¹
HstoreScan/binary-10             464.0 ± 0%   316.0 ± 0%  -31.90% (p=0.000 n=10)
geomean                          635.4        559.0       -12.02%
¹ all samples are equal

benchstat results of the version with one string and this version:

                               │ new-binary-share-everything.txt │    new-binary-no-share-string.txt    │
                               │             sec/op              │    sec/op     vs base                │
HstoreScan/databasesql.Scan-10                       81.80µ ± 1%    81.71µ ± 2%        ~ (p=1.000 n=10)
HstoreScan/text-10                                   82.77µ ± 1%    82.45µ ± 1%        ~ (p=0.063 n=10)
HstoreScan/binary-10                                 7.330µ ± 2%   10.124µ ± 1%  +38.13% (p=0.000 n=10)
geomean                                              36.75µ         40.86µ       +11.18%

                               │ new-binary-share-everything.txt │   new-binary-no-share-string.txt    │
                               │              B/op               │     B/op      vs base               │
HstoreScan/databasesql.Scan-10                      56.23Ki ± 0%   56.23Ki ± 0%       ~ (p=0.232 n=10)
HstoreScan/text-10                                  65.12Ki ± 0%   65.12Ki ± 0%       ~ (p=0.218 n=10)
HstoreScan/binary-10                                20.73Ki ± 0%   19.87Ki ± 0%  -4.11% (p=0.000 n=10)
geomean                                             42.34Ki        41.75Ki       -1.39%

                               │ new-binary-share-everything.txt │     new-binary-no-share-string.txt     │
                               │            allocs/op            │  allocs/op   vs base                   │
HstoreScan/databasesql.Scan-10                        744.0 ± 0%    744.0 ± 0%         ~ (p=1.000 n=10) ¹
HstoreScan/text-10                                    743.0 ± 0%    743.0 ± 0%         ~ (p=1.000 n=10) ¹
HstoreScan/binary-10                                  41.00 ± 0%   316.00 ± 0%  +670.73% (p=0.000 n=10)
geomean                                               283.0         559.0        +97.53%
¹ all samples are equal
2023-06-16 15:31:37 -05:00
Evan Jones
2de94187f5 hstore: Make binary parsing 2X faster
* use []string for value string pointers: one allocation instead of
  one per value.
* use one string for all key/value pairs, instead of one for each.

After this change, one Hstore will share two allocations: one string
and one []string. The disadvantage is that it cannot be deallocated
until all key/value pairs are unused. This means if an application
takes a single key or value from the Hstore and holds on to it, its
memory footprint will increase. I would guess this is an unlikely
problem, but it is possible.

The benchstat results from my M1 Max are below.

goos: darwin
goarch: arm64
pkg: github.com/jackc/pgx/v5/pgtype
                               │   orig.txt   │               new.txt               │
                               │    sec/op    │   sec/op     vs base                │
HstoreScan/databasesql.Scan-10    82.11µ ± 1%   82.66µ ± 2%        ~ (p=0.436 n=10)
HstoreScan/text-10                83.30µ ± 1%   84.24µ ± 3%        ~ (p=0.165 n=10)
HstoreScan/binary-10             15.987µ ± 2%   7.459µ ± 6%  -53.35% (p=0.000 n=10)
geomean                           47.82µ        37.31µ       -21.98%

                               │   orig.txt   │               new.txt               │
                               │     B/op     │     B/op      vs base               │
HstoreScan/databasesql.Scan-10   56.23Ki ± 0%   56.23Ki ± 0%       ~ (p=0.324 n=10)
HstoreScan/text-10               65.12Ki ± 0%   65.12Ki ± 0%       ~ (p=0.675 n=10)
HstoreScan/binary-10             21.09Ki ± 0%   20.73Ki ± 0%  -1.70% (p=0.000 n=10)
geomean                          42.58Ki        42.34Ki       -0.57%

                               │  orig.txt   │               new.txt                │
                               │  allocs/op  │ allocs/op   vs base                  │
HstoreScan/databasesql.Scan-10    744.0 ± 0%   744.0 ± 0%        ~ (p=1.000 n=10) ¹
HstoreScan/text-10                743.0 ± 0%   743.0 ± 0%        ~ (p=1.000 n=10) ¹
HstoreScan/binary-10             464.00 ± 0%   41.00 ± 0%  -91.16% (p=0.000 n=10)
geomean                           635.4        283.0       -55.46%
¹ all samples are equal
2023-06-16 15:31:37 -05:00
Evan Jones
07670dddca do not share the original input string
This allows the original input string to be garbage collected, so it
should not change the memory footprint. This is slower than the
version that shares a string, but only by a small amount. It is still
faster than binary parsing (until that is optimized).

benchstat difference of original versus this version:

                               │  orig.txt   │     new-do-not-share-string.txt     │
                               │   sec/op    │   sec/op     vs base                │
HstoreScan/databasesql.Scan-10   82.11µ ± 1%   14.24µ ± 2%  -82.66% (p=0.000 n=10)
HstoreScan/text-10               83.30µ ± 1%   14.97µ ± 1%  -82.03% (p=0.000 n=10)
HstoreScan/binary-10             15.99µ ± 2%   15.80µ ± 0%   -1.16% (p=0.024 n=10)
geomean                          47.82µ        14.99µ       -68.66%

                               │   orig.txt   │     new-do-not-share-string.txt      │
                               │     B/op     │     B/op      vs base                │
HstoreScan/databasesql.Scan-10   56.23Ki ± 0%   20.11Ki ± 0%  -64.24% (p=0.000 n=10)
HstoreScan/text-10               65.12Ki ± 0%   29.00Ki ± 0%  -55.47% (p=0.000 n=10)
HstoreScan/binary-10             21.09Ki ± 0%   21.09Ki ± 0%        ~ (p=0.722 n=10)
geomean                          42.58Ki        23.08Ki       -45.80%

                               │  orig.txt  │     new-do-not-share-string.txt      │
                               │ allocs/op  │ allocs/op   vs base                  │
HstoreScan/databasesql.Scan-10   744.0 ± 0%   340.0 ± 0%  -54.30% (p=0.000 n=10)
HstoreScan/text-10               743.0 ± 0%   339.0 ± 0%  -54.37% (p=0.000 n=10)
HstoreScan/binary-10             464.0 ± 0%   464.0 ± 0%        ~ (p=1.000 n=10) ¹
geomean                          635.4        376.8       -40.70%
¹ all samples are equal

benchstat difference of the shared string versus not:

                               │ new-share-string.txt │     new-do-not-share-string.txt     │
                               │        sec/op        │   sec/op     vs base                │
HstoreScan/databasesql.Scan-10            10.57µ ± 2%   14.24µ ± 2%  +34.69% (p=0.000 n=10)
HstoreScan/text-10                        11.60µ ± 2%   14.97µ ± 1%  +29.03% (p=0.000 n=10)
HstoreScan/binary-10                      15.87µ ± 2%   15.80µ ± 0%        ~ (p=0.280 n=10)
geomean                                   12.48µ        14.99µ       +20.07%

                               │ new-share-string.txt │     new-do-not-share-string.txt      │
                               │         B/op         │     B/op      vs base                │
HstoreScan/databasesql.Scan-10           11.68Ki ± 0%   20.11Ki ± 0%  +72.17% (p=0.000 n=10)
HstoreScan/text-10                       20.58Ki ± 0%   29.00Ki ± 0%  +40.93% (p=0.000 n=10)
HstoreScan/binary-10                     21.08Ki ± 0%   21.09Ki ± 0%        ~ (p=0.427 n=10)
geomean                                  17.17Ki        23.08Ki       +34.39%

                               │ new-share-string.txt │      new-do-not-share-string.txt       │
                               │      allocs/op       │  allocs/op   vs base                   │
HstoreScan/databasesql.Scan-10             44.00 ± 0%   340.00 ± 0%  +672.73% (p=0.000 n=10)
HstoreScan/text-10                         44.00 ± 0%   339.00 ± 0%  +670.45% (p=0.000 n=10)
HstoreScan/binary-10                       464.0 ± 0%    464.0 ± 0%         ~ (p=1.000 n=10) ¹
geomean                                    96.49         376.8       +290.47%
2023-06-16 15:30:54 -05:00
Evan Jones
d48d36dc02 pgtype/hstore: Make text parsing about 6X faster
I am working on an application that uses hstore types, and we found
that returning the values is slow, particularly when using the text
protocol, such as when using database/sql. This improves parsing to
be about 6X faster (currently faster than binary). The changes are:

* referencing the original string instead of copying into new strings
  (very large win)
* using string.IndexByte to scan double quoted strings: it has
  architecture-specific assembly implementations, and most of the
  time is spent in key/value strings.
* estimating the number of key/value pairs to allocate the correct
  size of the slice and map up front. This reduces the number of
  allocations and bytes allocated by a factor of 2, and was a small
  CPU win.
* parsing directly into the Hstore, rather than copying into it.

This parser is stricter than the old one. It only accepts hstore
strings serialized by Postgres. The old one was already stricter
than Postgres's own parser, but previously accepted any whitespace
character after a comma. This one only accepts space. Example:

  "k1"=>"v1",\t"k2"=>"v2"

Postgres only ever uses ", " as the separator. See hstore_out:
https://github.com/postgres/postgres/blob/master/contrib/hstore/hstore_io.c

The result of using benchstat to compare the benchmark on my M1 Pro
with the following command line in below. The new text parser is now
faster than the binary parser. I will improve the binary parser in a
separate change.

for i in $(seq 10); do go test ./pgtype -run=none -bench=BenchmarkHstoreScan -benchtime=1s >> new.txt; done

goos: darwin
goarch: arm64
pkg: github.com/jackc/pgx/v5/pgtype
                               │  orig.txt   │               new.txt               │
                               │   sec/op    │   sec/op     vs base                │
HstoreScan/databasesql.Scan-10   82.11µ ± 1%   10.51µ ± 0%  -87.20% (p=0.000 n=10)
HstoreScan/text-10               83.30µ ± 1%   11.49µ ± 1%  -86.20% (p=0.000 n=10)
HstoreScan/binary-10             15.99µ ± 2%   15.77µ ± 1%   -1.35% (p=0.007 n=10)
geomean                          47.82µ        12.40µ       -74.08%

                               │   orig.txt   │               new.txt                │
                               │     B/op     │     B/op      vs base                │
HstoreScan/databasesql.Scan-10   56.23Ki ± 0%   11.68Ki ± 0%  -79.23% (p=0.000 n=10)
HstoreScan/text-10               65.12Ki ± 0%   20.58Ki ± 0%  -68.40% (p=0.000 n=10)
HstoreScan/binary-10             21.09Ki ± 0%   21.09Ki ± 0%        ~ (p=0.378 n=10)
geomean                          42.58Ki        17.18Ki       -59.66%

                               │  orig.txt   │               new.txt                │
                               │  allocs/op  │ allocs/op   vs base                  │
HstoreScan/databasesql.Scan-10   744.00 ± 0%   44.00 ± 0%  -94.09% (p=0.000 n=10)
HstoreScan/text-10               743.00 ± 0%   44.00 ± 0%  -94.08% (p=0.000 n=10)
HstoreScan/binary-10              464.0 ± 0%   464.0 ± 0%        ~ (p=1.000 n=10) ¹
geomean                           635.4        96.49       -84.81%
¹ all samples are equal
2023-06-16 15:30:54 -05:00
Nicola Murino
eb2807bda5 copy tests: test all supported modes
if we run the test in parallel, we always test the latest mode

see here

https://github.com/golang/go/wiki/LoopvarExperiment

also fix a lint warning about pgtype.Vec2
2023-06-15 20:54:24 -05:00
Nicola Murino
b1f8055584 TestConnectWithFallback: increase timeout
On Windows, connecting to a closed port takes about 2 seconds.
You can test with something like this

	start := time.Now()
	_, err := d.DialContext(context.Background(), "tcp", "127.0.0.1:1")
	fmt.Printf("finished, time %s, err: %v\n", time.Since(start), err)

This seems by design

https://groups.google.com/g/comp.os.ms-windows.programmer.win32/c/jV6kRVY3BqM

Generally TestConnectWithFallback takes about 8-9 seconds on Windows.
Increase timeout to avoid random failures under load
2023-06-15 20:54:24 -05:00
Jack Christensen
461b9fa36e Release v5.4.0 2023-06-14 09:41:17 -05:00
Jack Christensen
45520d5a11 Document pgtype.Map and pgtype.Type are immutable after registration 2023-06-14 08:27:04 -05:00
Lev Zakharov
90f9aad67f add singleton pgtype.Map for default type mappings 2023-06-14 08:21:28 -05:00
Jack Christensen
5f28621394 Add docs clarifying that FieldDescriptions may return nil
https://github.com/jackc/pgx/issues/1634
2023-06-14 07:42:11 -05:00
Klaus
c542df4fb4 added MarshalJSON and UnmarshalJSON to timestamp and added their tests (based on timestamptz implementation) 2023-06-12 09:52:49 -05:00
Jack Christensen
34eddf9983 Increase slowWriteTimer to 15ms and document why 2023-06-12 09:39:26 -05:00
Jack Christensen
5d4f9018bf failed to write startup message error should be normalized 2023-06-12 09:39:26 -05:00
Jack Christensen
482e56a79b Fix race condition when CopyFrom is cancelled. 2023-06-12 09:39:26 -05:00
Jack Christensen
3ea2f57d8b Deprecate CheckConn in favor of Ping 2023-06-12 09:39:26 -05:00
Jack Christensen
26c79eb215 Handle writes that could deadlock with reads from the server
This commit adds a background reader that can optionally buffer reads.
It is used whenever a potentially blocking write is made to the server.
The background reader is started on a slight delay so there should be no
meaningful performance impact as it doesn't run for quick queries and
its overhead is minimal relative to slower queries.
2023-06-12 09:39:26 -05:00
Jack Christensen
85136a8efe Restore pgx v4 style CopyFrom implementation
This approach uses an extra goroutine to write while the main goroutine
continues to read. This avoids the need to use non-blocking I/O.
2023-06-12 09:39:26 -05:00
Jack Christensen
4410fc0a65 Remove nbconn
The non-blocking IO system was designed to solve three problems:

1. Deadlock that can occur when both sides of a connection are blocked
   writing because all buffers between are full.
2. The inability to use a write deadline with a TLS.Conn without killing
   the connection.
3. Efficiently check if a connection has been closed before writing.
   This reduces the cases where the application doesn't know if a query
   that does a INSERT/UPDATE/DELETE was actually sent to the server or
   not.

However, the nbconn package is extraordinarily complex, has been a
source of very tricky bugs, and has OS specific code paths. It also does
not work at all with underlying net.Conn implementations that do not
have platform specific non-blocking IO syscall support and do not
properly implement deadlines. In particular, this is the case with
golang.org/x/crypto/ssh.

I believe the deadlock problem can be solved with a combination of a
goroutine for CopyFrom like v4 used and a watchdog for regular queries
that uses time.AfterFunc.

The write deadline problem actually should be ignorable. We check for
context cancellation before sending a query and the actual Write should
be almost instant as long as the underlying connection is not blocked.
(We should only have to wait until it is accepted by the OS, not until
it is fully sent.)

Efficiently checking if a connection has been closed is probably the
hardest to solve without non-blocking reads. However, the existing code
only solves part of the problem. It can detect a closed or broken
connection the OS knows about, but it won't actually detect other types
of broken connections such as a network interruption. This is currently
implemented in CheckConn and called automatically when checking a
connection out of the pool that has been idle for over one second. I
think that changing CheckConn to a very short deadline read and changing
the pool to do an actual Ping would be an acceptable solution.

Remove nbconn and non-blocking code. This does not leave the system in
an entirely working state. In particular, CopyFrom is broken, deadlocks
can occur for extremely large queries or batches, and PgConn.CheckConn
is now a `select 1` ping. These will be resolved in subsequent commits.
2023-06-12 09:39:26 -05:00
Massimo Costa
9cfdd21f1c feat: add reference to pgx-slog adapter 2023-06-12 09:37:20 -05:00
Evan Jones
4d643b75f5 pgtype/hstore_test.go: Extend coverage of scan benchmark
I am working on an application that reads a lot of hstore values, and
have discovered that scanning it is fairly slow. I'm working on some
improvements, but first I wanted a better benchmark. This adds more
realistic data, and extends it to cover the three APIs: database/sql,
and pgconn.Rows.Scan with both text and binary protocols.
2023-06-12 09:17:24 -05:00
Jack Christensen
490f70fc5f Fix docs for QueryExecModeDescribeExec with connection poolers
https://github.com/jackc/pgx/issues/1635
2023-06-11 08:26:02 -05:00
Evan Jones
1b68b5970e pgtype/hstore: Save 2 allocs in database/sql Scan implementation
Remove unneeded string to []byte to string conversion, which saves 2
allocs and should make Hstore text scanning slightly faster.

The Hstore.Scan() function takes a string as input, converts it to
[]byte, and calls scanPlanTextAnyToHstoreScanner.Scan(). That
function converts []byte back to string and calls parseHstore. This
refactors scanPlanTextAnyToHstoreScanner.Scan into
scanPlanTextAnyToHstoreScanner.scanString so the database/sql Scan
function can call it directly, bypassing this conversion.

The added Benchmark shows this saves 2 allocs for longer strings, and
saves about 5% CPU overall on my M1 Pro. benchstat output:

goos: darwin
goarch: arm64
pkg: github.com/jackc/pgx/v5/pgtype
              │  orig.txt   │              new.txt               │
              │   sec/op    │   sec/op     vs base               │
HstoreScan-10   1.334µ ± 2%   1.257µ ± 2%  -5.77% (p=0.000 n=10)

              │   orig.txt   │               new.txt               │
              │     B/op     │     B/op      vs base               │
HstoreScan-10   2.094Ki ± 0%   1.969Ki ± 0%  -5.97% (p=0.000 n=10)

              │  orig.txt  │              new.txt              │
              │ allocs/op  │ allocs/op   vs base               │
HstoreScan-10   36.00 ± 0%   34.00 ± 0%  -5.56% (p=0.000 n=10)
2023-06-07 15:35:22 -05:00
Evan Jones
ee04d4a74d pgtype/hstore: Avoid Postgres Mac OS X parsing bug
Postgres on Mac OS X has a bug in how it parses hstore text values
that causes it to misinterpret some Unicode values as spaces. This
causes values sent by pgx to be misinterpreted. To avoid this, always
quote hstore values, which is how Postgres serializes them itself.
The test change fails on Mac OS X without this fix.

While I suspect this should not be performance critical for any
application, I added a quick benchmark to test the performance of the
encoding. This change actually makes encoding slightly faster on my
M1 Pro. The output from the benchstat program on this benchmark is:

goos: darwin
goarch: arm64
pkg: github.com/jackc/pgx/v5/pgtype
                          │   orig.txt   │           new-quotes.txt            │
                          │    sec/op    │   sec/op     vs base                │
HstoreSerialize/text-10      207.1n ± 0%   142.3n ± 1%  -31.31% (p=0.000 n=10)
HstoreSerialize/binary-10   100.10n ± 0%   99.64n ± 1%   -0.45% (p=0.013 n=10)
geomean                      144.0n        119.1n       -17.31%

I have also attempted to fix the Postgres bug, but it will take a
long time for this fix to get upstream:

https://www.postgresql.org/message-id/CA%2BHWA9awUW0%2BRV_gO9r1ABZwGoZxPztcJxPy8vMFSTbTfi4jig%40mail.gmail.com
2023-06-07 15:29:25 -05:00
Jack Christensen
d9560c78b8 Use tx instead of underlying conn in test
Improves clarity
2023-06-03 07:59:28 -05:00
Jack Christensen
608f39f426 Ensure pgxpool.Pool.QueryRow.Scan releases connection on panic
Otherwise a connection would be leaked and closing the pool would block.

https://github.com/jackc/pgx/issues/1628
2023-06-03 07:39:56 -05:00
Nicola Murino
229d2aaa49 TestConnCopyFromBinary: increase context timeout 2023-06-03 06:45:28 -05:00
Nicola Murino
b4314ddaf7 TestConnCopyFromSlowFailRace: increase context timeout
On Windows time.Sleep(time.Millisecond) will sleep for 15 milliseconds
2023-06-03 06:45:28 -05:00
Nicola Murino
28bd5b3843 TestConnectTimeoutStuckOnTLSHandshake: allow more time to complete
to avoid random errors in Windows CI
2023-06-03 06:45:28 -05:00
Nicola Murino
fb47e1abbb TestContextWatcherStress: reduce sleep counts 2023-06-03 06:45:28 -05:00
Nicola Murino
c861bce438 CancelRequest: don't try to read the reply
Postgres will just process the request and close the connection
2023-06-03 06:45:28 -05:00
Nicola Murino
46d91255b0 remove timeout for test cases on Windows 2023-06-03 06:45:28 -05:00
Nicola Murino
ef363b59ab skipping some config parsing tests on Windows
this should be investigated and fixed
2023-06-03 06:45:28 -05:00
Nicola Murino
bad6b36c47 CI Windows: Initialize test database 2023-06-03 06:45:28 -05:00
Nicola Murino
33d4fa0fa6 TLS with Fake Non-blocking IO test is expected to fail on Windows 2023-06-03 06:45:28 -05:00
Nicola Murino
30d63caa6a CI: run basic tests on Windows 2023-06-03 06:45:28 -05:00
Nicola Murino
b0fa429fd0 add a comment explaining that nbOperMu and nbOperCnt are used on Windows 2023-06-03 06:45:28 -05:00
Nicola Murino
32c7858e61 Revert "Remove unused fields"
This reverts commit 2c1973de4634a6a83d3ba09bdcde392aaf7cfb71.
2023-06-03 06:45:28 -05:00
Pavlo Golub
c7733fe52e Update README.md
add pgxmock description
2023-05-31 07:11:41 -05:00
Jack Christensen
9720d0d63f Use context timeouts for tracelog tests 2023-05-29 11:23:21 -05:00
Jack Christensen
5f6636d028 Add context timeouts for more pgxpool tests 2023-05-29 11:15:40 -05:00
Jack Christensen
a1a97a7ca8 Add context timeouts for some pgxpool tests 2023-05-29 11:04:52 -05:00
Jack Christensen
0ec512b504 Fix: possible fail in goroutine after test has completed 2023-05-29 10:43:15 -05:00
Jack Christensen
f93b42b6ac Allow more time for TestConnExecBatchHuge 2023-05-29 10:35:38 -05:00
Jack Christensen
9f00b6f750 Use context timeouts in more tests
Tests should timeout in a reasonable time if something is stuck. In
particular this is important when testing deadlock conditions such as
can occur with the copy protocol if both the client and the server are
blocked writing until the other side does a read.
2023-05-29 10:25:57 -05:00
Jonathan Gonzalez V
4b9aa7c4f2 chore: update version of golang.org/x/crypto library from v0.6.0 to v0.9.0
During the update also the following packages were updated:

golang.org/x/sys v0.5.0 to v0.8.0
golang.org/x/text v0.7.0 to v0.9.0

Signed-off-by: Jonathan Gonzalez V <jonathan.abdiel@gmail.com>
2023-05-29 09:20:51 -05:00
Jack Christensen
2c1973de46 Remove unused fields 2023-05-27 08:18:47 -05:00
Jack Christensen
b3739c1289 pgconn.CheckConn locks connection
This ensures that a closed connection at the pgconn layer is not
considered okay when the background closing of the net.Conn is still in
progress.

This also means that CheckConn cannot be called when the connection is
locked (for example, by an in-progress query). But that seems
reasonable. It's not exactly clear that that would have ever worked
anyway.

https://github.com/jackc/pgx/issues/1618#issuecomment-1563702231
2023-05-26 06:03:25 -05:00
Alek Anokhin
70a200cff4 Fix test failures
Add bool type alias conversion in `elemKindToPointerTypes` and `underlyingNumberType`
2023-05-20 08:53:23 -05:00
Wichert Akkerman
c1c67e4e58 Fix: correctly handle bool type aliases
https://github.com/jackc/pgx/issue/1593
2023-05-20 08:53:23 -05:00
Evan Jones
9de41fac75 ParseConfig: default_query_exec_mode: Return arg in error
If the default_query_exec_mode is unknown, the returned error
previously was:

    invalid default_query_exec_mode: <nil>

This changes it to return the argument. Add a test that unknown modes
fail to parse and include this string.
2023-05-20 08:09:35 -05:00
Evan Jones
11d892dfcf pgconn.CancelRequest: Fix unix sockets: don't use RemoteAddr()
The tests for cancelling requests were failing when using unix
sockets. The reason is that net.Conn.RemoteAddr() calls getpeername()
to get the address. For Unix sockets, this returns the address that
was passed to bind() by the *server* process, not the address that
was passed to connect() by the *client*. For postgres, this is always
relative to the server's directory, so is a path like:

    ./.s.PGSQL.5432

Since it does not return the full absolute path, this function cannot
connect, so it cannot cancel requests. To fix it, use the connection's
config for Unix sockets. I think this should be okay, since a system
using unix sockets should not have "fallbacks". If that is incorrect,
we will need to save the address on PgConn.

Fixes the following failed tests when using Unix sockets:

--- FAIL: TestConnCancelRequest (2.00s)
    pgconn_test.go:2056:
          Error Trace:  /Users/evan.jones/pgx/pgconn/pgconn_test.go:2056
                              /Users/evan.jones/pgx/pgconn/asm_arm64.s:1172
          Error:        Received unexpected error:
                        dial unix ./.s.PGSQL.5432: connect: no such file or directory
          Test:         TestConnCancelRequest
    pgconn_test.go:2063:
          Error Trace:  /Users/evan.jones/pgx/pgconn/pgconn_test.go:2063
          Error:        Object expected to be of type *pgconn.PgError, but was <nil>
          Test:         TestConnCancelRequest
--- FAIL: TestConnContextCanceledCancelsRunningQueryOnServer (5.10s)
    pgconn_test.go:2109:
          Error Trace:  /Users/evan.jones/pgx/pgconn/pgconn_test.go:2109
          Error:        Received unexpected error:
                        timeout: context already done: context deadline exceeded
          Test:         TestConnContextCanceledCancelsRunningQueryOnServer
2023-05-20 08:08:47 -05:00
Evan Jones
0292edecb0 pgx.Conn: Fix memory leak: Delete items from preparedStatements
Previously, items were never removed from the preparedStatements map.
This means workloads that send a large number of unique queries could
run out of memory. Delete items from the map when sending the
deallocate command to Postgres. Add a test to verify this works.

Fixes https://github.com/jackc/pgx/issues/1456
2023-05-20 08:06:37 -05:00
Evan Jones
eab316e200 pgtype.Hstore: Fix quoting of whitespace; Add test
Before this change, the Hstore text protocol did not quote keys or
values containing non-space whitespace ("\r\n\v\t"). This causes
inserts with these values to fail with errors like:

    ERROR: Syntax error near "r" at position 17 (SQLSTATE XX000)

The previous version also quoted curly braces ("{}"), but they don't
seem to require quoting.

It is possible that it would be easier to just always quote the
values, which is what Postgres does when encoding its text protocol,
but this is a smaller change.
2023-05-16 07:02:55 -05:00
Evan Jones
8ceef73b84 pgtype.parseHstore: Reject invalid input; Fix error messages
The parseHstore function did not check the return value from
p.Consume() after a ', ' sequence. It expects a doublequote '"' that
starts the next key, but would accept any character. This means it
accepted invalid input such as:

    "key1"=>"b", ,key2"=>"value"

Add a unit test that covers this case.
Fix a couple of the nearby error strings while looking at this.

Found by looking at staticcheck warnings:

    pgtype/hstore.go:434:6: this value of end is never used (SA4006)
    pgtype/hstore.go:434:6: this value of r is never used (SA4006)
2023-05-15 18:10:20 -05:00
Evan Jones
bbcc4fc0b8 pgtype/hstore_test.go: Add coverage for text protocol
The existing test registers pgtype.Hstore in the text map, then uses
the query modes that use the binary protocol. The existing test did
not use the text parsing code. Add a version of the test that uses
pgtype.Hstore as the input and output argument in all query modes,
and tests it without registering the codec.
2023-05-15 18:09:31 -05:00
Evan Cordell
cead918e18 run tests that rely on backend PID to run against cockroach
cockroach has supported backend PIDs on connections since 22.1:
https://www.cockroachlabs.com/docs/releases/v22.1.html#v22-1-3-sql-language-changes
2023-05-15 18:06:08 -05:00
Evan Cordell
7f2bb9595f add BeforeClose to pgxpool.Pool 2023-05-15 18:06:08 -05:00
Evan Jones
d8b38b28be pgtype/hstore.go: Remove unused quoteHstore{Element,Replacer}
These are unused. The code uses quoteArrayElement instead.
2023-05-13 10:03:22 -05:00
Evan Jones
2a86501e86 Fix hstore NULL versus empty
When running queries with the hstore type registered, and with simple
mode queries, the scan implementation does not correctly distinguish
between NULL and empty. Fix the implementation and add a test to
verify this.
2023-05-13 09:34:30 -05:00
Jack Christensen
f59e8bf555 Fix: RowToStructByPos with embedded unexported struct
https://github.com/jackc/pgx/issues/1583
2023-04-27 21:03:58 -05:00
Lev Zakharov
c27b9b49ea support different bool string representations 2023-04-27 20:29:41 -05:00
Jack Christensen
6defa2a607 Fix error when using BatchResults.Exec
...on a select that returns an error after some rows.

This was initially found by a failure with CockroachDB because it
seems to send a RowDescription before an error even when no rows are
returned. PostgreSQL doesn't.
2023-04-20 21:43:59 -05:00
Jack Christensen
a23a423f55 Fix pipelineBatchResults.Exec() not returning error from ResultReader 2023-04-20 21:19:41 -05:00
Jack Christensen
09371981f9 Fix pipeline batch results not closing pipeline
when error occurs while reading directly from results instead of using
a callback.

https://github.com/jackc/pgx/issues/1578
2023-04-20 20:58:04 -05:00
Jack Christensen
67f2a41587 Fix scanning a table type into a struct
Table types have system / hidden columns like tableoid, cmax, xmax, etc.
These are not included when sending or receiving composite types.

https://github.com/jackc/pgx/issues/1576
2023-04-20 20:13:37 -05:00
Simon Paredes
2cf1541bb9 wrap error 2023-04-11 18:07:05 -05:00
Vinícius Garcia
84eb2e460a Add KSQL on the 3rd party section of the README 2023-04-11 17:53:38 -05:00
Jack Christensen
847f888631 Fix scan array of record to pointer to slice of struct
https://github.com/jackc/pgx/issues/1570
2023-04-08 14:39:48 -05:00
Daniel Castro
f72a147db3 skip cockroachdb 2023-04-05 17:36:00 -05:00
Daniel Castro
8b7c699b8f proper naming 2023-04-05 17:36:00 -05:00
Daniel Castro
215ffafc74 fix tests 2023-04-05 17:36:00 -05:00
Daniel Castro
5eeaa201d9 add extra tests 2023-04-05 17:36:00 -05:00
Jack Christensen
be79f1c8f5 Allow batch callback function to override error
https://github.com/jackc/pgx/pull/1538#issuecomment-1486083411
2023-03-31 20:18:05 -05:00
cemre.mengu
ca022267db add tests 2023-03-25 10:22:11 -05:00
Cemre Mengu
2a653b4a8d fix: handle null interface for json
When using `scany` I encountered the following case. This seems to fix it.

Looks like null `jsonb` columns cause the problem. If you create a table like below you can see that the following code fails. Is this expected?

```sql
CREATE TABLE test (
	a int4 NULL,
	b int4 NULL,
	c jsonb NULL
);

INSERT INTO test (a, b, c) VALUES (1, null, null);
```

```go
package main

import (
	"context"
	"log"

	"github.com/georgysavva/scany/v2/pgxscan"
	"github.com/jackc/pgx/v5"
)

func main() {
	var rows []map[string]interface{}
	conn, _ := pgx.Connect(context.Background(), ts.PGURL().String())
	
	// this will fail with can't scan into dest[0]: cannot scan NULL into *interface {}
	err := pgxscan.Select(context.Background(), conn, &rows, `SELECT c from test`) 
	
	// this works
	// err = pgxscan.Select(context.Background(), conn, &rows, `SELECT a,b from test`)
	
	if err != nil {
		panic(err)
	}

	log.Printf("%+v", rows)
}
```
2023-03-25 10:22:11 -05:00
Jack Christensen
7af80ae8a6 Batch Query callback is called even when there is an error
This allows the callback to handle additional error types such as
foreign key constraint violations.

See https://github.com/jackc/pgx/pull/1538.
2023-03-25 10:21:34 -05:00
Audi P. Risa P
7555c43033 add lax field to namedStructRowScanner 2023-03-25 09:57:38 -05:00
Audi P. Risa P
193bab416f add RowTo(AddrOf)StructByNameLax 2023-03-25 09:57:38 -05:00
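A minimal sketch of the lax mapping added here (the widget type and query are hypothetical): a struct field with no matching column in the result is tolerated, where the strict ByName variant would return an error.

```go
package main

import (
	"context"

	"github.com/jackc/pgx/v5"
)

// widget has a Note field that the query below never selects.
type widget struct {
	ID   int64  `db:"id"`
	Name string `db:"name"`
	Note string `db:"note"`
}

func listWidgets(ctx context.Context, conn *pgx.Conn) ([]widget, error) {
	rows, _ := conn.Query(ctx, "select id, name from widgets")
	return pgx.CollectRows(rows, pgx.RowToStructByNameLax[widget])
}
```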
Dmitry K
e9d64ec29d Use time.Equal instead of direct comparison 2023-03-24 17:51:34 -05:00
Dmitry K
2f1bba09c4 Guard deadline readings by mutex 2023-03-24 17:51:34 -05:00
Dmitry K
d829073b2f Improve deadline simulation 2023-03-24 17:51:34 -05:00
Dmitry K
48da6435a5 Add deadline simulation 2023-03-24 17:51:34 -05:00
Dmitry K
34e3013153 Remove commented out atomic calls 2023-03-24 17:51:34 -05:00
Dmitry K
009a377028 Use mutex to guard entire SetBlockingMode call 2023-03-24 17:51:34 -05:00
Dmitry K
e05abb83ec Better error messages 2023-03-24 17:51:34 -05:00
Dmitry K
89475c4c91 use atomic.Int32 instead of int + atomic calls 2023-03-24 17:51:34 -05:00
Dmitry K
c3d62c8783 Small comment update 2 2023-03-24 17:51:34 -05:00
Dmitry K
1298a835bc Small comment update 2023-03-24 17:51:34 -05:00
Dmitry K
b2b4fbcf57 Set socket to non-blocking mode in Read, Flush and BufferReadUntilBlock operations 2023-03-24 17:51:34 -05:00
Dmitry K
3db7d1774e Set socket to non-blocking mode before doneChan is allocated to avoid that channel leaked in case when SetBlockingMode will return error 2023-03-24 17:51:34 -05:00
Dmitry K
a83faa67f5 Small improvements 2023-03-24 17:51:34 -05:00
Dmitry K
8b5e8d9d89 Fix Windows non-blocking I/O for CopyFrom
Created based on discussion here: https://github.com/jackc/pgx/pull/1525#pullrequestreview-1344511991

Fixes https://github.com/jackc/pgx/issues/1552
2023-03-24 17:51:34 -05:00
Sergej Brazdeikis
9ae852eb58 Fix typo in error message uint32 -> uint16 2023-03-11 15:34:08 -06:00
Nicola Murino
19039e6dd1 fix build on 32-bit Windows 2023-03-07 17:09:03 -06:00
Dmitry K
0dbb0a52ab Fix realNonblockingRead, set realNonblockingRead call error to nonblockReadErr 2023-03-04 09:25:36 -06:00
Dmitry K
087b8b2ba8 Try to make windows non-blocking I/O 2023-03-04 09:25:36 -06:00
Jack Christensen
c09ddaf440 Add Windows non-blocking IO 2023-03-04 09:25:36 -06:00
Jack Christensen
80eb6e1859 Remove sleeps in test
Sleeping for a microsecond on Windows actually takes 10ms. This caused
the test to never finish. Instead use a channel to ensure the two
goroutines start working at the same time and remove the sleeps.
2023-02-27 20:32:51 -06:00
Jack Christensen
7ec6ee7b0a Release v5.3.1 2023-02-27 19:57:26 -06:00
Jack Christensen
6105ca5073 Fix TestInternalNonBlockingWriteWithDeadline
The test was relying on sending so big a message that the write blocked.
However, it appears that on Windows the TCP connections over localhost
have a very large or infinite-sized buffer. Change the test to simply
set the deadline to the current time before triggering the write.
2023-02-25 17:02:55 -06:00
Jack Christensen
8f46c75e73 Fix: fake non-blocking read adaptive wait time
If the time reached the minimum time before the 5 tries were up it
would get stuck reading 1 byte at a time indefinitely.
2023-02-25 16:45:34 -06:00
Jack Christensen
38e09bda4c Fix *wrapSliceEncodePlan[T].Encode
It should pass a FlatArray[T] to the next step instead of a
anySliceArrayReflect. By using a anySliceArrayReflect, an encode of
[]github.com/google/uuid.UUID followed by []string into a PostgreSQL
uuid[] would crash. This was caused by an EncodePlan cache collision
where the second encoding used part of the cached plan of the first.

In proper usage a cache collision shouldn't be able to occur. If this
assertion proves incorrect it will be necessary to add an optional
interface to ScanPlan and EncodePlan that marks the plan as ineligible
for caching. But I have been unable to construct a failing case, and
given that ScanPlans have been cached for quite some time now without
incident I do not think it is possible. This issue only occurred due to
the bug in *wrapSliceEncodePlan[T].Encode.

https://github.com/jackc/pgx/issues/1502
2023-02-21 21:04:30 -06:00
Ch. König
9567297815 add mgx module reference to the readme file 2023-02-17 08:58:34 -06:00
Jack Christensen
42d327f660 Add text format jsonpath support 2023-02-14 19:52:47 -06:00
Jack Christensen
f17c743c3c Unwatch at end of test
https://github.com/jackc/pgx/issues/1505
2023-02-14 09:03:41 -06:00
Jack Christensen
a6ace8969b Fix: Prefer sql.Scanner before TryWrapScanPlanFuncs
This was already the case when the data type was unknown but should also
be the case when it is known.
2023-02-14 09:03:41 -06:00
Tomáš Procházka
c2e278e5d4 simplify duplicate pgx registration guard
The binary search is overkill here.
Readability first.
2023-02-13 21:08:42 -06:00
Jack Christensen
c5daa3a814 Release v5.3.0 2023-02-11 09:15:31 -06:00
Jack Christensen
f5d2da7a19 Upgrade golang.org/x/crypto and golang.org/x/text 2023-02-11 08:59:51 -06:00
Jack Christensen
b8262ace75 Upgrade to puddle v2.2.0 2023-02-11 08:57:19 -06:00
Jack Christensen
2100a64dbe Fix broken benchmarks 2023-02-10 20:26:18 -06:00
Jack Christensen
4484831550 Prefer binary format for arrays
This improves performance decoding text[].
2023-02-10 20:21:25 -06:00
Jack Christensen
1f43e2e490 Fix text format array decoding with a string of "NULL"
It was incorrectly being treated as NULL instead of 'NULL'.

fixes https://github.com/jackc/pgx/issues/1494
2023-02-10 19:59:03 -06:00
Jack Christensen
b707faea8f Fix flickering test TestBufferNonBlockingRead 2023-02-10 19:40:31 -06:00
Vitalii Solodilov
255f16b00f Register pgx driver using major version
Fixed: #1480
2023-02-10 19:18:45 -06:00
Felix Röhrich
a47e836471 make TestPointerPointerStructScan easier to read 2023-02-10 19:06:20 -06:00
Felix Röhrich
5cd8468b99 replace erroneous reflect.New with reflect.Zero in TryWrapStructScanPlan 2023-02-10 19:06:20 -06:00
Felix Röhrich
fa5fbed497 add filter for dropped attributes in getCompositeType 2023-02-07 08:45:56 -06:00
Jack Christensen
190c05cc24 CI fix: Go versions are strings
Otherwise Go 1.20 was being treated as Go 1.2.
2023-02-04 07:32:13 -06:00
Jack Christensen
c875abea84 Fix encode []any to array
https://github.com/jackc/pgx/issues/1488
2023-02-04 07:28:52 -06:00
Jack Christensen
98543e0354 Update supported Go versions and add 1.20 to CI 2023-02-04 07:01:03 -06:00
Jack Christensen
32c29a6edd Update issue template to use pgx v5 2023-02-01 19:40:25 -06:00
Jack Christensen
9963c32d4f Only count when bytes actually read 2023-01-31 20:35:44 -06:00
Jack Christensen
6bc327b3ce Find fastest possible read time for fakeNonblockingReadWaitDuration
The first 5 fake non-blocking reads are limited to 1 byte. This should
ensure that there is a measurement of a read where bytes are already
waiting in Go or the OS's read buffer.
2023-01-31 20:25:57 -06:00
Jack Christensen
f46d35610e Only set c.fakeNonblockingReadWaitDuration when it will be decreased 2023-01-31 20:25:17 -06:00
Jack Christensen
cf78472ce5 Use unix build tag
With Go 1.19 available we can use a simpler build tag.
2023-01-31 20:10:34 -06:00
Yumin Xia
766d2bba4f add UnmarshalJSON for pgtype Numeric 2023-01-30 21:33:02 -06:00
Jack Christensen
384a581e99 Avoid slightly overflowing the send copy buffer
This avoids send buffer sequences such as 65531, 13, 65531, 13, 65531,
13, 65531, 13.
2023-01-30 20:59:54 -06:00
Jack Christensen
898891a6ee Fake non-blocking read adapts its max wait time
The reason for a high max wait time was to ensure that reads aren't
cancelled when there is data already waiting in Go's or the OS's receive
buffer. Unfortunately, there is no way to know ahead of time how long
this should take.

This new code uses 2x the fastest successful read time as the max read
time. This allows the code to adapt to whatever host it is running on.

https://github.com/jackc/pgx/issues/1481
2023-01-28 09:35:52 -06:00
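A rough sketch of the adaptive idea; the function and the minimum constant are
made up for illustration and are not the actual pgconn internals (assumes the
time package):

    const minReadWait = 100 * time.Microsecond // hypothetical floor

    // Allow waiting up to 2x the fastest successful read observed so far.
    func nextMaxReadWait(current, fastestSuccessfulRead time.Duration) time.Duration {
        proposed := 2 * fastestSuccessfulRead
        if proposed < minReadWait {
            proposed = minReadWait
        }
        if proposed < current { // only ever decrease, matching f46d35610e above
            return proposed
        }
        return current
    }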
Jack Christensen
7019ed1edf Fix tests for iobufpool optimization 2023-01-28 09:30:12 -06:00
Jack Christensen
eee854fb06 iobufpool uses *[]byte instead of []byte to reduce allocations 2023-01-28 08:02:49 -06:00
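The *[]byte pattern the commit refers to, as a generic sketch rather than the
actual iobufpool code (assumes the sync package):

    var bufPool = sync.Pool{
        New: func() any {
            b := make([]byte, 8192)
            // Storing *[]byte instead of []byte lets Get/Put avoid allocating
            // a new slice header each time the value is boxed into an any.
            return &b
        },
    }

    func getBuf() *[]byte  { return bufPool.Get().(*[]byte) }
    func putBuf(b *[]byte) { bufPool.Put(b) }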
Jack Christensen
bc754291c1 Save memory on non blocking read path
Only create RawConn.Read callback once and have it use NetConn fields.
Avoids the closure and some allocations.

https://github.com/jackc/pgx/issues/1481
2023-01-27 20:53:30 -06:00
Jack Christensen
2c7d86a543 Only create RawConn.Write callback once
This saves an allocation on every call.

https://github.com/jackc/pgx/issues/1481
2023-01-27 20:34:21 -06:00
Jack Christensen
42a47194a2 Memoize encode plans
This significantly reduces memory allocations in paths that repeatedly
encode the same type of values such as CopyFrom.

https://github.com/jackc/pgx/issues/1481
2023-01-27 20:19:06 -06:00
Jack Christensen
7941518809 BufferReadUntilBlock should release buf when no bytes read
This was causing allocations every time there was a non-blocking read
with nothing to read.

https://github.com/jackc/pgx/issues/1481
2023-01-27 18:03:38 -06:00
Alexey Palazhchenko
f839d501a7 Apply gofmt -s
And add CI check for that.
2023-01-24 07:55:00 -06:00
Alexey Palazhchenko
f581584148 Use Go 1.19's lists for proper formatting 2023-01-23 19:54:30 -06:00
Jack Christensen
e48e7a7189 Fix scanning json column into **string
refs https://github.com/jackc/pgx/issues/1470
2023-01-20 18:38:11 -06:00
Mark Chambers
516300aabf spelling: successfully, compatibility 2023-01-16 20:06:01 -06:00
Mark Chambers
62a7e19a04 func multiInsert returns nil when err != nil
I suspect it should return err.
2023-01-16 20:06:01 -06:00
Mark Chambers
672431c0bd Replace deprecated "io/ioutil"
ioutil.TempFile: Deprecated: As of Go 1.17, this function simply calls os.CreateTemp.

ioutil.ReadFile: Deprecated: As of Go 1.16, this function simply calls os.ReadFile.
2023-01-16 20:06:01 -06:00
Mark Chambers
7c0c7dc01e Remove unused test struct. 2023-01-16 20:06:01 -06:00
Jack Christensen
fcec008a4c Update CI to test on Go 1.19 2023-01-14 09:37:11 -06:00
Jack Christensen
d993cfa8fd Use puddle with Go 1.19 atomics instead of uber atomics
Doing this a bit early to resolve
https://github.com/jackc/pgx/issues/1465. Won't actually tag the release
until Go 1.20 is released to comply with pgx's versioning policy.
2023-01-14 09:31:38 -06:00
Jack Christensen
a95cfe5cc5 Fix connect with multiple hostnames when one can't be resolved
If multiple hostnames are provided and one cannot be resolved the others
should still be tried.

Longterm, it would be nice for the connect process to return a list of
errors rather than just one.

fixes https://github.com/jackc/pgx/issues/1464
2023-01-14 09:19:00 -06:00
Mark Chambers
c46d792c93 Numeric numberTextBytes() workaround...
This seems a bit of a hack. It fixes the problems demonstrated in my previous commit.

Maybe there's a cleaner way?

Associated: https://github.com/jackc/pgx/issues/1426
2023-01-14 08:42:42 -06:00
Mark Chambers
37c6f97b11 pgtype.Numeric numberTextBytes() encoding bug
Demonstrate the problem with the tests:

...for negative decimal values e.g. -0.01

This causes errors when encoding to JSON:

    "json: error calling MarshalJSON for type pgtype.Numeric"

It also causes scan failures of sql.NullFloat64:

    "converting driver.Value type string ("0.-1") to a float64"

As reported here: https://github.com/jackc/pgx/issues/1426
2023-01-14 08:42:42 -06:00
Alex Goncharov
74f9b9f0a4 Bump github.com/jackc/pgservicefile to v0.0.0-20221227161230-091c0ba34f0a to get rid of vulnerable version of gopkg.in/yaml.v2
Signed-off-by: Alex Goncharov <github@b4bay.com>
2022-12-27 17:31:07 -06:00
Stephen Afam-Osemene
5177e1a8df Add stephenafamo/scan reference to README.md 2022-12-27 10:13:36 -06:00
Jack Christensen
d4fcd4a897 Support sql.Scanner on renamed base type
https://github.com/jackc/pgtype/issues/197
2022-12-23 14:22:59 -06:00
Wagner Camarao
c514b2e0c3 add pmx module reference to the readme file 2022-12-23 13:51:59 -06:00
Jack Christensen
e66ad1bcec Fix encode to json ignoring driver.Valuer
https://github.com/jackc/pgx/issues/1430
2022-12-23 13:44:09 -06:00
Alejandro Do Nascimento Mora
c4ac6d810f Use DefaultQueryExecMode in CopyFrom
CopyFrom had to create a prepared statement to get the OIDs of the data
types that were going to be copied into the table. Every COPY operation
required an extra round trips to retrieve the type information. There
was no way to customize this behavior.

By leveraging the QueryExecMode feature, like in `Conn.Query`, users can
specify if they want to cache the prepared statements, execute
them on every request (like the old behavior), or bypass the prepared
statement relying on the pgtype.Map to get the type information.

The `QueryExecMode` behaves exactly like in `Conn.Query` in the way the
data type OIDs are fetched, meaning that:

- `QueryExecModeCacheStatement`: caches the statement.
- `QueryExecModeCacheDescribe`: caches the statement description and assumes
  it does not change.
- `QueryExecModeDescribeExec`: gets the statement description on every
  execution. This is like the old behavior of `CopyFrom`.
- `QueryExecModeExec` and `QueryExecModeSimpleProtocol`: maintain the
  same behavior as before, which is the same as `QueryExecModeDescribeExec`.
  It will keep getting the statement description on every execution.

The `QueryExecMode` can only be set via
`ConnConfig.DefaultQueryExecMode`, unlike `Conn.Query` there's no
support for specifying the `QueryExecMode` via optional arguments
in the function signature.
2022-12-23 13:22:26 -06:00
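A hedged usage sketch of the above; connString, the "people" table, and its
columns are placeholders:

    cfg, err := pgx.ParseConfig(connString)
    if err != nil {
        return err
    }
    cfg.DefaultQueryExecMode = pgx.QueryExecModeCacheDescribe // also used by CopyFrom now

    conn, err := pgx.ConnectConfig(ctx, cfg)
    if err != nil {
        return err
    }
    defer conn.Close(ctx)

    _, err = conn.CopyFrom(ctx,
        pgx.Identifier{"people"},
        []string{"name", "age"},
        pgx.CopyFromRows([][]any{{"alice", int32(30)}}),
    )
    return err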
Jack Christensen
456a242f5c Unregistered OIDs are handled the same as unknown OIDs
This improves handling of unregistered types. In general, they should
"just work". But there are performance benefits gained and some edge
cases avoided by registering types. Updated documentation to mention
this.

https://github.com/jackc/pgx/issues/1445
2022-12-23 13:14:56 -06:00
Jack Christensen
d737852654 Fix: driver.Value representation of bytea should be []byte not string
https://github.com/jackc/pgx/issues/1445
2022-12-21 17:54:42 -06:00
Ben Weintraub
29ad306e47 Make MaxConnLifetimeJitter setting actually jitter 2022-12-20 20:18:26 -06:00
Jack Christensen
f42af35884 Add support for single dimensional arrays
https://github.com/jackc/pgx/issues/1442
2022-12-20 20:12:12 -06:00
Yevgeny Pats
11fa083a0d fix: Improve errors in batch modes 2022-12-20 19:33:46 -06:00
Mark Chambers
1ce3e0384a pgtype Int fix minimum error message.
Previously on the minimum condition the error would be:

  "is greater than maximum"

Also add encoding/json import into the .erb template as the import was
missing after running rake generate.
2022-12-17 09:10:02 -06:00
Alejandro Do Nascimento Mora
e58381ac94 Enable some CopyFrom tests for cockroachDB
CockroachDB added support for COPY in version 20.2.

https://www.cockroachlabs.com/docs/v20.2/copy-from

There are some limitations on the implementation, that's why not all the
existing tests were enabled.
2022-12-12 18:22:32 -06:00
Jack Christensen
279c3c0a20 Fix: json values work with sql.Scanner
https://github.com/jackc/pgx/issues/1418
2022-12-06 19:44:55 -06:00
Jack Christensen
17f8f7af63 Release v5.2.0 2022-12-05 20:41:55 -06:00
Jack Christensen
f0a73424b1 Fix: Scan uint and uint64 ScanNumeric
fixes https://github.com/jackc/pgx/issues/1414
2022-12-05 20:34:46 -06:00
Vitalii Solodilov
88b373f9ee Skipped multirange tests for postgres less than 14 version 2022-12-01 19:33:33 -06:00
Vitalii Solodilov
8e2de2fefa Conn.LoadType supports range and multirange types (#1393)
Closes #1393
2022-12-01 19:33:33 -06:00
Nazar Vovk
24c53259f8 Fix typo 2022-11-28 09:36:20 -06:00
ksco
8eb062f588 perf(tx): use strings.Builder to avoid the overhead of []byte -> string conversion 2022-11-25 12:39:22 -06:00
Petr Evdokimov
fbfafb3edf Optimize 'beginSQL' runtime and memory allocations 2022-11-22 09:00:12 -06:00
Vitalii Solodilov
174224fa07 The tracelog.TraceLog implements the pgx.PrepareTracer interface
Allows TraceLog to log Prepare queries.

Fixes #1383

Unit tests:
* added logger.Clear method to cleanup old log messages
* added logger.FilterByMsg to get only specific logs for assertions. When queries are executed using different query exec modes, the prepare query may not be executed, so we can get a different number of logs for different exec modes.
2022-11-19 07:43:39 -06:00
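A typical hookup, as a hedged sketch (logger stands for any tracelog.Logger
implementation and cfg for a *pgx.ConnConfig):

    cfg.Tracer = &tracelog.TraceLog{
        Logger:   logger,                 // adapter around the application's logger
        LogLevel: tracelog.LogLevelDebug, // Prepare calls are now traced as well
    }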
Jack Christensen
8ad1394f4c Update changelog for v5.1.1 2022-11-17 19:47:09 -06:00
Bodo Kaiser
56633b3d51 removed unnecessary name argument from DeallocateAll 2022-11-17 19:41:18 -06:00
Jack Christensen
ba4bbf92af Fix query sanitizer
...when query text contains the Unicode replacement character.
utf8.RuneError is actually a valid character.
2022-11-14 18:32:26 -06:00
Jack Christensen
b4d2eae777 Update changelog 2022-11-12 11:02:55 -06:00
Bodo Kaiser
3520c2ea43 updated DeallocateAll to also reset client-side statement and description cache 2022-11-12 10:57:31 -06:00
Bodo Kaiser
c94c47f584 added DeallocateAll to pgx.Conn to clear prepared statement cache 2022-11-12 10:57:31 -06:00
Jack Christensen
8678ed560f Update puddle to v2.1.2 2022-11-12 10:42:08 -06:00
Jack Christensen
05924a9d6b Update CONTRIBUTING.md 2022-11-12 10:42:02 -06:00
Jack Christensen
2e9e2865f9 Added more docs and tests 2022-11-12 10:13:20 -06:00
Pavlo Golub
14be51536b implement RowToStructByName and RowToAddrOfStructByName 2022-11-12 09:39:54 -06:00
Jack Christensen
1376a2c0ed Update Go doc badge 2022-11-12 09:23:07 -06:00
Jack Christensen
932f676cfd Remove PG 10 from CI and add PG 15 to CI
PG 10 is now out of support.
2022-11-12 09:20:48 -06:00
Jack Christensen
5b6fb75669 Conn.LoadType supports domain types
If the underlying type is registered then use the same Codec.

fixes https://github.com/jackc/pgx/issues/1373
2022-11-12 08:11:37 -06:00
Jack Christensen
b265fedd75 Correct error message 2022-11-12 07:06:54 -06:00
Jack Christensen
871f14e43b Fix text decoding of dates with 5 digit years 2022-11-12 07:01:11 -06:00
Jack Christensen
071d1c9467 DateCodec.DecodeValue can return pgtype.InfinityModifier
Previously, an infinite value was returned as a string. Other types
that can be infinite such as Timestamptz return a
pgtype.InfinityModifier. This change brings them into alignment.
2022-11-12 06:27:41 -06:00
Jack Christensen
29109487ec DateCodec.DecodeDatabaseSQLValue returns time.Time when possible
Previously it returned a string. However, this was an unintended
behavior change from pgx v4.

89f69aaea9 (commitcomment-89173737)
2022-11-12 06:21:48 -06:00
Jack Christensen
daf570c752 Date text encoding pads year with 0 for at least 4 digits
e.g. 0007-01-02 instead of 7-01-02

89f69aaea9 (commitcomment-89173737)
2022-11-12 06:14:04 -06:00
Jack Christensen
a86acf61e0 Fix encode ErrorResponse
fixes https://github.com/jackc/pgx/issues/1371
2022-11-11 18:20:16 -06:00
Jack Christensen
a968ce3437 Add typed nil behavior change note to changelog
https://github.com/jackc/pgx/issues/1367
2022-11-03 21:24:44 -05:00
Jack Christensen
39676004de Fix logger string truncation with UTF-8
fixes #1365
2022-11-03 20:50:30 -05:00
Jack Christensen
6f90866f58 Expose underlying pgconn GetSSLPassword support to pgx
pgconn supports a GetSSLPassword function but the pgx connection
functions did not expose a means of using it.

See PR #1233 for more context.
2022-11-03 20:09:52 -05:00
Jack Christensen
d8c04249d1 Give up on that test in CI
The test works if I use upterm and run manually on the CI server...
TLS is tested in the TLS with client certificate tests anyway.
2022-10-31 22:37:05 -05:00
Jack Christensen
7fd064ab80 Disable upterm 2022-10-31 22:28:50 -05:00
Jack Christensen
0013f6c7ca Enable upterm 2022-10-31 22:20:58 -05:00
Jack Christensen
95498282bb more ci 2022-10-31 22:10:37 -05:00
Jack Christensen
6e77e0a09d Fight with CI some more 2022-10-31 22:05:35 -05:00
Jack Christensen
1f0fd66623 Go back to Ubuntu 20.04 on CI
Should fix some strange openssl / TLS issues.
2022-10-31 21:57:38 -05:00
Jack Christensen
45aeaed20a Remove unused pg-version matrix 2022-10-31 21:28:58 -05:00
Jack Christensen
a2da398dff Partial CI fixes 2022-10-31 21:24:57 -05:00
Jack Christensen
be419e25b4 Use des3 for certs in testing / CI 2022-10-31 19:42:22 -05:00
Jack Christensen
dd07e24a6c sudo the CI 2022-10-31 19:34:59 -05:00
Jack Christensen
0920c79b02 Test SCRAM, sslmode=verify-full and client cert auth on CI 2022-10-31 19:30:22 -05:00
Jack Christensen
268af3903c Upgrade CI to ubuntu-22.04 2022-10-31 19:10:49 -05:00
Jack Christensen
4d711aaa73 Remove v5-dev branch from CI 2022-10-31 19:10:21 -05:00
Jack Christensen
dc85718658 Remove unused code from CI script 2022-10-29 19:02:04 -05:00
Jack Christensen
6b52e0b5e0 Contributing guide now includes instructions to test client ssl auth 2022-10-29 19:00:29 -05:00
Jack Christensen
9eaeb51e30 Fix CI PostgreSQL user permissions 2022-10-29 17:55:13 -05:00
Jack Christensen
8b2ac8c18f Fix unix domain socket tests on CI 2022-10-29 17:45:13 -05:00
Jack Christensen
05e9234c2e Upgrade setup-go and checkout actions to v3 2022-10-29 17:29:10 -05:00
Jack Christensen
97d1012f42 Use testsetup/postgresql_setup.sql in CI 2022-10-29 17:27:39 -05:00
Jack Christensen
6bedfa7def Use testsetup/pg_hba.conf in CI 2022-10-29 17:23:13 -05:00
Jack Christensen
55b5067ddd Improve testing / contributing instructions
* Extract CONTRIBUTING.md
* Add instructions and scripts to set up a standalone PostgreSQL server
  that tests the various connection and authentication types.
2022-10-29 17:14:09 -05:00
Jack Christensen
1ec3816a20 pgconn and pgproto use same environment variable for tests as pgx 2022-10-29 13:23:25 -05:00
Jack Christensen
c9c166b8b2 Fix TestConnCopyFromDataWriteAfterErrorAndReturn always being skipped 2022-10-29 13:17:52 -05:00
Jack Christensen
9a207178f6 Fix TestConnCheckConn always being skipped 2022-10-29 13:16:05 -05:00
Jack Christensen
3feeddd9f1 Fix tests when PGUSER is different than OS user 2022-10-29 13:12:03 -05:00
Jack Christensen
72c89108ad Fix tests when PGPORT set to non-default value 2022-10-29 13:06:53 -05:00
Jack Christensen
c130b2d74a Update CopyFrom documentation to be clearer
Regarding binary requirement and enums in particular.

https://github.com/jackc/pgx/issues/1338
2022-10-29 09:48:45 -05:00
Jack Christensen
7d3b9c1e44 QueryRewriter.RewriteQuery now returns an error
https://github.com/jackc/pgx/issues/1186#issuecomment-1288207250
2022-10-29 09:33:13 -05:00
Jack Christensen
6515e183ff Update doc example for pgx.ForEachRow
fixes https://github.com/jackc/pgx/issues/1360
2022-10-29 08:59:57 -05:00
Jack Christensen
e35041372d Remove mistakenly included replace directive in go.mod 2022-10-29 08:56:49 -05:00
Jack Christensen
6fabd8f5b1 Fix encoding uint64 larger than math.MaxInt64 into numeric
fixes https://github.com/jackc/pgx/issues/1357
2022-10-29 08:47:12 -05:00
Jack Christensen
c00fb5d2a1 Upgrade to puddle v2.0.1 2022-10-29 08:09:54 -05:00
Jack Hopner
55d5d036c0 add pgx xray tracer to readme 2022-10-27 19:42:36 -05:00
Jack Christensen
987de3874e Update changelog 2022-10-24 19:11:50 -05:00
Jack Christensen
3ad9995dfe Exec checks if tx is closed
https://github.com/jackc/pgx/discussions/1350
2022-10-24 18:23:26 -05:00
Baptiste Fontaine
3e825ec898 Fix RowToStructByPos on structs with multiple anonymous sub-structs
Fixes #1343
2022-10-22 10:02:32 -05:00
Jeff Koenig
ba100785cc fix: bump text package for CVE-2022-32149
https://security.snyk.io/vuln/SNYK-GOLANG-GOLANGORGXTEXTLANGUAGE-3043869
2022-10-22 09:07:24 -05:00
Jack Christensen
48b4807b33 Fix some reflect Kind checks to first check for nil
fixes https://github.com/jackc/pgx/issues/1335
2022-10-22 08:57:49 -05:00
Jack Christensen
6e40968cfc CollectOneRow prefers PostgreSQL error over pgx.ErrorNoRows
fixes https://github.com/jackc/pgx/issues/1334
2022-10-22 08:44:06 -05:00
Jack Christensen
11e5f68ff6 Update changelog for v5.0.3 2022-10-14 19:11:11 -05:00
Baptiste Fontaine
7a9e70d1e0 Fix some bad rows.Err() handlings in tests 2022-10-14 19:02:44 -05:00
Jack Christensen
f2e7c8144d reflect.TypeOf can return nil. Check before using
https://github.com/jackc/pgx/issues/1331
2022-10-12 20:03:51 -05:00
Jack Christensen
aff180b192 Remove dead code 2022-10-12 19:58:06 -05:00
Jack Christensen
a581124dea Encode with driver.Valuer after trying TryWrapEncodePlanFuncs
However, all builtin TryWrapEncodePlanFuncs check for driver.Valuer and
skip themselves if it is found.
2022-10-12 19:52:57 -05:00
Jack Christensen
c4407fb36e Prevent infinite loop for driver.Valuer / Codec edge case
A `driver.Valuer()` results in a `string` that the `Codec` for the
PostgreSQL type doesn't know how to handle. That string is scanned into
whatever the default type for that `Codec` is. That new value is
encoded. If the new value is the same type as the original type, then an
infinite loop occurred. Check that the types are different.

https://github.com/jackc/pgx/issues/1331
2022-10-12 19:46:15 -05:00
Jack Christensen
094ad9c9d8 Update changelog for v5.0.2 2022-10-08 18:58:17 -05:00
Jack Christensen
af0b896290 Allow scanning null even if PG and Go types are incompatible
refs https://github.com/jackc/pgx/issues/1326
2022-10-08 09:10:43 -05:00
Jack Christensen
5655f9d593 Fix scan to pointer to pointer to renamed type
refs https://github.com/jackc/pgx/issues/1326
2022-10-08 08:10:40 -05:00
Jack Christensen
f803c790d0 Fix docs for listen / notify
https://github.com/jackc/pgx/issues/1318
2022-10-01 12:58:49 -05:00
Jack Christensen
222e3b37bc Prefer driver.Value over wrap plans when encoding
This is tricky because driver.Valuer returns any. In contrast, we can
plan for fmt.Stringer because it always returns a string.

Because of this driver.Valuer was always handled as the last option. But
with pgx v5 now having the ability to find underlying types like a
string and supporting fmt.Stringer it meant that driver.Valuer was
often not getting called because something else was found first.

This change tries driver.Valuer immediately after the initial PlanScan
for the Codec. So a type that directly implements a pgx interface should
be used, but driver.Valuer will be preferred before all the attempts to
handle renamed types, pointer dereferencing, etc.

fixes https://github.com/jackc/pgx/issues/1319
fixes https://github.com/jackc/pgx/issues/1311
2022-10-01 12:20:23 -05:00
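For illustration, a hypothetical type implementing both interfaces; after this
change its Value() is tried right after the Codec's own plan, before the
wrapping fallbacks (assumes database/sql/driver, fmt, and strconv):

    type Money struct{ Cents int64 }

    // driver.Valuer: now preferred over the renamed-type / pointer-dereferencing wrappers.
    func (m Money) Value() (driver.Value, error) {
        return strconv.FormatInt(m.Cents, 10), nil
    }

    // fmt.Stringer: previously a wrap plan could pick this up first.
    func (m Money) String() string {
        return fmt.Sprintf("%d cents", m.Cents)
    }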
Jack Christensen
89f69aaea9 Date text encoding includes leading zero for month and day
e.g. 2000-01-01 instead of 2000-1-1. PostgreSQL accepted it without
zeroes but our text decoder didn't. This caused a problem when we needed
to take a value and encode to text so something else could parse it as
if it had come from the PostgreSQL server in text format. e.g.
database/sql compatibility.
2022-10-01 10:41:40 -05:00
Jack Christensen
63ae730fe8 Upgrade CockroachDB on CI 2022-10-01 10:11:11 -05:00
Jack Christensen
305c4ddbc7 Move and rename test 2022-10-01 10:09:57 -05:00
Jack Christensen
fb83fb0cc3 Skip TestCopyFrom on CockroachDB 2022-10-01 10:08:03 -05:00
Tommy Reilly
c48dd7e1f8 Add a test case demonstrating I/O race with CopyFrom 2022-10-01 10:07:38 -05:00
Jack Christensen
cd8b29b0fe Fix flickering on TestConnectTimeoutStuckOnTLSHandshake
Ensure that even if the outer function finishes the goroutine can still
send an error.
2022-09-24 12:54:59 -05:00
Jack Christensen
0aa681f3a3 Update changelog for v5.0.1 2022-09-24 11:15:31 -05:00
Jack Christensen
335c8621ff Fix sqlScannerWrapper NULL handling
https://github.com/jackc/pgx/issues/1312
2022-09-24 10:30:12 -05:00
Jack Christensen
ac9d4f4d96 Encode text for Lseg includes [ and ]
https://github.com/jackc/pgtype/issues/187
2022-09-24 10:30:12 -05:00
yogipristiawan
72e4b88e56 feat: add marshalJSON for float8 type 2022-09-24 10:00:40 -05:00
Peter Feichtinger
639fb28846 Fix typo 2022-09-24 09:26:52 -05:00
Jack Christensen
d7c7ddc594 Fix Windows 386 atomic usage
https://github.com/jackc/pgx/issues/1307
2022-09-24 09:23:36 -05:00
Jack Christensen
4fc4f9a603 Remove spurious .travis.yml 2022-09-17 10:36:36 -05:00
Jack Christensen
23a59d68fc Merge branch 'v5-dev' 2022-09-17 10:35:32 -05:00
Jack Christensen
5a055434f2 Upgrade dependencies 2022-09-17 10:24:19 -05:00
Jack Christensen
1a314bda3b pgconn.Timeout() no longer considers context.Canceled as a timeout error.
https://github.com/jackc/pgconn/issues/81
2022-09-17 10:18:06 -05:00
Jack Christensen
4f1a8084f1 Various doc and changelog tweaks 2022-09-17 09:03:48 -05:00
Jack Christensen
a05fb80b8a Update docs and changelog for renamed pgxpool.NewWithConfig
fixes https://github.com/jackc/pgx/issues/1306
2022-09-16 18:16:36 -05:00
Jack Christensen
90b69c0ee0 Fix atomic alignment on 32-bit platforms
refs #1288
2022-09-08 20:43:53 -05:00
Jack Christensen
ee2622a8e6 RowToStructByPos supports embedded structs
https://github.com/jackc/pgx/issues/1273#issuecomment-1236966785
2022-09-06 18:32:10 -05:00
Jack Christensen
d42b399be3 Update changelog 2022-09-03 13:42:36 -05:00
Jack Christensen
f015ced1bf Use puddle v2.0.0-beta.2 for Acquire in background after cancel 2022-09-03 13:20:19 -05:00
Jack Christensen
782133158f Test sending CopyData before CopyFrom responds with error 2022-09-03 09:31:41 -05:00
Tom Möller
dfce986bb5 Fix panic when logging batch error 2022-09-03 09:02:23 -05:00
Jack Christensen
f8d088cfb6 Fix JSON scan not completely overwriting destination
See https://github.com/jackc/pgtype/pull/185 for original report in
pgx v4 / pgtype.
2022-09-02 18:37:02 -05:00
Jack Christensen
f5cdf0d383 Update changelog 2022-08-27 18:18:41 -05:00
Jack Christensen
72fe594942 Upgrade to puddle v1.3.0 2022-08-27 18:18:34 -05:00
Jack Christensen
bce26b85d1 Fix atomic alignment on 32-bit platforms
refs #1288
2022-08-27 09:23:17 -05:00
Jack Christensen
bb6c997102 Add NewCommandTag
Useful for mocking and testing.

https://github.com/jackc/pgx/issues/1273#issuecomment-1224154013
2022-08-23 19:39:15 -05:00
Jack Christensen
fe3a4f3150 Standardize casing for NULL in error messages 2022-08-22 21:01:18 -05:00
Jack Christensen
2e73d1e8ee Improve error message when failing to scan a NULL::json 2022-08-22 20:56:36 -05:00
Jack Christensen
0d5d8e0137 Fallback to other format when encoding query arguments
The preferred format may not be possible for certain arguments. For
example, the preferred format for numeric is binary. But if
shopspring/decimal is being used without jackc/pgx-shopspring-decimal
then it will use the database/sql/driver.Valuer interface. This will
return a string. That string should be sent in the text format.

A similar case occurs when encoding a []string into a non-text
PostgreSQL array such as uuid[].
2022-08-22 20:26:38 -05:00
Jack Christensen
ae65a8007b Use higher pgconn.FieldDescription with string Name
Instead of using pgproto3.FieldDescription through pgconn and pgx. This
lets the lowest level pgproto3 still be as memory efficient as possible.

https://github.com/jackc/pgx/pull/1281
2022-08-20 10:04:18 -05:00
Jack Christensen
dbee461dc9 Update previous pgconn merge for v5 2022-08-19 17:42:04 -05:00
Jack Christensen
ef5655c563 Merge remote-tracking branch 'pgconn/master' into v5-dev 2022-08-19 17:36:29 -05:00
Stas Kelvich
15f8e6323e Fix tests that check tls.Config.ServerName -- with SNI this field
is filled, unless SNI is deliberately disabled. Also, do not set
SNI when host is an IP address as per RFC 6066.
2022-08-19 17:35:33 -05:00
Stas Kelvich
e3406d95f9 Add test coverage for client SNI 2022-08-19 17:35:33 -05:00
Stas Kelvich
067771b2e6 Set SNI for SSL connections
This allows an SNI-aware proxy to route connections. Patch adds a new
connection option (`sslsni`) to opt out of the SNI, to have the same
behavior as `libpq` does. See more in `sslsni` sections at
<https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS>.
2022-08-19 17:35:33 -05:00
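For example, appending sslsni=0 to a connection string opts out of SNI; the
host and credentials below are placeholders:

    postgres://app_user:secret@db.example.com:5432/app?sslmode=verify-full&sslsni=0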
Jack Christensen
8eae4a2a3e Merge remote-tracking branch 'pgconn/master' into v5-dev 2022-08-13 10:19:49 -05:00
Nathan Giardina
faabb0696f Fix for timeout when a single node has timed out: create a new context to allow each DB node to time out individually 2022-08-13 10:18:55 -05:00
Jack Christensen
1d748d9bbf Failsafe timeout for background pool connections
Do not override existing connect timeout.
2022-08-13 09:50:37 -05:00
Jack Christensen
c842802d65 Failsafe timeout for background pool connections
Do not override existing connect timeout.
2022-08-13 09:49:06 -05:00
Jack Christensen
7c6a31f9d2 CopyFrom parses strings to encode into binary format
https://github.com/jackc/pgx/issues/1277
https://github.com/jackc/pgx/issues/1267
2022-08-13 09:30:29 -05:00
Jack Christensen
02d9a5acd8 Fix naming of some tests 2022-08-13 08:41:06 -05:00
Jack Christensen
8256ab147f Add build tag to skip default PG type registration
https://github.com/jackc/pgx/issues/1273#issuecomment-1207338136
2022-08-13 08:09:44 -05:00
Jack Christensen
906f709e0c Fix typo in Windows code
https://github.com/jackc/pgx/issues/1274
2022-08-11 20:59:37 -05:00
Jack Christensen
33b782a96d Potential fix for Windows
https://github.com/jackc/pgx/issues/1274
2022-08-11 20:55:50 -05:00
Jack Christensen
1453cd4b97 Update v5 status 2022-08-06 07:11:11 -05:00
Jack Christensen
6871a0c4a6 Add v5 testing note to readme 2022-08-06 07:10:37 -05:00
Jack Christensen
1819959d07 Merge branch 'master' into v5-dev 2022-08-06 06:41:33 -05:00
Jack Christensen
8f0c9557e4 Merge remote-tracking branch 'pgconn/master' into v5-dev 2022-08-06 06:33:10 -05:00
Jack Christensen
1f64122c42 Tweak changelog 2022-08-06 06:27:32 -05:00
Jack Christensen
5768a0c0bd Update changelog 2022-08-06 06:24:41 -05:00
Jack Christensen
7ce634d62b Ensure there is a timeout for background pool connections 2022-08-06 06:22:17 -05:00
Jack Christensen
f3e04b28cc Go 1.19 go fmt 2022-08-06 06:20:50 -05:00
Jack Christensen
7ad36f386d Upgrade dependencies 2022-08-06 06:11:07 -05:00
Jack Christensen
4c048d40d8 Update changelog 2022-08-06 06:07:40 -05:00
Jack Christensen
5192d9acc1 Upgrade 3rd party dependencies 2022-08-06 06:00:03 -05:00
Jack Christensen
0a539a9d92 Upgrade pgproto3 2022-08-06 05:58:55 -05:00
Peter Feichtinger
3cb99532f5 pgxpool: Make BeginTx success case clearer 2022-08-04 20:22:57 -05:00
Jack Christensen
0eda0109ca Add Pool.Reset() 2022-07-30 12:22:29 -05:00
Jack Christensen
83670d675d Upgrade golang.org/x/crypto 2022-07-30 12:17:00 -05:00
Jack Christensen
957671a6ec Use puddle v2 2022-07-30 12:16:16 -05:00
Jack Christensen
033fc6f62a Rename pgxpool.NewConfig to NewWithConfig
https://github.com/jackc/pgx/issues/1264
2022-07-30 09:16:42 -05:00
Jack Christensen
c3258b7f52 Fix scan pointer to pointer to nil slice
https://github.com/jackc/pgx/issues/1263
2022-07-30 09:10:50 -05:00
James Hartig
91c9e841e1 Ignore cancellation in puddle constructor
Fixes #1259
2022-07-30 07:54:16 -05:00
Jack Christensen
88079de700
Update issue templates 2022-07-30 07:46:10 -05:00
Jack Christensen
7f382f5190 Better fuzz testing and fix several bugs it found
Fix infinite loop in AuthenticationSASL.Decode
Fix panic in CommandComplete.Decode
Fix panic in DataRow.Decode
Fix panic in NotificationResponse.Decode
2022-07-23 16:13:06 -05:00
Jack Christensen
9d0f27bc4b Initial fuzz testing and fix
Initial fuzz testing of pgproto3 found a panic
2022-07-23 15:22:28 -05:00
Jack Christensen
2da0a11c52 Skip some examples on CockroachDB 2022-07-23 10:52:35 -05:00
Jack Christensen
cb5ddcd6b6
Update issue templates 2022-07-23 10:43:26 -05:00
Jack Christensen
ce378b4d9c Skip example on Cockroach DB 2022-07-23 10:21:01 -05:00
Jack Christensen
5cee04a026 Add child records docs and examples 2022-07-23 10:11:13 -05:00
Jack Christensen
4739f79fca More doc tweaks 2022-07-23 09:42:46 -05:00
Jack Christensen
3595561d9a More doc improvements 2022-07-23 09:29:25 -05:00
Jack Christensen
e487ab0886 Docs should emphasize CollectRows and ForEachRow 2022-07-23 09:04:03 -05:00
Jack Christensen
83780b85b5 Remove pgx logging code moved to tracelog 2022-07-23 08:54:59 -05:00
Jack Christensen
68b7e12df2 Add examples 2022-07-23 08:52:01 -05:00
Jack Christensen
9a61fc250f Recommend CollectRows in ConnQuery docs 2022-07-23 08:31:37 -05:00
Jack Christensen
4087119005 Add Conn.Query example 2022-07-23 08:24:44 -05:00
Jack Christensen
178a84261f Improve Query docs 2022-07-23 07:53:02 -05:00
Jack Christensen
cb48716c67 Update to new package path 2022-07-23 07:31:14 -05:00
Jack Christensen
d433545662 Remove obsolete doc 2022-07-23 07:06:22 -05:00
Jack Christensen
f07ad22f14 Update PgBouncer docs 2022-07-23 07:04:32 -05:00
Jack Christensen
7c81972938 Update line wrapping in docs 2022-07-23 07:04:23 -05:00
Jack Christensen
fe0fb3b24d Clean up docs for new ParseConfigOptions feature 2022-07-20 06:28:08 -05:00
Jack Christensen
69b99209fb Run go fmt 2022-07-20 06:06:54 -05:00
yun.xu
cdd2cc4124 EC-2198 change for sslpassword 2022-07-20 06:05:06 -05:00
Eric McCormack
7402796e02 Delete pgconn.iml 2022-07-20 06:05:06 -05:00
Eric McCormack
c56b38c1f6 SSL password - changes based on community feedback 2022-07-20 06:05:06 -05:00
Eric McCormack
32ec44f726 Add support for SslPassword 2022-07-20 06:05:06 -05:00
Jack Christensen
a5b4f888c2 Fix flickering test on CI
Ensure the conn reads everything expected before closing.
2022-07-16 18:16:19 -05:00
Jack Christensen
b6f5cbd15e Add Conn to Rows interface
https://github.com/jackc/pgx/issues/1191
2022-07-16 17:56:24 -05:00
Jack Christensen
29254180ca Add callback functions to queued queries
Improve batch query ergonomics by allowing the code that handles the
results of a query to be placed right next to the query.
2022-07-16 17:46:47 -05:00
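A minimal sketch of the callback style; conn, ctx, and the "widgets" table are
assumed:

    var count int64
    batch := &pgx.Batch{}
    batch.Queue("select count(*) from widgets").QueryRow(func(row pgx.Row) error {
        return row.Scan(&count) // result handled right next to where the query is queued
    })
    err := conn.SendBatch(ctx, batch).Close()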
Jack Christensen
78875bb95a Add tracing support
Replaces existing logging support. Package tracelog provides adapter for
old style logging.

https://github.com/jackc/pgx/issues/1061
2022-07-16 12:27:10 -05:00
Jack Christensen
9201cc0341 ConnectConfig copies config 2022-07-16 08:58:43 -05:00
Jack Christensen
759e47dba3 Merge branch 'master' into v5-dev 2022-07-12 07:26:00 -05:00
Jack Christensen
d5807f01ed Restore test from v4 2022-07-12 06:57:56 -05:00
Jack Christensen
93c79d7d41 Merge remote-tracking branch 'pgtype/master' into v5-dev 2022-07-12 06:52:59 -05:00
Jack Christensen
0f7b95c3a4 Merge remote-tracking branch 'pgconn/master' into v5-dev 2022-07-12 06:45:54 -05:00
Jack Christensen
3dc9d17757 Document new ResultReader.Values behavior 2022-07-11 21:17:45 -05:00
Jack Christensen
f0cd9cb867 Update CommandTag comment 2022-07-11 21:09:55 -05:00
Jack Christensen
aaacdbf3ea Use string internally for CommandTag 2022-07-11 21:09:03 -05:00
Jack Christensen
786de2bda8 Use correct cache 2022-07-11 20:42:55 -05:00
Jack Christensen
224393188d Fix InetCodec.DecodeValue 2022-07-11 08:07:23 -05:00
Jack Christensen
a059d1099f pgxpool pools always connect lazily
Rename constructor functions now that they don't actually connect.
2022-07-10 14:58:30 -05:00
Jack Christensen
ca41a6a222 Update docs 2022-07-10 14:32:08 -05:00
Jack Christensen
e7eb8a3250 Use netip package for representing inet and cidr types 2022-07-10 14:31:55 -05:00
Jack Christensen
7974a102fc Improve Scan error messages 2022-07-09 21:47:39 -05:00
Jack Christensen
b662ab6767 Better encode error message 2022-07-09 21:40:44 -05:00
Jack Christensen
731daea586 Skip test on CockroachDB 2022-07-09 21:08:15 -05:00
Jack Christensen
80a529fcb7 Test LoadType disambiguate name by schema 2022-07-09 17:48:46 -05:00
Jack Christensen
31ec18cc65 Replace Begin and BeginTx methods with functions 2022-07-09 17:25:55 -05:00
Jack Christensen
62f0347586 Add CollectOneRow 2022-07-09 16:59:29 -05:00
Jack Christensen
90c2dc6f68 Rename ForEachScannedRow to ForEachRow 2022-07-09 16:47:28 -05:00
Jack Christensen
da192291f7 Add CollectRows and RowTo* functions
Collect functionality was originally developed in pgxutil
2022-07-09 16:39:42 -05:00
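A short sketch using the new helpers; conn, ctx, and the "widgets" table are
assumed:

    rows, err := conn.Query(ctx, "select name from widgets")
    if err != nil {
        return err
    }
    names, err := pgx.CollectRows(rows, pgx.RowTo[string]) // CollectRows closes rows
    if err != nil {
        return err
    }
    fmt.Println(names)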
Jack Christensen
3dafb5d4ee Skip test with non-standard CRDB behavior 2022-07-09 10:21:17 -05:00
Jack Christensen
c31b89a3f2 Delay handling invalidated statements when in transaction 2022-07-09 10:20:54 -05:00
Jack Christensen
e7aa76ccf9 SendBatch now uses pipeline mode to prepare and describe statements
Previously, a batch with 10 unique parameterized statements executed
100 times would entail 11 network round trips: 1 for each prepare /
describe and 1 for executing them all. Now pipeline mode is used to
prepare / describe all statements in a single network round trip. So it
would only take 2 round trips.
2022-07-09 09:32:36 -05:00
Jack Christensen
ba58e3d5d2 Fix pipeline prepare query without row results 2022-07-09 08:32:12 -05:00
Jack Christensen
76946fb5a3 Replace QueryFunc with ForEachScannedRow 2022-07-07 20:29:04 -05:00
Jack Christensen
a86f4f3db9 Add deallocate to pipeline mode 2022-07-07 19:32:01 -05:00
Jack Christensen
1168b375e4 Expose pgx functionality for manual integration with pgconn
This is primarily useful for using pipeline mode.
2022-07-04 13:29:49 -05:00
Jack Christensen
f7433cc5f2 Fix typo 2022-07-04 06:20:15 -05:00
Jack Christensen
f635b43a6b Use bigint in tests for CockroachDB compatibility
CRDB automatically changes int4 to int8.
2022-07-02 22:00:42 -05:00
Jack Christensen
a97ba0c34a Remove ReceiveResults
Pipeline mode should be used instead.
2022-07-02 21:50:07 -05:00
Jack Christensen
ae2881a23c Add pipeline mode to pgconn 2022-07-02 21:48:16 -05:00
sergey.bashilov
a18df2374a add ignore not preferred err flag in connect func 2022-07-02 07:04:50 -05:00
sergey.bashilov
cdc240d920 rename error 2022-07-02 07:04:50 -05:00
sergey.bashilov
618a12a094 remove HasPreferStandbyTargetSessionAttr, rename error to indicate server is not standby 2022-07-02 07:04:50 -05:00
sergey.bashilov
1b6543f29c fix typos 2022-07-02 07:04:50 -05:00
sergey.bashilov
25935a39b6 add prefer-standby target_session_attrs 2022-07-02 07:04:50 -05:00
Jack Christensen
ed3e9f1dd4 Check for more specific error 2022-07-01 15:33:12 -05:00
Jack Christensen
585022440b Update changelog 2022-06-25 18:11:39 -05:00
Jack Christensen
03da9fcec6 Check conn liveness before using when idle for more than 1 second
Implemented in pgxpool.Pool and database/sql.

https://github.com/jackc/pgx/issues/672
2022-06-25 17:58:53 -05:00
Jack Christensen
26eda0f86d Check for ENV conn string and skip test if missing 2022-06-25 16:55:09 -05:00
Jack Christensen
9afd320b9e Fix flickering test in CI
While this test always worked on my machine, it flickered in CI. And to
be fair the test can't guarantee the condition it is testing. Work
around this by trying many times before admitting failure.
2022-06-25 16:05:20 -05:00
Jack Christensen
72b1dcff2f Add pgconn.CheckConn 2022-06-25 15:55:09 -05:00
Jack Christensen
b068d53753 Fix race in test
Goroutine should have its own err var instead of sharing.
2022-06-25 14:07:48 -05:00
Jack Christensen
125ee9670e Test TLS connection with pg_stat_ssl
Because of the nbconn wrapper it is no longer possible to check if the
conn is a *tls.Conn directly. This is actually a more reliable test
anyway.
2022-06-25 13:43:16 -05:00
Jack Christensen
82ca09e645 Numeric infinity only supported on PG 14+
Move to PG 14+ specific test
2022-06-25 13:33:09 -05:00
Jack Christensen
811d855a35 Add non-blocking IO
This eliminates an edge case that can cause a deadlock and is a
prerequisite to cheaply testing connection liveness and to recovering a
connection after a timeout.

https://github.com/jackc/pgconn/issues/27

Squashed commit of the following:

commit 0d7b0dddea1575e9fd72592665badb8cbdd581cc
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Jun 25 13:15:05 2022 -0500

    Add test for non-blocking IO preventing deadlock

commit 79d68d23d38bb03ddb8bf13cb45792430eaf959a
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Jun 18 18:23:24 2022 -0500

    Release CopyFrom buf when done

commit 95a43139c7b0b7557898c4480e5b3e42417ee3c0
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Jun 18 18:22:32 2022 -0500

    Avoid allocations with non-blocking write

commit 6b63ceee076794bc4380495a55dd414dbbd08a43
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Jun 18 17:46:49 2022 -0500

    Simplify iobufpool usage

commit 60ecdda02e5a24c894df4f58d31c485b90de5d5b
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Jun 18 11:51:59 2022 -0500

    Add true non-blocking IO

commit 7dd26a34a182d4aacaed3bf8c09f9cc48a7b6156
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Jun 4 20:28:23 2022 -0500

    Fix block when reading more than buffered

commit afa702213f1b6d24c976406448301b2be53b7f70
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Jun 4 20:10:23 2022 -0500

    More TLS support

commit 51655bf8f40321d5f89bc3c02dd55fba0ac6aa49
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Jun 4 17:46:00 2022 -0500

    Steps toward TLS

commit 2b80beb1ed75f0f58db8188b87753dbc26b62098
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Jun 4 13:06:29 2022 -0500

    Little more TLS support

commit 765b2c6e7b034ff6ffab3974579fd6ee7add593b
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Jun 4 12:29:30 2022 -0500

    Add testing of TLS

commit 5b64432afbed9224f9512cc46624c88e7ebec625
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Jun 4 09:48:19 2022 -0500

    Introduce testVariants in prep for TLS

commit ecebd7b103d4a9125c61e83f3651b950658b0b84
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Jun 4 09:32:14 2022 -0500

    Handle and test read of previously buffered data

commit 09c64d8cf3ca5be1a31bef46bf78fa5cb9fae831
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Jun 4 09:04:48 2022 -0500

    Rename nbbconn to nbconn

commit 73398bc67a7b7bd1aa044fb9b0546f4198ef92d2
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Jun 4 08:59:53 2022 -0500

    Remove backup files

commit f1df39a29d23ae4e5175b92c69697f2bf9b4e112
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Jun 4 08:58:05 2022 -0500

    Initial passing tests

commit ea3cdab234343fc9761d9b7966c5346179cd1b01
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Jun 4 08:38:57 2022 -0500

    Fix connect timeout

commit ca22396789d120ff556f9704f4470268fbc8c0d8
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Thu Jun 2 19:32:55 2022 -0500

    wip

commit 2e7b46d5d7454daf0859dd48f8a8e190995164c5
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Mon May 30 08:32:43 2022 -0500

    Update comments

commit 7d04dc5caa80cb147929b6f65bab60a27baaff89
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat May 28 19:43:23 2022 -0500

    Fix broken test

commit bf1edc77d70465b4097a59c08c581033d2033ac6
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat May 28 19:40:33 2022 -0500

    fixed putting wrong size bufs

commit 1f7a855b2e4d1e14f85ac5f5683e2b93db0a4bd9
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat May 28 18:13:47 2022 -0500

    initial not quite working non-blocking conn
2022-06-25 13:15:31 -05:00
Jack Christensen
12c49ee213 shopspring-numeric extension does not panic on NaN
https://github.com/jackc/pgtype/issues/169
2022-06-23 21:01:56 -05:00
Gabor Szabad
396195466c Add logger func wrapper 2022-06-23 20:15:20 -05:00
Jack Christensen
c0a4d1b9ce Add a few tests 2022-06-20 20:43:56 -05:00
Jack Christensen
6dd004c8b8 Backport numeric to string from v5
refs https://github.com/jackc/pgx/issues/1230
2022-06-20 20:40:25 -05:00
James Hartig
a814153aeb pgxpool: health check should avoid going below minConns 2022-06-07 18:38:03 -05:00
William Storey
4db2a33562 Do not convert IPv4-mapped IPv6 addresses to IPv4
These addresses behave differently in some cases, so if we're given
them, we keep them as they are.
2022-06-07 18:34:04 -05:00
William Storey
1e485c1c3b Do not send IPv4 networks as IPv4-mapped IPv6
Previously if we provided a parameter that was an array of strings such
as []string{"0.0.0.0/8"}, we would encode this when sending to Postgres
as ::ffff:0.0.0.0/8. From what I can tell, this is because when parsing
the IP/network using net functions, we get a byte array that is 16 bytes
long, even if it is an IPv4 network. In Inet.EncodeBinary(), we look at
the length of the IP to determine what family the input is, and saw it
as IPv6 because of this.

We now always normalize IPv4 addresses using To4().
2022-06-07 18:34:04 -05:00
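The length behavior described above, in isolation (standard library net
package only):

    ip := net.ParseIP("10.0.0.1") // len(ip) == 16 even for an IPv4 address
    v4 := ip.To4()                // len(v4) == 4, which is what the family check needs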
William Storey
6fc738ea05 Use correct test description 2022-06-07 18:34:04 -05:00
James Hartig
2afddedda8 protect against panic from PlanScan when interface{}(nil) is passed 2022-06-02 20:21:53 -05:00
Jack Christensen
37c3f157bc Add Hijack from v5 2022-06-02 20:04:08 -05:00
Jack Christensen
e12ba1b6b9 Extract iobufpool 2022-05-28 10:59:54 -05:00
Jack Christensen
7d5993d104 Add BenchmarkConnectClose 2022-05-28 06:32:39 -05:00
James Hartig
824d8ad40d support *sql.Scanner for null handling
Fixes jackc/pgx#1211
2022-05-28 06:30:12 -05:00
Oliver Tan
7ddbd74d5e stop ignoring ErrorResponse during GSS auth 2022-05-25 06:22:18 -05:00
Jack Christensen
bfaea9e7ec Fix rare race in CopyFrom 2022-05-24 08:26:37 -05:00
Jack Christensen
55e0b4c30e Skip CockroachDB in TestTrace 2022-05-23 18:15:53 -05:00
Jack Christensen
b59cd50508 TestTrace enables tracing after connection established
This avoids locking to a specific version of the server.
2022-05-23 17:52:10 -05:00
Jack Christensen
67635f896c Fix output to include message size and add some docs 2022-05-21 17:30:47 -05:00
Jack Christensen
b74c109f61 Optimize tracing
The addition of tracing caused messages to escape to the heap. By
avoiding interfaces the messages no longer escape.
2022-05-21 17:22:58 -05:00
Jack Christensen
f2e96156a0 Add message tracing 2022-05-21 14:43:04 -05:00
Jack Christensen
5714896b10 Restructure sending messages
Use an internal buffer in pgproto3.Frontend and pgproto3.Backend instead
of directly writing to the underlying net.Conn. This will allow tracing
messages as well as simplify pipeline mode.
2022-05-21 11:06:44 -05:00
Jack Christensen
dc0ad04ff5 Fix batch logging tests 2022-05-12 19:10:02 -05:00
Stepan Rabotkin
4099b447b9 feat: add batch logging 2022-05-12 19:05:08 -05:00
Stepan Rabotkin
bfb19cd4f6 feat: add time duration to error query and copy 2022-05-12 19:05:08 -05:00
Jack Christensen
989a4835de Remove rune to text conversion
Because rune is an alias for int32 this caused some very surprising
results. e.g. inserting int32(65) into text would insert "A" instead of
"65".
2022-05-12 17:13:49 -05:00
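The surprise in isolation (standard library only):

    fmt.Println(string(rune(65))) // "A"  - rune/int32 is treated as a code point
    fmt.Println(strconv.Itoa(65)) // "65" - what a text column should receive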
Jack Christensen
8b9b4055f3 Release v4.16.1 2022-05-07 07:14:53 -05:00
Jack Christensen
644bd73dcc Upgrade to pgconn v1.12.1 2022-05-07 07:13:23 -05:00
Jack Christensen
831fc211bc Release v1.12.1 2022-05-07 07:11:19 -05:00
Rafi Shamim
1d398317ca Stop ignoring ErrorResponse during SCRAM auth
The server may send back an ErrorResponse during SCRAM auth, and these
messages may contain useful information that described why
authentication failed. For example, if the password was invalid.
2022-05-07 07:03:52 -05:00
Eno Compton
0135721378 Add support for Unix sockets on Windows
Fixes #1199.
2022-05-07 06:59:53 -05:00
Jack Christensen
c1495aace0 Add RowScanner interface 2022-04-30 12:49:12 -05:00
Jack Christensen
01190e5d78 Update ScanPlan.Scan documentation 2022-04-30 08:29:51 -05:00
Jack Christensen
a89a400b69 Fix documentation for Rows.RawValues and test new behavior 2022-04-30 08:27:57 -05:00
Jack Christensen
81d55568f6 Clarify v5 supported Go version plans
fixes https://github.com/jackc/pgx/issues/1197
2022-04-28 08:13:51 -05:00
Jack Christensen
e8f81bb7de Merge branch 'master' into v5-dev
Rewrite fix for https://github.com/jackc/pgx/issues/1196 for pgx v5.
2022-04-28 08:06:34 -05:00
Jack Christensen
d9622f438d Merge remote-tracking branch 'pgconn/master' into v5-dev 2022-04-28 08:01:26 -05:00
Jack Christensen
7ceeea6fe6 Fix explicitly prepared statements with describe statement cache mode
fixes https://github.com/jackc/pgx/issues/1196
2022-04-28 07:58:24 -05:00
Jack Christensen
0c6266ef30 Fix scanning null did not overwrite slice 2022-04-26 14:52:01 -05:00
sireax
84e8238fa0 Fix: setting krbspn and krbsrvname didn't work 2022-04-26 08:54:24 -05:00
Jack Christensen
7427820aba Scan binary UUID to string
https://github.com/jackc/pgx/issues/1191
2022-04-26 08:37:10 -05:00
Jack Christensen
d13bdbbd35 NamedArgs allows underscore 2022-04-25 10:16:47 -05:00
Harmen
d846dbcb75 allow string values in timestamp[tz].Set() 2022-04-25 08:43:55 -05:00
Diego Becciolini
53266f029f Hstore: fix AssignTo
Hstore.AssignTo into a map of string pointers takes the address of the loop variable, thus setting all the entries to the same string pointer.

extend TestHstoreAssignToNullable

assert fix
2022-04-25 08:38:20 -05:00
Jack Christensen
c093c4af21 Update changelog 2022-04-23 18:56:38 -05:00
Jack Christensen
107196ab0c Add NamedArgs
https://github.com/jackc/pgx/issues/1186
https://github.com/jackc/pgx/issues/387
2022-04-23 18:45:38 -05:00
Jack Christensen
b72b0daa5a Add QueryRewriter interface 2022-04-23 17:26:42 -05:00
Jack Christensen
f9857b73d9 Skip multirange tests on PG < 14 2022-04-23 16:55:24 -05:00
Jack Christensen
dfb681d716 Build / rewrite / port multirange support 2022-04-23 12:50:18 -05:00
Jack Christensen
126b582f19 Make range helpers private 2022-04-23 11:10:04 -05:00
Jack Christensen
1f4b34f932 Merge branch 'master' into v5-dev 2022-04-23 11:05:24 -05:00
Jack Christensen
cb45e85954 Merge remote-tracking branch 'pgtype/master' into v5-dev 2022-04-23 11:00:07 -05:00
Jack Christensen
c323ab6662 Merge remote-tracking branch 'pgconn/master' into v5-dev 2022-04-23 10:48:44 -05:00
Jack Christensen
a92f1df1df Merge remote-tracking branch 'pgproto3/master' into v5-dev
Pull in pgproto3 changes and update for pgx v5
2022-04-23 10:43:48 -05:00
Jack Christensen
468b793282 Skip tests with unsupported types on CockroachDB 2022-04-23 10:34:53 -05:00
Jack Christensen
791176f4fe Add link to github.com/vgarvardt/pgx-google-uuid 2022-04-23 10:26:41 -05:00
Jack Christensen
c6335a30d0 Add link to github.com/vgarvardt/pgx-google-uuid 2022-04-23 10:25:08 -05:00
Jack Christensen
8c1815e02e Release v4.16.0 2022-04-21 20:15:18 -05:00
Jack Christensen
c74f3f058f Add link to https://github.com/otan/gopgkrb5 2022-04-21 20:08:28 -05:00
Jack Christensen
e012ea0bed Upgrade pgconn, pgtype, and pgproto3 2022-04-21 20:05:32 -05:00
Jack Christensen
c5a0faca99 Release v1.11.0 2022-04-21 19:58:17 -05:00
Jack Christensen
49a860125f Try to pacify finicky timing test on CI 2022-04-21 19:52:34 -05:00
Jack Christensen
9bb49f990f Release v1.12.0 2022-04-21 19:49:01 -05:00
Jack Christensen
1b244eec5d Upgrade to pgproto3 v2.3.0 2022-04-21 19:48:43 -05:00
Jack Christensen
097b6aacb7 Add time to logging failed Exec
fixes #1189
2022-04-21 19:26:25 -05:00
Jack Christensen
c63f912615 Hstore.Set accepts map[string]Text 2022-04-21 19:19:32 -05:00
Jack Christensen
cc7de81d3b Make array helpers private 2022-04-16 14:21:40 -05:00
Jack Christensen
1c90746cf5 Update CHANGELOG 2022-04-16 14:14:59 -05:00
Jack Christensen
e94cf1fbaa Remove AcquireConn and ReleaseConn
Superseded by (*sql.Conn) Raw()
2022-04-16 14:07:59 -05:00
Jack Christensen
a01a9ee6df Automatically register Array and FlatArray 2022-04-16 14:04:25 -05:00
Jack Christensen
fccaebc93d Add pgtype.Map.SQLScanner
This enables compatibility with database/sql for types that cannot
implement Scan themselves.
2022-04-16 13:38:27 -05:00
Jack Christensen
f1a4ae3070 Add Array and FlatArray container types 2022-04-16 11:33:45 -05:00
Jack Christensen
d4abe83edb Revert use generics for RangeCodec
Reverted almost all of 976b1e0.

Still may consider a way to get DecodeValue to be strongly typed but
that feature isn't worth the complications of generics. Especially in
that applying this style to ArrayCodec would make Conn.LoadType
impossible for arrays.
2022-04-16 10:39:12 -05:00
Jack Christensen
8b483e4223 Use generic / type safe puddle for pgxpool 2022-04-16 09:28:46 -05:00
Jack Christensen
beb4e2cfbc SQLCODE 42501 is fatal connect error
Don't try fallback configs. Match libpq behavior.

fixes https://github.com/jackc/pgconn/issues/108
2022-04-16 07:26:56 -05:00
Jack Christensen
25558de3bd Add UnmarshalJSON to pgtype.Int2
fixes https://github.com/jackc/pgtype/issues/153
2022-04-16 07:07:31 -05:00
Oliver Tan
90ef5bba3f add GSSAPI authentication
This commit adds the GSSAPI authentication to pgx. This roughly follows
the lib/pq implementation:
* We require registering a provider to avoid mass dependency inclusions
  that may not be desired (https://github.com/lib/pq/issues/971).
* Requires the pgproto3 package be updated. I've included my custom fork
  for now.
2022-04-15 08:17:48 -05:00
Jack Christensen
b03b1666a6 Add Hijack to pgxpool.Conn 2022-04-14 11:50:12 -05:00
Oliver Tan
175856ffd3 add GSS authentication to pgproto3 2022-04-12 07:13:21 -05:00
Jack Christensen
bb8c52f7e8 Add doc regarding default pgxpool.Config.MaxConns
refs #1183
2022-04-11 18:49:11 -05:00
Jack Christensen
1ef2cee36e Update changelog 2022-04-09 10:26:45 -05:00
Jack Christensen
976b1e03a9 Use generics for RangeCodec
This allows DecodeValue to return a more strongly typed value.
2022-04-09 10:21:17 -05:00
Jack Christensen
c8025fd79a Use generics for Range values 2022-04-09 09:34:37 -05:00
Jack Christensen
f14fb3d692 Replace interface{} with any 2022-04-09 09:12:55 -05:00
Jack Christensen
95265a7421 Use Go 1.18 2022-04-09 09:11:19 -05:00
Jack Christensen
829babcea9 Better number to string handling
Avoid ambiguity of stringWrapper implementing Int64Scanner and
Float64Scanner.
2022-04-09 09:09:46 -05:00
Matthew Gabeler-Lee
5982e4b4f8 fix detection of database does not exist error during connect 2022-04-09 07:47:59 -05:00
Mukundan Kavanur Kidambi
c6ccb4b9a3 Addressing feedback 2022-04-02 18:58:59 -05:00
Mukundan Kavanur Kidambi
e145003288 Addressing feedback 2022-04-02 18:58:59 -05:00
Mukundan Kavanur Kidambi
1d7886b012 Adding UTs 2022-04-02 18:58:59 -05:00
Mukundan Kavanur Kidambi
fa2b096400 fix: Adding overall format before appending ColumnFormatCodes 2022-04-02 18:58:59 -05:00
WGH
ccb207cba5 Add support for record array
Like Record itself, it only implements BinaryDecoder,
doesn't implement BinaryEncoder, and has no support for the text
protocol.
2022-04-02 18:52:15 -05:00
WGH
3e230ba731 Split encode_binary and decode_binary in typed_array.go.erb
Again, RECORD, for example, has binary decoding, but no binary encoding.
2022-04-02 18:52:15 -05:00
WGH
5db1de5fc1 Make text format for type_array.go.erb opt-out
Some types, like RECORD, don't have sane text format. If we want to have
arrays of such types, we don't want to generate text format for such arrays
either.
2022-04-02 18:52:15 -05:00
WGH
71648e3d78 Add defaults for typed_array.go.erb template parameters
Most of the time binary_format is "true", and text_null is "NULL",
so it makes sense to not repeat that.
2022-04-02 18:52:15 -05:00
WGH
5ece2efd4c Fix typo in Record type documentation 2022-04-02 18:52:15 -05:00
Jack Christensen
8cf6721d66 Better int64 / numeric compat and text fixes 2022-04-02 16:55:05 -05:00
Jack Christensen
53ec52aa17 Fix out of date pgtype/int_test.go.erb 2022-04-02 14:41:33 -05:00
Jack Christensen
ee93440ac1 pgtype uses pgxtest
Added ValueRoundTripTest to pgxtest
Removed pgtype/testutil

pgtype tests now run with all (applicable) query modes. This gives
better coverage than before and revealed several bugs which are also
fixed in this commit.
2022-04-02 14:34:19 -05:00
Jack Christensen
83e50f21e8 Extract SkipCockroachDB to pgxtest 2022-04-02 10:35:13 -05:00
Jack Christensen
e18d76b798 Initial extraction of pgxtest
- Introduce ConnTestRunner
- RunWithQueryExecModes
2022-04-02 10:26:47 -05:00
Jack Christensen
e392908c72 Remove Int64Valuer implementation from stringWrapper 2022-04-02 08:24:55 -05:00
Jack Christensen
500c0721d7 Improve error messages for query argument encoding 2022-04-01 18:00:25 -05:00
Jack Christensen
3a6d9490e5 Only test numeric infinity on PG 14+ 2022-03-26 11:38:31 -05:00
Jack Christensen
600c4fd931 Skip test for Cockroach CI 2022-03-22 20:44:17 -05:00
Jack Christensen
103dfe145e Test should always close rows 2022-03-22 20:41:05 -05:00
Jack Christensen
7b31b56de9 Reactivate CI for other DB versions 2022-03-22 20:33:24 -05:00
Jack Christensen
e04b35bfcb Make pgtype test compat with CockroachDB when possible 2022-03-22 20:33:24 -05:00
Jack Christensen
210ebb4a50 Disable incompatible test with CockroachDB 2022-03-22 20:33:24 -05:00
Jack Christensen
0fd0688d4f Alter some tests for CockroachDB 2022-03-22 20:33:24 -05:00
Jack Christensen
69580cd519 Fix a test failure 2022-03-22 20:33:24 -05:00
Jack Christensen
95c03dc9ae Unskip and fix tests 2022-03-22 20:33:24 -05:00
Jack Christensen
793eb53017 Enable test with updated error message 2022-03-22 20:33:24 -05:00
Jack Christensen
29bec2b97e Remove skipped test for scan binary to string
Receiving a binary value and encoding it back into text seems to be an
anti-pattern to me. Don't want to silently enable this. May be able to
reverse course later if necessary.
2022-03-22 20:33:24 -05:00
Jack Christensen
0cd7c757c3 Fix skipped test 2022-03-22 20:33:24 -05:00
Jack Christensen
be5a6cc9c0 Remove obsolete test 2022-03-22 20:33:24 -05:00
Jack Christensen
5ca048ed2d Fix crash with pointer to nil struct 2022-03-22 20:33:24 -05:00
Jens Emil Schulz Østergaard
4c6f1b1dc4 fix: add json rawmessage to typed_array_gen.sh 2022-03-21 19:15:21 -05:00
Jens Emil Schulz Østergaard
b103a6efbd test: jsonbarray set failing test cases 2022-03-21 19:15:21 -05:00
Patrick Audley
9f23ed84ba Minor typo in Changelog 2022-03-21 09:01:46 -05:00
Jack Christensen
8c18d7808b Add documentation 2022-03-19 17:01:12 -05:00
Jack Christensen
72b72b9ae9 Remove dead code 2022-03-12 15:07:32 -06:00
Jack Christensen
cb721dfb5b SendBatch supports default QueryExecMode 2022-03-12 15:06:13 -06:00
Jack Christensen
1390a11fe2 Query supports QueryExecMode
Fixed QueryExecModeExec as it must only use text format without
specifying param OIDs.
2022-03-12 14:15:39 -06:00
Jack Christensen
0c166c7620 Fix BC dates in text format 2022-03-12 12:47:01 -06:00
Jack Christensen
46966227bc Enable all QueryExecModes for exec path 2022-03-12 10:04:02 -06:00
Jack Christensen
8e341e20f3 Remove ConnConfig.BuildStatementCache 2022-03-12 09:23:40 -06:00
Jack Christensen
f27178ba85 Initial privatization of stmtcache
ConnConfig.BuildStatementCache is pending removal once connections
always have separate caches for prepared and described statements.
2022-03-12 08:35:31 -06:00
Jack Christensen
fe21cc7486 Use Map.Encode path for simple protocol 2022-03-05 21:40:49 -06:00
Jack Christensen
c4b08378f2 Handle driver.Valuers inside Map.Encode 2022-03-05 21:27:17 -06:00
Jack Christensen
0905d1f452 Register more default types and handle unknown types better 2022-03-05 21:19:58 -06:00
Jack Christensen
2831eedef3 Simplify copy encoding 2022-03-05 20:27:36 -06:00
Jack Christensen
e5685a34fc Simplify encoding extended query arguments 2022-03-05 20:16:57 -06:00
Jack Christensen
1cef9075d9 Simplify typed nil and driver.Valuer handling
* Convert typed nils to untyped nils at beginning of encoding process.
* Restore v4 json/jsonb null behavior
* Add anynil internal package
2022-03-05 19:53:59 -06:00
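A generic sketch of normalizing a typed nil before encoding; this is not the
actual anynil package, just an illustration using reflect:

    // normalizeNil turns values like (*int)(nil) stored in an any into a plain
    // untyped nil so later encoding code only has one nil case to handle.
    func normalizeNil(v any) any {
        if v == nil {
            return nil
        }
        rv := reflect.ValueOf(v)
        switch rv.Kind() {
        case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Func, reflect.Chan:
            if rv.IsNil() {
                return nil
            }
        }
        return v
    }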
Jack Christensen
39d2e3dc3f Move chooseParameterFormatCode 2022-03-05 15:16:12 -06:00
Jack Christensen
0d8e109c21 Test every QueryExecMode 2022-03-05 14:04:51 -06:00
Jack Christensen
aad3d65e16 Initial restructure of simple protocol to query exec mode 2022-03-05 10:27:15 -06:00
Jack Christensen
2885b039d5 Rename Uint32 field to include bit size
i.e. Uint renamed to Uint32. This matches the pattern set by the
database/sql types.
2022-03-05 09:23:25 -06:00
Jack Christensen
84a3d91322 pgtype Float4 and Float8 fields include bit size
e.g. Instead of Float it is Float64. This matches the pattern set by the
database/sql types.
2022-03-05 09:20:03 -06:00
Jack Christensen
d723a4ab6f pgtype Int2, Int4, and Int8 fields include bit size
e.g. Instead of Int it is Int64. This matches the pattern set by the
database/sql types.
2022-03-05 09:17:31 -06:00
Jack Christensen
872a7a9315 Fix pgtype/int.go.erb 2022-03-05 09:08:14 -06:00
Jack Christensen
e7f90ba6e4 Remove unused pgtype.Map field 2022-03-05 09:00:49 -06:00
Jack Christensen
ec8f7c4204 Add comment for FormatCodeForOID 2022-03-05 08:56:41 -06:00
Jack Christensen
3ce50c079e Rename dbSavepoint to dbSimulatedNestedTx
https://github.com/jackc/pgx/issues/1161
2022-03-05 08:41:02 -06:00
Jack Christensen
b7a85d1a6f Consider any "0A000" error a possible cached plan changed error
https://github.com/jackc/pgx/issues/1162
2022-03-05 08:23:58 -06:00
Andrew Rusakow
a86ece025c Fix single-line comment handling for Mac line endings when sanitizing. 2022-03-04 18:17:40 -06:00
Vu
a365c9a3c2 Add multirange support for num, int4 and int8 types 2022-03-04 18:15:30 -06:00
Jack Christensen
45a8b00271 Do not recursively call public PlanScan that caches
Otherwise, wrapper types get cached. Wrapper types are expected to fail
most of the time. These failures should not be cached. In addition,
wrappers wrap multiple different types so it doesn't make sense to cache
results of a wrapper.
2022-03-04 11:04:46 -06:00
Jack Christensen
a8f6674a07 TextCodec specifically supports scanning to BytesScanner
This lets it support DriverBytes and PreallocatedBytes.
2022-02-26 20:28:15 -06:00
Jack Christensen
ffc5a692cb Detect unsafe pgtype.DriverBytes usage
Add test for unsafe usage and test for correct usage that ensures driver
memory is actually used.
2022-02-26 20:23:35 -06:00
Jack Christensen
b1e4b96e6c Reduce big read buffer allocations with sync.Pool 2022-02-26 19:57:41 -06:00
Jack Christensen
2fad63c189 Set cap when returning slice from chunkReader 2022-02-26 09:37:14 -06:00
Jack Christensen
e641d0a5ad Reuse connection read buffer
To avoid extra copies and small allocations, large read buffers were
previously allocated and never reused. However, the downside of this was
greater total memory allocation and the possibility that a reference to
a single byte could pin an entire buffer.

Now the buffer is reused.
2022-02-26 09:31:45 -06:00
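The two buffer commits above share one idea: keep a pool of large read buffers and copy out only the bytes that must survive the read. A minimal sketch in Go, assuming a fixed 8 KiB buffer size; pgconn's and chunkreader's actual code differ in detail:

    package example

    import (
        "io"
        "sync"
    )

    // readBufPool holds large read buffers so they can be reused across reads
    // instead of being allocated fresh (and possibly pinned) for every message.
    var readBufPool = sync.Pool{
        New: func() interface{} { return make([]byte, 8192) },
    }

    // readCopy reads into a pooled buffer, copies out only the bytes the caller
    // needs to keep, and returns the big buffer to the pool.
    func readCopy(r io.Reader) ([]byte, error) {
        buf := readBufPool.Get().([]byte)
        defer readBufPool.Put(buf)
        n, err := r.Read(buf)
        out := make([]byte, n)
        copy(out, buf[:n])
        return out, err
    }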
Jack Christensen
2e0ec225de Make Chunkreader an internal implementation detail 2022-02-26 08:50:46 -06:00
Jack Christensen
d13f651810 Finish importing pgio as internal package 2022-02-21 14:35:20 -06:00
Jack Christensen
1be4c10ce4 Merge branch 'pgioimport' into v5-dev 2022-02-21 14:33:10 -06:00
Jack Christensen
d35500e397 Move pgio 2022-02-21 14:32:55 -06:00
Jack Christensen
032ea5f5c0 Finish import of chunkreader 2022-02-21 14:29:39 -06:00
Jack Christensen
44375443e1 Merge branch 'chunkreaderimport' into v5-dev 2022-02-21 14:27:21 -06:00
Jack Christensen
fd1a98f858 Move and clean for import 2022-02-21 14:27:05 -06:00
Jack Christensen
95cbbfe441 Import pgproto3
Also copy in pgmock as an internal package.
2022-02-21 13:22:42 -06:00
Jack Christensen
e06cc67875 Merge branch 'pgproto3import' into v5-dev 2022-02-21 11:58:13 -06:00
Jack Christensen
04476c4a13 Move pgproto3 to subdirectory 2022-02-21 11:57:34 -06:00
Jack Christensen
43083cb0e3 Memoize pgtype.Map.PlanScan 2022-02-21 10:10:16 -06:00
Jack Christensen
9c538cd4a9 Remove actualTarget argument 2022-02-21 09:30:01 -06:00
Jack Christensen
f3defbc150 Rename pgtype.None to pgtype.Finite 2022-02-21 09:25:30 -06:00
Jack Christensen
1f2f239d09 Renamed pgtype.ConnInfo to pgtype.Map 2022-02-21 09:13:09 -06:00
Jack Christensen
bda10b2ec9 Rename pgtype.DataType to pgtype.Type 2022-02-21 09:01:48 -06:00
Jack Christensen
a3c351d11a RegisterDataType now accepts *DataType 2022-02-21 08:49:04 -06:00
Jack Christensen
5daa487a2c Merge branch 'master' into v5-dev 2022-02-21 08:35:11 -06:00
Jack Christensen
b6b24f9e8a Allocate connRows on demand instead of preallocating in bulk
The 64-element preallocatedRows may be pinning memory from previous
queries.

See https://github.com/jackc/pgx/issues/1127
2022-02-19 11:51:25 -06:00
Jack Christensen
1e565b0d44 Handle stmtCache.Get error previously thought impossible
The statement cache is already prefilled, but it is possible for the
ctx to be canceled between when the statement is prepared and when the
statement is retrieved for use.

refs #1156
2022-02-19 11:28:39 -06:00
William Storey
ded272b1f2 Remove documentation line stating only one IP is used
With `expandWithIPs()` (added in #14), we try all IPs.
2022-02-19 08:10:07 -06:00
William Storey
ccb96b8aca Fix typos in comments 2022-02-19 08:10:07 -06:00
Jack Christensen
34bf0a5df9 Upgrade golang.org/x/text to v0.3.7
https://github.com/jackc/pgconn/issues/103
2022-02-19 08:00:51 -06:00
Jack Christensen
9c5dfbdfb3 pgconn.CommandTag is now an opaque type
It now makes a copy instead of retaining driver memory. This is in
preparation to reuse the driver read buffer.
2022-02-12 10:26:26 -06:00
Jack Christensen
e6680127e3 Reenable TestRowsScanNilThenScanValue 2022-02-12 09:40:33 -06:00
Jack Christensen
60da2914f3 Re-enable test 2022-02-12 09:37:12 -06:00
Jack Christensen
a14f3f291f Re-enable domain type test 2022-02-12 09:35:52 -06:00
Jack Christensen
4b6d527b0b Merge branch 'master' into v5-dev 2022-02-12 09:22:37 -06:00
Jack Christensen
3650fc9f68 Remove comparison with alternatives from readme
lib/pq and go-pg are both in maintenance mode. No point in comparison.
2022-02-12 09:19:55 -06:00
Saimon Shaplygin
4ac1499060 ref: remove anchored code 2022-02-12 09:15:24 -06:00
Isabel Jimenez
71da600c3a exposing stdlib DB connector 2022-02-12 09:11:56 -06:00
Jack Christensen
f861d83a17 Fix range types not clearing unbounded or empty 2022-02-08 16:48:17 -06:00
Jack Christensen
0306ce3a19 Fix scanning negative ints into Int64Scanner 2022-02-08 14:13:06 -06:00
Jack Christensen
1334d45d71 Parse array header to empty slices instead of nils 2022-02-08 11:35:40 -06:00
Jack Christensen
bcc0af3f56 Fix scan empty array into multi-dimension slice 2022-02-08 11:12:05 -06:00
Jack Christensen
7193e48923 Restore multi-dimensional slices
Move ArrayCode to use pgtype wrapper pattern as well
2022-02-08 10:07:40 -06:00
Jack Christensen
318018504a Merge branch 'master' into v5-dev 2022-02-07 11:22:01 -06:00
Jack Christensen
e2769993cc Merge remote-tracking branch 'pgconn/master' into v5-dev 2022-02-07 11:17:27 -06:00
Jack Christensen
2b7de82ef4 Release v4.15.0 2022-02-07 11:03:06 -06:00
Jack Christensen
67401de1c3 Upgrade to pgconn v1.11.0 2022-02-07 10:55:29 -06:00
Jack Christensen
3e5de44314 Release v1.11.0 2022-02-07 10:54:39 -06:00
Jack Christensen
6fea8eba5e Upgrade to pgtype v1.10.0 2022-02-07 10:52:30 -06:00
Jack Christensen
202542ead5 Release v1.10.0 2022-02-07 10:51:03 -06:00
Jack Christensen
c9eefd852a Upgrade to puddle v1.2.1 2022-02-07 10:48:30 -06:00
Jack Christensen
e8857f04a1 Make BatchResults.Close safe to be called multiple times
https://github.com/jackc/pgx/issues/1138
https://github.com/jackc/pgx/issues/938
2022-02-07 10:44:39 -06:00
Collin Forsyth
f4252a58be correctly Scan type aliases for floating point types 2022-02-05 20:23:39 -06:00
Jack Christensen
d02b2ed013 Add batch test for QueryRow without any rows
refs #1150
2022-02-05 20:12:35 -06:00
djsavvy
9eccdd6a81 Clarify that Values() and Scan() require Next() to have been called on the rows object 2022-02-05 20:06:01 -06:00
Jack Christensen
02372f1c3c Add DecodeValue to composites 2022-02-05 15:12:09 -06:00
Jack Christensen
3a94113118 Add composite to arbitrary struct encoding and decoding 2022-02-05 14:24:34 -06:00
Jack Christensen
727fc19cb7 Another error message improvement 2022-02-05 13:10:58 -06:00
Jack Christensen
28ea2cd190 Better error messages 2022-02-05 13:05:23 -06:00
Jack Christensen
6ebf54b62b Fix EnumCodec caching and add tests 2022-02-05 09:57:40 -06:00
Jack Christensen
288080c58c Add test documenting typed nil json encoding
Encoded into json null not SQL NULL.
2022-02-05 09:34:39 -06:00
Jack Christensen
0355d2ffea Add Float8range
PostgreSQL doesn't define float8range out of the box though it can
easily be created by the user. However, it is still convenient to treat
a numrange as a float8range.
2022-02-05 08:54:38 -06:00
Jack Christensen
a74ebc9e51 pgtype.Numeric implements Float64Valuer 2022-02-05 08:39:53 -06:00
Jack Christensen
a280f4db8a Float4 and Float8 implement Int64 Scanner and Valuer 2022-02-03 20:19:52 -06:00
Jack Christensen
ba4583cf4c Add range array types 2022-02-02 08:47:56 -06:00
Jack Christensen
cebe44ee85 Restore range support 2022-02-02 08:40:42 -06:00
Pinank Solanki
94e10b98b1 Fix typo in float8 2022-02-01 16:55:20 -06:00
Jack Christensen
11223497b3 Restore record support 2022-01-31 20:42:12 -06:00
Jack Christensen
ef7114a8ce Add DecodeValue and DecodeDatabaseSQLValue for ArrayCodec 2022-01-31 20:39:50 -06:00
Jack Christensen
558748ef9c ArrayCodec contains element DataType 2022-01-29 16:41:07 -06:00
Jack Christensen
b5bf9d7bb9 Move LoadDataType to pgx.Conn 2022-01-29 16:32:05 -06:00
Jack Christensen
f5c3eeb813 Initial rebuilt composite support 2022-01-29 15:43:18 -06:00
Jack Christensen
dc77e7c2da Add QueryRow warning to DriverBytes 2022-01-29 08:17:50 -06:00
Jack Christensen
47345e0d1e ArrayHeader.EncodeBinary doesn't need ci parameter 2022-01-25 20:21:28 -06:00
Jack Christensen
551d26ca41 Change ArrayHeader.ElementOID to uint32 2022-01-25 20:19:02 -06:00
Jack Christensen
f5806bc01c Add a fuzz test
Investigating https://github.com/jackc/pgx/issues/938.
2022-01-24 08:10:01 -06:00
Jack Christensen
0ddf9e3b4b Try wrapping scan target before sql.Scanner
This allows wrappers to directly avoid the slow sql.Scanner interface.
2022-01-22 18:40:46 -06:00
Jack Christensen
5ed95dcd1c Expose wrap functions on ConnInfo
- Remove rarely used ScanPlan.Scan arguments
- Plus other refactorings and fixes that fell out of this change.
- Plus rows Scan now handles checking for changed type.
2022-01-22 17:50:19 -06:00
Jack Christensen
322bfedc60 Remove old SQL scanner integration 2022-01-22 16:20:37 -06:00
Jack Christensen
aedf7d63e5 Expose try wrap functions in ConnInfo 2022-01-22 16:19:32 -06:00
Jack Christensen
2b395f3730 pgtype.DataType.Codec can never be nil 2022-01-22 12:21:16 -06:00
Jack Christensen
db95cee40c Remove pgtype.Value interface 2022-01-22 12:18:40 -06:00
Jack Christensen
4cf6dc9447 Remove BinaryEncoder and TextEncoder 2022-01-22 12:16:02 -06:00
Jack Christensen
3a90c6c879 Removed TextEncoder and BinaryEncoder
Restructured / fixed a lot of tests along the way.
2022-01-22 12:07:35 -06:00
Jack Christensen
eb0a4c9626 Replace some old database/sql compatibility 2022-01-22 11:21:12 -06:00
Jack Christensen
ad785d8134 Remove TypeValue interface 2022-01-22 10:56:56 -06:00
Jack Christensen
8d2c87b5e5 Remove old typed array code gen 2022-01-22 10:54:54 -06:00
Jack Christensen
740263c0d4 Convert UUID to Codec 2022-01-22 10:53:47 -06:00
Jack Christensen
b9b5e35d0f Convert numeric to Codec 2022-01-22 09:31:59 -06:00
Jack Christensen
0056156904 Add time array 2022-01-21 16:51:53 -06:00
Jack Christensen
61b4fb7689 Convert time to Codec 2022-01-21 16:50:30 -06:00
Jack Christensen
c8b8764401 Allow scanning tid to string 2022-01-20 20:59:36 -06:00
Jack Christensen
5ca29a014e Add tid array 2022-01-20 20:41:56 -06:00
Jack Christensen
7a3bc454e0 Convert TID to Codec 2022-01-20 20:40:37 -06:00
Jack Christensen
b10eb89fe4 Use wrapper to treat fmt.String as pgtype.TextValuer 2022-01-20 20:22:53 -06:00
Jack Christensen
06f4e47750 Add macaddr array 2022-01-20 20:10:43 -06:00
Jack Christensen
97443487ce Convert macaddr to Codec 2022-01-20 20:07:09 -06:00
Jack Christensen
b2e5c4ff6e Add "char" array 2022-01-20 18:00:43 -06:00
Jack Christensen
bcf4931a7e Convert "char" to Codec 2022-01-20 17:58:57 -06:00
Jack Christensen
05d532b5df Fix connect when receiving NoticeResponse
refs #102
2022-01-20 16:41:47 -06:00
Jack Christensen
99fb8cf2f3 Convert timestamp and timestamptz to Codec 2022-01-18 21:49:38 -06:00
Jack Christensen
8b27725f5b Convert json and jsonb to Codec 2022-01-18 16:04:25 -06:00
Jack Christensen
8728acfca6 Add polygon array 2022-01-18 12:05:28 -06:00
Jack Christensen
abd7e98f31 Convert polygon to Codec 2022-01-18 12:04:17 -06:00
Jack Christensen
11d96fb928 Add path array 2022-01-18 11:52:44 -06:00
Jack Christensen
5ff0ad548b Convert path to Codec 2022-01-18 11:51:08 -06:00
Jack Christensen
0ae8de35c8 Add lseg array 2022-01-18 11:39:58 -06:00
Jack Christensen
869213a315 Convert lseg to Codec 2022-01-18 11:38:35 -06:00
Jack Christensen
97d8a408ea Add line array 2022-01-18 11:30:39 -06:00
Jack Christensen
06593ffb10 Convert line to Codec 2022-01-18 11:29:19 -06:00
Jack Christensen
bff036b366 Add interval array support 2022-01-15 18:48:10 -06:00
Jack Christensen
77e4b01553 Convert Interval to Codec 2022-01-15 18:46:28 -06:00
Jack Christensen
5472ce9f10 Reorder Box functions 2022-01-15 18:45:42 -06:00
Jack Christensen
67720623f8 Extract plan wrapper concept 2022-01-15 18:43:52 -06:00
Jack Christensen
a6863a7dd2 Convert Hstore to Codec 2022-01-15 17:47:37 -06:00
Jack Christensen
313254c75d Convert float4 and float8 to Codec 2022-01-15 11:12:06 -06:00
Jack Christensen
05598d4ca6 Convert inet and cidr to codec 2022-01-15 09:48:21 -06:00
Oleg Lomaka
ccc7cc2931 Assign Numeric to *big.Rat 2022-01-14 17:16:04 -06:00
Jack Christensen
f743007fb4 Restore array support to pgxtype.LoadDataType 2022-01-11 20:49:20 -06:00
Jack Christensen
ae9be0b99e Replace EnumType with EnumCodec 2022-01-11 20:46:10 -06:00
Jack Christensen
b57e0c419b Convert Date to Codec 2022-01-10 21:02:20 -06:00
Jack Christensen
f4a9d84e32 Add CID, OID, and XID arrays 2022-01-09 00:41:25 -06:00
Jack Christensen
eec82c9433 Replace CID, OID, OIDValue, and XID with Uint32 2022-01-09 00:35:49 -06:00
Jack Christensen
b26618ac95 Prevent try underlying type from acting on a value
This is necessary to prevent infinite recursion where a base type is
wrapped and then unwrapped.
2022-01-09 00:25:20 -06:00
Jack Christensen
ad79dccd99 Builtin types are automatically wrapped if necessary 2022-01-08 23:44:53 -06:00
Jack Christensen
8aaf235595 Standardize scanner and valuer for int types 2022-01-08 21:41:08 -06:00
Jack Christensen
dc05bd9fee Remove old code gen 2022-01-08 20:51:44 -06:00
Jack Christensen
6be0c3f6b2 Remove convertToBoolForEncode 2022-01-08 20:51:28 -06:00
Jack Christensen
6cb3439492 Fix encode plan names 2022-01-08 18:35:54 -06:00
Jack Christensen
f573cde09c Convert bytea to Codec 2022-01-08 18:33:08 -06:00
Jack Christensen
c6f3e03a61 BoolCodec EncodePlan actually plans 2022-01-08 17:01:32 -06:00
Jack Christensen
f5347987a6 Add bit and varbit array support 2022-01-08 16:53:15 -06:00
Jack Christensen
17513d175a Convert bit and varbit to Codec 2022-01-08 16:49:58 -06:00
Jack Christensen
1eee7987e1 Use TextCodec for aclitem type 2022-01-08 16:24:05 -06:00
Jack Christensen
313569db56 Remove useless allocations 2022-01-08 13:38:56 -06:00
Jack Christensen
4aff33603d Remove useless receivers 2022-01-08 13:37:23 -06:00
Jack Christensen
6a6878bafd Fix Box, Circle, and Point NULL 2022-01-08 13:29:47 -06:00
Jack Christensen
58d2d8e453 Add name array 2022-01-08 13:16:09 -06:00
Jack Christensen
fcc9dcc960 Convert text to Codec
This also entailed updating and deleting types that depended on Text.
2022-01-08 13:13:26 -06:00
Jack Christensen
a7d4a22001 Add point array support 2022-01-08 09:37:40 -06:00
Jack Christensen
2b0afbb408 Convert point to Codec 2022-01-08 09:33:08 -06:00
Jack Christensen
dcaf102f8e Introduce PlanEncode 2022-01-05 08:59:21 -06:00
Jack Christensen
ac80fa5b33 Remove proposed v5 type system before Codec 2022-01-04 20:04:48 -06:00
Jack Christensen
1a189db041 Remove ValueTranscoder interface 2022-01-04 19:59:32 -06:00
Jack Christensen
b90f92d2d2 Remove obsolete ArrayType 2022-01-04 19:58:40 -06:00
Jack Christensen
80ae29d056 Inline Encoder interface to Codec 2022-01-04 19:56:16 -06:00
Jack Christensen
6a32f938f1 Extract codecDecodeToTextFormat 2022-01-03 21:23:29 -06:00
Jack Christensen
f7c0c31e87 Extract DecodeValue helper 2022-01-03 21:20:52 -06:00
Jack Christensen
ad6ee2bd56 Add circle array 2022-01-03 20:56:12 -06:00
Jack Christensen
eb2c37a983 Convert circle to Codec 2022-01-03 20:53:50 -06:00
Jack Christensen
5c4560eed3 Add box array 2022-01-03 20:30:57 -06:00
Jack Christensen
298a5f0dca Convert box to Codec 2022-01-03 20:27:44 -06:00
Jack Christensen
4b1121c2a9 Convert bool to Codec 2022-01-01 18:18:47 -06:00
Jack Christensen
0c0e28a70a Convert int4 and int8 to new system
Note: purposely disabled some tests and composite support that needs to
be restored later in v5 development.
2022-01-01 17:26:39 -06:00
Jack Christensen
40fb889605 Temporarily remove composite and record support 2022-01-01 11:41:08 -06:00
Jack Christensen
ffa1fdd66e Temporarily remove range type support 2022-01-01 11:32:52 -06:00
Jack Christensen
d2cf33ed40 Add UnmarshalJSON to generated ints 2022-01-01 11:25:26 -06:00
Jack Christensen
0403c34ae3 Prepare for generating tests 2022-01-01 11:22:14 -06:00
Jack Christensen
1b353297d5 Prepare for generating int types 2022-01-01 11:11:31 -06:00
Jack Christensen
6c7f1593e8 Use rake to build generated code 2022-01-01 10:54:54 -06:00
Jack Christensen
93cc21199f All tests passing 2021-12-31 17:54:47 -06:00
Jack Christensen
1516a0d8db pgtype tests pass 2021-12-31 17:51:18 -06:00
Jack Christensen
19ae359e9e Add binary scan plans for int2 2021-12-31 17:03:31 -06:00
Jack Christensen
77b9b59622 Generate text to int scan plans 2021-12-31 13:07:08 -06:00
Jack Christensen
b99d95470f Fix tryBaseTypeScanPlan infinite recursion 2021-12-31 12:32:46 -06:00
Jack Christensen
c39924d0c6 Improvements to ArrayCodec 2021-12-31 12:28:45 -06:00
Jack Christensen
9fc8f9b3a8 Initial passing tests for main pgx package 2021-12-30 18:12:47 -06:00
Oscar
109c4c2d95 fix standby mode validation 2021-12-28 09:31:38 -06:00
Oscar
3aaf3409ce remove redundant map value type 2021-12-28 09:31:38 -06:00
Oscar
3ce8a835e1 add support for read-only, primary, standby, prefer-standby target_session_attributes 2021-12-28 09:31:38 -06:00
Blake Embrey
a1852214fe Keep status connecting after tls 2021-12-27 14:28:33 -06:00
Blake Embrey
b148a14bbe Fix defer usage 2021-12-27 14:28:33 -06:00
Blake Embrey
01a6923376 Rename fn to new 2021-12-27 14:28:33 -06:00
Blake Embrey
024de4c8f3 Unwatch and re-watch tls 2021-12-27 14:28:33 -06:00
Blake Embrey
c0a0be876d Fix TLS connection timeout 2021-12-27 14:28:33 -06:00
Jack Christensen
58b7486343 Initial codec support for int2 and int2[] 2021-12-23 13:12:54 -06:00
Jack Christensen
14b5053209 Merge remote-tracking branch 'pgconn/master' into v5-dev 2021-12-18 08:20:53 -06:00
James Hartig
5a5260b73d feat: support port in ip from LookupFunc to override config
Fixes #97
2021-12-18 08:16:31 -06:00
Jack Christensen
d2dc20af81 Link to extensions 2021-12-11 15:32:52 -06:00
Jack Christensen
b2569172d8 Fix typo in example 2021-12-11 14:55:02 -06:00
Jack Christensen
7c5dbde59e Upgrade remaining dependencies 2021-12-11 14:54:25 -06:00
Jack Christensen
8e2e8a7009 Remove external log adapters 2021-12-11 14:52:31 -06:00
Jack Christensen
ef2b70edad Remove github.com/gofrs/uuid test dependency 2021-12-11 14:37:02 -06:00
Jack Christensen
731312fea8 Remove github.com/shopspring/decimal test dependency 2021-12-11 14:32:32 -06:00
Jack Christensen
9ab821620f Remove github.com/Masterminds/semver/v3 test dependency 2021-12-11 14:27:00 -06:00
Jack Christensen
1b416b36dc Finish temp removal of PG < 14 from CI 2021-12-11 14:26:04 -06:00
Jack Christensen
066908d4f8 Temporarily remove all PG versions but 14 from CI
Same issue as the previous commit removing CockroachDB. The numeric type
only supports infinity on PG 14, and there is no easy way in the current
test structure to skip tests based on server version.
2021-12-11 14:15:22 -06:00
Jack Christensen
5fbf907471 Temporarily remove cockroachdb from CI
pgtype has a ton of tests that don't work on CockroachDB. And because of
how the tests are structured it is difficult to skip just those tests.

pgtype may have significant changes before v5 is released so delay
updating these tests.
2021-12-11 14:09:37 -06:00
Jack Christensen
9ae7452196 Remove Go 1.16 from CI
By the time v5 is released 1.17 will be the minimum supported version.
May as well save some CI time in the meantime.
2021-12-11 14:07:52 -06:00
Jack Christensen
8c9646dbfe Remove github.com/cockroachdb/apd test dependency 2021-12-11 13:45:37 -06:00
Jack Christensen
6b2a0d99a2 Run CI on v5-dev branch 2021-12-11 13:37:13 -06:00
Jack Christensen
81168a61d1 Update go.mod go version to 1.17 2021-12-11 13:32:50 -06:00
Jack Christensen
85b08ac663 Fix some previously broken comment links 2021-12-11 13:30:36 -06:00
Jack Christensen
72cc95e4dd Bump module version to v5 2021-12-11 13:29:03 -06:00
Jack Christensen
390bd79757 Add array integration benchmarks 2021-12-11 09:19:11 -06:00
Jack Christensen
d9e53647ec Use idiomatic casing 2021-12-11 09:08:05 -06:00
Jack Christensen
fbbf403cf2 Update changelog 2021-12-11 08:56:41 -06:00
Jack Christensen
0e293b966c Finish import of pgconn 2021-12-04 14:06:57 -06:00
Jack Christensen
5c36639f09 Merge branch 'pgconnimport' into v5-dev 2021-12-04 13:51:50 -06:00
Jack Christensen
19ec4d505f Import to pgx main repo in pgconn subdir 2021-12-04 13:51:24 -06:00
Jack Christensen
7e13db4538 Finish import of pgtype repo
Fix some tests that broke by merging repos
Tweak readme wording
2021-12-04 13:45:57 -06:00
Jack Christensen
7aeb42b80c Merge branch 'pgtypeimport' into v5-dev 2021-12-04 13:10:07 -06:00
Jack Christensen
44214b7854 Import to pgx main repo in pgtype subdir 2021-12-04 13:07:54 -06:00
Jack Christensen
550cc7b529 wip 2021-12-04 12:53:20 -06:00
Jack Christensen
e22675d20b ValueTranscoder uses new interfaces 2021-12-04 12:45:20 -06:00
Jack Christensen
8f454e4cd6 Add initial ParamEncoder and ResultDecoder support to core types 2021-12-04 11:36:50 -06:00
Torkel Rogstad
75446032b9 Normalize UTC timestamps to comply with stdlib 2021-12-04 10:22:49 -06:00
Jack Christensen
2226a5e14e Remove explicit https://github.com/gofrs/uuid integration
Better integration is now enabled by github.com/jackc/pgx-gofrs-uuid.
2021-11-29 12:55:23 -05:00
Jack Christensen
0d9bd0366b Add Numeric.MarshalJSON 2021-11-29 12:55:23 -05:00
Jack Christensen
55195b3a64 Add Numeric.Getter 2021-11-29 12:55:23 -05:00
Jack Christensen
1a3e5b0266 Remove explicit shopspring/decimal integration
Better integration is now enabled by github.com/jackc/pgx-shopspring-decimal.
2021-11-29 12:55:23 -05:00
Jack Christensen
55ad9007cd Finish Numeric changes for easy integration with 3rd party types 2021-11-29 12:55:23 -05:00
Jack Christensen
c0eae32e8b Remove ConnInfo.DeepCopy() 2021-11-29 12:55:23 -05:00
Jack Christensen
63a8fe12d7 Add hooks for efficiently integrating with 3rd party types 2021-11-29 12:55:23 -05:00
Jack Christensen
2886673a3c Add full query decoding benchmarks 2021-11-29 12:55:23 -05:00
Jack Christensen
11d351dd75 Replace Status with Valid to conform to database/sql style
https://github.com/jackc/pgx/issues/1060
2021-11-29 12:55:19 -05:00
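For readers following the v5 branch, the shape this change adopts mirrors database/sql's null types (compare sql.NullString). A brief, hedged illustration; the github.com/jackc/pgx/v5/pgtype import path is the eventual module path and is an assumption at this point in the branch's history:

    package main

    import (
        "fmt"

        "github.com/jackc/pgx/v5/pgtype"
    )

    func main() {
        name := pgtype.Text{String: "alice", Valid: true}
        missing := pgtype.Text{} // Valid is false, i.e. SQL NULL
        fmt.Println(name.Valid, missing.Valid)
    }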
Jack Christensen
37044f47f5 Remove tests against github.com/lib/pq 2021-11-29 12:54:23 -05:00
Jack Christensen
cf0de913ee Use pgtype.UUID for test instead of ext UUID 2021-11-29 12:29:02 -05:00
Jack Christensen
9fdaf7da81 Release v4.14.1 2021-11-28 22:46:52 -05:00
Jack Christensen
5b91cac132 Update to pgtype v1.9.1 2021-11-28 22:43:59 -05:00
Jack Christensen
e95ebc02d9 Release v1.9.1 2021-11-28 16:29:42 -06:00
Jack Christensen
84bb47fb26 Fix: Timestamp DecodeBinary is in UTC
Preserve previously existing behavior.

fixes #138
2021-11-24 07:57:51 -06:00
Jille Timmermans
1d606a91b8 Change constant definition syntax so they show up in godoc
for TxAccessMode, TxDeferrableMode and TxIsoLevel

After this commit, Godoc understands these are the valid values of these
types and shows them together in the documentation.
2021-11-22 09:47:15 -06:00
Jack Christensen
058f346079 Start pgxpool background health check after initial connections
Otherwise the health check and the initial connection creation may both
create connections. While this generally wouldn't be a real problem, it
did cause TestPoolBackgroundChecksMinConns to flicker on CI.
2021-11-20 11:19:10 -06:00
Jack Christensen
a55e88ee48 Do not run CI with verbose
Makes it very difficult to see what is actually failing.
2021-11-20 10:53:51 -06:00
Jack Christensen
9da0a7aef7 Release v4.14.0 2021-11-20 10:48:31 -06:00
Jack Christensen
968a00094e Further increase wait times on timing tests for CI 2021-11-20 10:43:13 -06:00
Jack Christensen
885f39c368 Increase wait time in timing sensitive test 2021-11-20 10:28:52 -06:00
Jack Christensen
09c360f8aa Update supported / tested platforms 2021-11-20 10:25:10 -06:00
Jack Christensen
a9f8400b01 Upgrade pgx dependency
Fix test that was comparing times directly instead of using Equal.
2021-11-20 10:20:02 -06:00
Jack Christensen
e80bc75409 Release v1.9.0 2021-11-20 10:09:43 -06:00
Jack Christensen
662ecb496f Release v1.10.1 2021-11-20 09:56:46 -06:00
Jack Christensen
a457da8bff Unpin extendedQueryBuilder memory immediately after use
refs #1110
2021-11-13 17:12:09 -06:00
Jack Christensen
851091fdf4 Improve Query docs regarding error detection 2021-11-13 04:42:28 -06:00
Jack Christensen
146268e829 Move context test above bad statement cleanup 2021-11-13 04:12:35 -06:00
Georges Varouchas
cd7dcd5802 have lru.Get() always check if context is already expired 2021-11-13 04:10:45 -06:00
Georges Varouchas
141f132ae7 add a unit test on LRU context check
TestLRUContext highlights the lack of context check when querying for a cached value
2021-11-13 04:10:45 -06:00
Martin Ashby
40ecac487c Remove unimplemented JSON marshalling for FunctionCall type. 2021-11-06 16:39:43 -05:00
Martin Ashby
3d9a54f092 Fix unit test: it should return after any error is returned from the
Decode function, whether expected or not, rather than continuing and
trying to compare invalid decoded results.

Extend the unit test slightly to check the header.

Remove go-test/deep dependency in favour of standard library reflect
package.
2021-11-06 16:39:43 -05:00
Martin Ashby
9275da562f Added FunctionCall support
Added support for FunctionCall message as per
https://www.postgresql.org/docs/11/protocol-message-formats.html

Adds unit test for Encode / Decode cycle and invalid message format
errors.

Fixes https://github.com/jackc/pgproto3/issues/23
2021-11-06 16:39:43 -05:00
Jack Christensen
162dc65eff Make ContextWatcher concurrency safe
fixes #94
2021-11-06 08:57:49 -05:00
Jim Tsao
decb75f242 Add numeric tests for infinity encoding/decoding 2021-11-01 07:28:13 -05:00
Jim Tsao
14c515db82 Add infinity support for Numeric Binary Encode/Decode 2021-11-01 07:28:13 -05:00
Jim Tsao
8890a746d7 Add infinity support for Numeric Text Encode/Decode 2021-11-01 07:28:13 -05:00
Jim Tsao
001b3166b9 Add infinity support for Numeric AssignTo 2021-11-01 07:28:13 -05:00
Jim Tsao
e0f9fc5212 Add infinity support for Numeric Set/Get 2021-11-01 07:28:13 -05:00
Daniel
6cd6c43dcb Fix failing test 2021-11-01 07:26:49 -05:00
Daniel
8bc6aa6b49 Fix goroutine leak and unclosed connections 2021-10-30 10:25:09 -05:00
Daniel
36708a1cc6 Eager initialize minpoolsize on connect 2021-10-30 10:25:09 -05:00
Jack Christensen
a29019de9d Fix binary decoding of very large numerics.
fixes #133
2021-10-30 10:17:58 -05:00
Yuli Khodorkovskiy
5c447ff35d Fix JSON output for SASL{Response,InitialResponse}
Hex encoding the Data field in the SASL responses made debugging SCRAM
more difficult rather than helping.

Before:

F{"Type":"SASLResponse","Data":"633d655377732c723d4d4d4e4e6d666b536f5862694a68385833466d324f2b4d77787354692f4550753052414157484b7a306b7376336c5747392f4d4a5267504d2c703d616742664b533164383937674b4f4a6d4c7171626c49326b6b4a506f2b58354359516c63473458357657343d"}
F{"Type":"SASLInitialResponse","AuthMechanism":"SCRAM-SHA-256","Data":"792c2c6e3d2c723d4d4d4e4e6d666b536f5862694a68385833466d324f2b4d77"}

After:

F{"Type":"SASLResponse","Data":"c=eSws,r=9dR43UQLL1KbrKKl4/QbxjqgVjZYR9mqnx3rFBiI7R/1pp5oeVYMGhXj,p=b2hmuvTvWn2xN0fclm+O4TwLAarRM8xoHSN7jsKDHAU="}
F{"Type":"SASLInitialResponse","AuthMechanism":"SCRAM-SHA-256","Data":"y,,n=,r=9dR43UQLL1KbrKKl4/Qbxjqg"}
2021-10-30 09:08:58 -05:00
Jack Christensen
2caf113f1b Fix parsing text array with negative bounds
e.g. '[-4:-2]={1,2,3}'

fixes #132
2021-10-30 09:00:48 -05:00
urso
044ba47522 Add zerologadapter.NewContextLogger
This change introduces a new zerologadapter that allows
users to pass the actual logger via context.Context. Especially HTTP
middleware might choose to use `(*zerolog.Logger).WithContext` and
`zerolog.Ctx`. Allowing users to extract the logger from the context
keeps the full enriched logger available when pgx emits logs.
2021-10-30 08:52:59 -05:00
Adrian Sieger
b72f8084b5 implement nullable values for hstore maps 2021-10-30 08:38:48 -05:00
Jack Christensen
5cb98120c1 Add tests for big time and port fix to Timestamp.DecodeBinary
https://github.com/jackc/pgtype/pull/128
2021-10-23 09:57:10 -05:00
Lorenzo Paoliani
0d20d1241e Fixes a typo in the docs
I think this meant to say "until a notification is received" rather than "until a context is received".
2021-10-16 08:11:08 -05:00
Jim Tsao
e28459e9d1 Fix int64 overflow error 2021-10-08 14:46:26 +02:00
Jeff Widman
3599f64629 Tweak wording 2021-10-02 07:42:32 -05:00
Jeff Widman
ce81f577a9 Clarify that COPY is a specific command
COPY is a specific postgres command.
2021-10-02 07:41:49 -05:00
Rueian
290ee79d1e feat: remove unnecessary pending for CopyInResponse 2021-10-02 07:36:21 -05:00
Jan Dubsky
e53b7aebab Add support for fmt.Stringer and driver.Valuer in String fields encoding 2021-09-25 09:31:55 -05:00
Dan Gillis
b28c053c39 Add comments 2021-09-25 09:26:59 -05:00
Dan Gillis
bb8d7ffc87 Add comments
Add several comments and correct ConnectConfig
2021-09-25 09:26:59 -05:00
Dan Gillis
763050e10e Add comments
Add Stat struct comment and correct Stat.MaxConns method comment
2021-09-25 09:26:59 -05:00
Jack Christensen
0b5b7c0d1e Fix BPChar.AssignTo **rune
https://github.com/jackc/pgtype/issues/123
2021-09-25 09:25:01 -05:00
Thomas Frössman
255276c390 Add context options to zerologadapter
WithContextFunc adds possibility to get request scoped values from the
ctx.Context before logging lines.

WithoutPGXModule disables adding module:pgx to the default logger context.
2021-09-18 10:36:03 -05:00
Jack Christensen
b31b6d7a1a Upgrade github.com/jackc/puddle 2021-09-11 11:20:28 -05:00
Dan Gillis
8876b3a4ff Add comments 2021-09-11 11:07:31 -05:00
Jack Christensen
435605a59c Fix pgxpool BatchResults for QueryFunc 2021-09-11 11:05:44 -05:00
Jack Christensen
693c7c7f7d Fix NULL being lost when scanning unknown OID into sql.Scanner
https://github.com/jackc/pgx/issues/1078
2021-09-11 10:59:26 -05:00
Jack Christensen
38cd1b40aa Add QueryFunc to BatchResults
https://github.com/jackc/pgx/issues/1048#issuecomment-915123822
2021-09-11 10:32:02 -05:00
Kei Kamikawa
3bee0c6398 removed lines to read conn 2021-08-28 08:52:37 -05:00
Jack Christensen
90af821478 Remove old Travis CI code 2021-08-26 21:09:46 -05:00
Jack Christensen
30d7638296 Fix zeronull.Float8 2021-08-26 15:42:47 -05:00
Jack Christensen
39aa071b15 Add zeronull float8 2021-08-26 13:21:02 -05:00
Rulin Tang
5320ad87c8 remove query row connection release 2021-08-23 11:39:45 -05:00
Rulin Tang
c9e271df29 add missing pgx pool release for QueryRow 2021-08-23 11:39:45 -05:00
Carl Dunham
94f8441f4e Fix #119: add support for bare IP address as input for Inet 2021-08-12 17:50:31 -05:00
Don2Quixote
23b4656fc5 Made error check conditions clearer 2021-08-09 08:41:07 -05:00
Eli Treuherz
db84905b7f Add NullDecimal to shopspring-numeric
The shopspring/decimal package provides a NullDecimal struct intended
for use with nullable SQL NUMERICs and numbers. It has Scanner and
Valuer implementations already, but adding it to this package allows
it to be used with the binary encoding as well.

The implementation is very straightforward, but the tests have been made
slightly more complicated. The previous version wasn't testing the
decimal.Decimal cases, and this change adds those as well as new
NullDecimal cases. I've added some logic to the test harness to catch
these as you need to use the Equals method to properly compare Decimals.
2021-08-07 08:23:02 -05:00
Jack Christensen
6bda09691d Fix hstore binary null decoding
The bug advanced the read pointer by the length of the value even when
the value was NULL. Since NULL is indicated by a -1 length, it actually
decremented the read pointer.
2021-07-31 11:06:03 -05:00
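A schematic read step, not pgtype's exact code, illustrating the corrected behavior: a -1 length marks SQL NULL and carries no value bytes, so only the 4-byte length word is consumed:

    package example

    import "encoding/binary"

    // readHstoreValue reads one length-prefixed value from a binary hstore
    // payload. A length of -1 marks SQL NULL, so the read position is only
    // advanced past the 4-byte length word in that case.
    func readHstoreValue(src []byte, rp int) (value []byte, null bool, newRP int) {
        valueLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
        rp += 4
        if valueLen < 0 {
            return nil, true, rp
        }
        return src[rp : rp+valueLen], false, rp + valueLen
    }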
Jack Christensen
8f33ed07cd Release v4.13.0 2021-07-24 10:59:24 -05:00
Jack Christensen
24f1d26fde Upgrade pgconn and pgtype 2021-07-24 10:55:24 -05:00
Jack Christensen
53f5fed36c Release v1.10.0 2021-07-24 10:52:26 -05:00
Jack Christensen
e26c6b4e3d Release v1.8.1 2021-07-24 10:50:22 -05:00
Jack Christensen
377eed5d2f Cleaning go.sum 2021-07-24 10:48:07 -05:00
Jack Christensen
c16a4f7d6a Revert "Temporarily delete tests and pgxtype to break recursive dependency with pgx"
This reverts commit 32e20a603178b49fb189d1be971d0fb6960cabb2.
2021-07-24 10:40:30 -05:00
Jack Christensen
640aa07df1 Temporary step to clean up go.sum 2021-07-24 10:39:13 -05:00
Jack Christensen
d89c8390a5 Update dependencies and go mod tidy 2021-07-24 10:25:38 -05:00
Jack Christensen
32e20a6031 Temporarily delete tests and pgxtype to break recursive dependency with pgx 2021-07-24 10:16:00 -05:00
Jack Christensen
7d0a620dda Upgrade pgx version used for tests 2021-07-24 09:20:54 -05:00
Jack Christensen
6996e8d6c5 Context errors returned instead of net.Error
The net.Error caused by using SetDeadline to implement context
cancellation shouldn't leak.

fixes #80
2021-07-24 09:09:22 -05:00
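A caller-side sketch of what this change enables, assuming pgconn's Exec and ReadAll signatures: cancellation and deadline errors can now be matched with errors.Is instead of type-asserting a net.Error:

    package example

    import (
        "context"
        "errors"
        "fmt"
        "time"

        "github.com/jackc/pgconn"
    )

    // runWithTimeout shows the caller-side effect: the error returned on a
    // deadline now matches the context error directly.
    func runWithTimeout(pgConn *pgconn.PgConn, sql string) error {
        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        defer cancel()

        _, err := pgConn.Exec(ctx, sql).ReadAll()
        if errors.Is(err, context.DeadlineExceeded) {
            return fmt.Errorf("query timed out: %w", err)
        }
        return err
    }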
Michael Darr
59fa1868a7 Support time durations for simple protocol
Signed-off-by: Michael Darr <michael.e.darr@gmail.com>
2021-07-17 09:01:57 -05:00
KeiichiHirobe
1470d69c58 go mod tidy 2021-07-17 08:39:31 -05:00
KeiichiHirobe
48f39340b3 switch from github.com/go-kit/kit/log to github.com/go-kit/log 2021-07-17 08:39:31 -05:00
Jack Christensen
9ee04e87e3 Release v4.12.0 2021-07-10 10:06:00 -05:00
Jack Christensen
785f279272 Upgrade dependencies 2021-07-10 10:02:16 -05:00
Jack Christensen
dcdc3eaec7 Release v1.8.0 2021-07-10 09:58:12 -05:00
Jack Christensen
13d454882b Release v1.9.0 2021-07-10 09:54:39 -05:00
Jack Christensen
5b7c6a3c8e Upgrade to pgproto3 v2.1.1 2021-07-10 09:54:24 -05:00
Michael Darr
a50d96d491 Make timeout error private
Signed-off-by: Michael Darr <michael.e.darr@gmail.com>
2021-07-07 18:14:36 -05:00
Michael Darr
9a9830c00d Always double-wrap contextAlreadyDoneError
Signed-off-by: Michael Darr <michael.e.darr@gmail.com>
2021-07-07 18:14:36 -05:00
Michael Darr
b3e64d3cdb Simplify SafeToRetry for ErrTimeout
Signed-off-by: Michael Darr <michael.e.darr@gmail.com>
2021-07-07 18:14:36 -05:00
Michael Darr
c0b4d3bc05 Implement timeout error
Signed-off-by: Michael Darr <michael.e.darr@gmail.com>
2021-07-07 18:14:36 -05:00
Yuli Khodorkovskiy
033ca7d47f Fix unexpected EOF failure for StartupMessage 2021-07-07 18:10:44 -05:00
Nicholas Wilson
aafa04c156 Use zap.Any for handling interface{} -> zap.Field conversion
zap.Any falls back to zap.Reflect but is better for this case because
it first checks for the types that zap handles specially. For example,
zap.Reflect would treat time.Duration or error as an untyped int64 or
struct, while zap.Any detects these types and prints them properly.
2021-07-06 20:18:16 -05:00
Jack Christensen
6bce4a1878
Merge pull request #19 from gitstashpop/backend-unexpected-eof
Extend handling of unexpected EOF to the backend
2021-07-06 20:08:14 -05:00
Jack Christensen
aaef9bbc35
Merge branch 'master' into backend-unexpected-eof 2021-07-06 20:07:55 -05:00
Yuli Khodorkovskiy
2d3823838e Perform StartupMessage length validation
PG provides a maximum size for a StartupMessage:
https://doxygen.postgresql.org/pqcomm_8h.html#a4c50c668c551887ac3a49872130349e3

Limiting the size ensures a malicious user doesn't send an
overwhelmingly large StartupMessage which could DOS a Go binary that
uses pgproto3.
2021-07-06 20:01:33 -05:00
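A schematic guard, not pgproto3's exact code: the 10000-byte cap mirrors MAX_STARTUP_PACKET_LENGTH from the pqcomm.h page linked above, and the minimum of 8 (length word plus protocol version) is an assumption for illustration:

    package example

    import "fmt"

    // maxStartupPacketLength mirrors PostgreSQL's MAX_STARTUP_PACKET_LENGTH.
    const maxStartupPacketLength = 10000

    // validateStartupMessageLength rejects lengths outside a sane range before
    // any buffer for the message body is allocated.
    func validateStartupMessageLength(msgLen uint32) error {
        if msgLen < 8 || msgLen > maxStartupPacketLength {
            return fmt.Errorf("invalid startup message length: %d", msgLen)
        }
        return nil
    }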
Cameron Daniel
3eceab0f38 Maintain host bits for inet types 2021-07-06 19:59:41 -05:00
Yuli Khodorkovskiy
10c6c50ac9
Extend handling of unexpected EOF to the backend
In the original issue [1] and commit [2], support for unexpected EOF was
added to the frontend to detect when a connection was closed abruptly.
Additionally, this allows us to differentiate normal io.EOF errors from
unexpected errors in the backend.

[1] https://github.com/jackc/pgx/issues/662/
[2] 595780be0f
2021-07-01 17:53:14 -04:00
Joshua Brindle
a123e5b4e5 Add defaults for sslcert, sslkey, and sslrootcert
per https://www.postgresql.org/docs/current/libpq-ssl.html
psql will use client certs located in ~/.postgresql on posix systems
or %APPDATA%\postgresql on Windows systems.
2021-06-26 11:25:41 -05:00
Aliaksandr Mianzhynski
4a2209a1b4 Don't allocate buffer when tx opts are empty 2021-06-26 10:52:09 -05:00
Jack Christensen
2ca304d461 pgtype.Inet preserves masked address portion
fixes #111
2021-06-26 10:49:56 -05:00
mgoddard
bf76d1ed51 Solve issue with 'sslmode=verify-full' when there are multiple hosts 2021-06-19 10:14:17 -05:00
Sivabalan Thirunavukkarasu
bacf81fb4e Bumping versions for other dependencies 2021-06-19 10:06:47 -05:00
Sivabalan Thirunavukkarasu
cfcd61d0cb Updating dependency versions 2021-06-19 10:06:47 -05:00
Sivabalan Thirunavukkarasu
2c22da0155 Bumping versions for other dependencies 2021-06-19 10:06:24 -05:00
Sivabalan Thirunavukkarasu
821e0521e4 Updating dependency versions 2021-06-19 10:06:24 -05:00
Yuli Khodorkovskiy
7c9e840726 Add support for identifying authentication messages
The PostgreSQL protocol overloads 'p' messages with PasswordMessage,
SASLInitialResponse, SASLResponse, and GSSResponse. This patch allows
contextual identification of the message by setting the authType in the
frontend and then setting this value in the backend when an
AuthenticationResponseMessage is received.
2021-06-12 13:54:34 -05:00
Dmytro Haranzha
cabb58cc40 ResetSession hook is called before a connection is reused from pool for another query. 2021-06-12 13:48:31 -05:00
Yuli Khodorkovskiy
28c20e93c0 Fix json marshal/unmarshal implementations
Fix marshal/unmarshal for:
- authentication_{cleartext_password, md5_password, ok, sasl, sasl_continue, sasl_final}
- error_response
2021-05-29 11:06:05 -05:00
Henrique Vicente
9c2c389e06 json: fix implementation of json Unmarshalers.
* AuthenticationMD5Password was wrong and is not needed
* Bind was wrong
* ErrorResponse is not needed
* Minor improvements for reliability
2021-05-22 11:34:25 -05:00
Henrique Vicente
ba924e5715 json: Implement json.Unmarshaler for messages.
This will allow using pgmockproxy output as ingestion data for pgmock.
2021-05-22 11:34:25 -05:00
Ivan Daunis
5bca076182 Refactor to interface convert 2021-05-22 11:33:42 -05:00
Ivan Daunis
0977e29341 Support pointers of wrapping structs 2021-05-22 11:33:42 -05:00
Jack Christensen
fb42201c18 Fix default host when parsing URL without host but with port
fixes https://github.com/jackc/pgconn/issues/72
2021-05-14 18:39:31 -05:00
dkinder
00feeaa5c9 stdlib: style nit in RandomizeHostOrderFunc 2021-05-10 08:17:53 -05:00
dkinder
a8020a21e8 stdlib: add OptionBeforeConnect and randomizer
Fixes https://github.com/jackc/pgconn/issues/71
2021-05-10 08:17:53 -05:00
Rueian
cae98b5e45
Register JSONBArray at NewConnInfo() 2021-05-03 22:20:58 +08:00
Jack Christensen
4380e23ae1 CompositeTextScanner handles backslash escapes
fixes https://github.com/jackc/pgx/issues/874
2021-04-24 08:08:34 -05:00
alex
e722ca608c added url connection example 2021-04-17 09:56:17 -05:00
Jack Christensen
3f76b98073 Allow dbname query parameter in URL conn string
fixes #69
2021-04-09 18:20:06 -05:00
Andrew Kimball
3ab8941921 stdlib: Do not reuse ConnConfig strings
Previously, stdlib.RegisterConnConfig would sometimes reuse the same connection
string for different ConnConfig options (specifically, it happened when a connection
was open and then closed, and then a new, different connection was opened). This
behavior interferes with callers that expect that two connections with the same data
source name are connecting to the same backend database in the same way.

This fix updates stdlib.RegisterConnConfig to use an incrementing sequence
counter to uniquify all returned connection strings.

Fixes #947
2021-04-03 10:52:37 -05:00
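A minimal sketch, not the stdlib package's actual code, of the sequence-counter fix described above; the registry and key format here are illustrative only:

    package example

    import (
        "strconv"
        "sync"

        "github.com/jackc/pgx/v4"
    )

    var (
        mu       sync.Mutex
        seq      int64
        registry = map[string]*pgx.ConnConfig{}
    )

    // registerConnConfig hands out a unique key per registration, so two
    // otherwise identical configs never collide on the same connection string.
    func registerConnConfig(c *pgx.ConnConfig) string {
        mu.Lock()
        defer mu.Unlock()
        seq++
        key := "registeredConnConfig" + strconv.FormatInt(seq, 10)
        registry[key] = c
        return key
    }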
Jonathan Amsterdam
88ede6efb5 stdlib: implement Conn.ResetSession
This prevents closed connections from being returned
by `database.sql.DB.Conn`.

Fixes #974.
2021-03-26 10:25:07 -05:00
Jack Christensen
909b81a163 Release v4.11.0 2021-03-25 10:21:28 -04:00
Jack Christensen
799cf1f04b Update copyright date 2021-03-25 10:08:09 -04:00
Jack Christensen
096f8ca754 Fix test in other time zones 2021-03-25 10:07:34 -04:00
Jack Christensen
2114ca9458 Update pgconn and pgtype 2021-03-25 09:59:38 -04:00
Jack Christensen
a49f4bb135 Use errors instead of golang.org/x/xerrors 2021-03-25 09:55:12 -04:00
Jack Christensen
4a3a424dff Release v1.7.0 2021-03-25 09:16:43 -04:00
Jack Christensen
464a7d88d9 Release v1.8.1 2021-03-25 09:15:45 -04:00
Jack Christensen
cdb667b5b0 Update copyright date 2021-03-25 09:15:45 -04:00
Jack Christensen
63e2dbefaf Update copyright date 2021-03-25 09:08:27 -04:00
Jack Christensen
dd160540c4 Use Go 1.13 errors instead of xerrors 2021-03-25 09:08:27 -04:00
Ethan Pailes
e8f75629d0 upgrade x/crypto to avoid CVE-2020-9283
I found this when scanning for security issues in some
dependencies. I doubt that this CVE will impact pgconn
since I don't think it uses the ssh crypto module, but
I think it is worth being fairly aggressive about upgrading
security-sensitive libraries, and this doesn't seem to be
a breaking change.
2021-03-23 07:44:35 -05:00
Jack Christensen
80147fd7cc Use sync.Once to guard pool.Close
This avoids the small possibility of two concurrent Close calls
double-closing the channel.
2021-03-19 08:58:56 -05:00
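A minimal sketch, assuming a close-signal channel, of the sync.Once guard described above:

    package example

    import "sync"

    // pool is a stand-in type; the point is that sync.Once makes Close
    // idempotent, so the close-signal channel is closed exactly once even
    // under concurrent calls.
    type pool struct {
        closeOnce sync.Once
        closeChan chan struct{}
    }

    func (p *pool) Close() {
        p.closeOnce.Do(func() {
            close(p.closeChan)
        })
    }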
Matt Schultz
fe366b2cf3 Prevent panics caused by attempting to close an already closed pgxpool.Pool. 2021-03-19 08:58:56 -05:00
Matt Schultz
a0028cbd0d Handle SendBatch calls on closed transactions with nil connections. This was previously panicking due to a nil pointer dereference, as exposed in the provided unit test. 2021-03-19 08:38:19 -05:00
Jack Christensen
495d482f20 Fix PG version extraction in tests 2021-03-13 07:55:53 -06:00
Jack Christensen
1fcefdc73f Fix BeginTxFunc not passing txOptions
fixes #961
2021-03-13 07:43:08 -06:00
Rusakow Andrew
292539a590 Add comment support when sanitizing SQL queries 2021-03-13 07:31:56 -06:00
drewdogg
aa89720576 go 1.13 2021-03-13 07:23:29 -06:00
Jack Christensen
00704ce8b7 Skip test on too old PostgreSQL 2021-03-13 07:18:26 -06:00
Jack Christensen
e93da6c744 Fix ignored deferred error with database/sql QueryRow
fixes #958
2021-03-13 06:52:58 -06:00
Andrey Borodin
8990c125cf Stop fallback on ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION 2021-03-12 08:31:29 -06:00
Andrey Borodin
b6027e37f4 Stop fallback in case of invalid password 2021-03-12 08:31:29 -06:00
Andrey Borodin
70be4b4a02 Fix incoherent type assignment 2021-03-12 08:31:29 -06:00
Andrey Borodin
26ccb4ee08 Resume fallback on server error
When the server responds with "TLS required" or "too many connections for role", fallbacks are not traversed any further. This could be OK, but fallbacks without TLS are added automatically, so if we have multiple hosts requiring TLS we never traverse beyond the first one.
2021-03-12 08:31:29 -06:00
Jack Christensen
0f1bda20b0 Fix numeric NaN support
fixes #93
2021-03-11 19:49:03 -06:00
Jack Christensen
38ab93613b Make CopyFrom log message more specific 2021-03-11 19:22:40 -06:00
Patrick Hemmer
41fa6e844c Add logging on CopyFrom
Logging previously only logged statement results on Exec() and Query(), but not CopyFrom(). This change makes it consistent. 2021-03-11 19:21:42 -06:00
2021-03-11 19:21:42 -06:00
Jack Christensen
0cc35d7a60 Update supported versions 2021-03-06 17:25:15 -06:00
Jack Christensen
d46e447a39 Add CockroachDB to CI 2021-03-06 17:21:45 -06:00
Jack Christensen
9e55cff611 Fix testing Fatalf in goroutines 2021-03-06 17:21:45 -06:00
Jack Christensen
5daa019e4e Update README.md to authentication test setup 2021-03-06 16:08:38 -06:00
Jack Christensen
0d307bcc5e Add CockroachDB to CI 2021-03-06 16:06:32 -06:00
Jack Christensen
1e905d8e38 Refactor connection strings into build matrix
This is in preparation for adding CockroachDB to the build matrix.
2021-03-06 15:20:03 -06:00
Jack Christensen
7de3392269 Manually specify all build matrix options
- Saves some CI time by only testing older version of Go once
- Specify connection
2021-03-06 15:15:03 -06:00
Jack Christensen
a0350a932a ci.yml consistently uses kebab case 2021-03-06 15:01:44 -06:00
Jack Christensen
cf5894e092 Use std errors instead of golang.org/x/xerrors
New error functionality was introduced in Go 1.13. pgconn only
officially supports 1.15+. Transitional xerrors package can now be
removed.
2021-03-06 14:45:33 -06:00
Jack Christensen
3b0400a0d4 Test Go 1.15 and 1.16 in CI 2021-03-06 14:42:22 -06:00
Georges Varouchas
36c8fb8257 fix #65 : close cleanupDone channel on "FATAL" messages 2021-03-06 09:33:36 -06:00
Georges Varouchas
b9a1aad8d9 add failing test to highlight issue #65
If the frontend returns a message with "Severity: FATAL", the
CleanupDone() channel still blocks even after calling conn.Close().
2021-03-06 09:33:36 -06:00
Jack Christensen
d245ed47f1 Increase pool release conn wait time for CI
No simple way around sleeping for certain tests.
2021-03-04 20:06:15 -06:00
Jack Christensen
f8c43c97ab Document prefer_simple_protocol config option 2021-03-04 19:58:52 -06:00
Jack Christensen
d9ac491657 Add prefer_simple_protocol option to ParseConfig
refs #650
2021-03-04 19:56:14 -06:00
Jack Christensen
1dc7133a63 Simplify CockroachDB detection 2021-02-27 10:40:06 -06:00
Jack Christensen
09371f21d0 Use JSON format compat with PG and CockroachDB 2021-02-27 10:28:45 -06:00
Jack Christensen
fea4bc4318 Use bigint in tests for compat. with CockroachDB 2021-02-27 10:26:14 -06:00
Jack Christensen
fb60d0780e Skip unsupported testing functionality on CockroachDB 2021-02-27 10:25:45 -06:00
Jack Christensen
2b63da6f12 Skip backend PID dependent tests on CockroachDB 2021-02-27 10:03:58 -06:00
Jack Christensen
1e15bdc874 Remove deferred from test tables where unneeded 2021-02-27 09:57:14 -06:00
Jack Christensen
ff6ab48e00 Skip tests with parameter description server issues 2021-02-27 09:52:51 -06:00
Jack Christensen
674cf70c51 Skip tests with known server issues 2021-02-27 09:52:51 -06:00
Jack Christensen
775d3b1049 Fake success on example 2021-02-27 09:52:51 -06:00
Jack Christensen
30d44c0369 Test code instead of error message for CockroachDB 2021-02-27 09:52:51 -06:00
Jack Christensen
378bd72b67 Allow flexible number types for CockroachDB 2021-02-27 09:52:51 -06:00
Jack Christensen
44733732e4 Skip number type sensitive test on CockroachDB 2021-02-27 09:52:51 -06:00
Jack Christensen
eaf3e84963 Add timeout to serialization failure test
When run with CockroachDB it appears to hang without the timeout.
2021-02-27 09:52:51 -06:00
Jack Christensen
bac0905915 Specify type for CockroachDB 2021-02-27 09:52:51 -06:00
Jack Christensen
ef2adcee08 Skip number type dependent test for CockroachDB 2021-02-27 09:52:51 -06:00
Jack Christensen
b01dd934e5 Allow different int size for CockroachDB 2021-02-27 09:52:51 -06:00
Jack Christensen
fe3a710a5a Skip test depending on exact int types on CockroachDB 2021-02-27 09:52:51 -06:00
Jack Christensen
9fe7962445 Skip another initially deferred test on CockroachDB 2021-02-27 09:52:51 -06:00
Jack Christensen
80e7f6b0d3 Skip testing circle type on CockroachDB 2021-02-27 09:52:51 -06:00
Jack Christensen
c588c47ddd Skip client_encoding test for CockroachDB 2021-02-27 09:52:51 -06:00
Jack Christensen
3a27bcd459 Skip standard_conforming_strings = off test for CockroachDB 2021-02-27 09:52:51 -06:00
Jack Christensen
5a16bad252 Skip deferred constraint tests on CockroachDB 2021-02-27 09:52:51 -06:00
Jack Christensen
c4e66b05de Test accepts bigint vs int for CockroachDB 2021-02-27 09:52:51 -06:00
Jack Christensen
f29aef4409 Skip test with serial on CockroachDB 2021-02-27 09:52:51 -06:00
Jack Christensen
2e50e59491 Skip isolation level test for CockroachDB 2021-02-27 09:52:51 -06:00
Jack Christensen
2f95f67ef5 Remove initially deferred when not needed
Makes test compatible with CockroachDB.
2021-02-27 09:52:51 -06:00
Jack Christensen
c8a7c89f15 Skip tests using pg_terminate_backend on CockroachDB 2021-02-27 09:52:51 -06:00
Jack Christensen
19441eee28 CockroachDB returns bigint instead of int 2021-02-27 09:52:51 -06:00
Jack Christensen
5f58e0e443 Cockroach DB uses different error code 2021-02-27 09:52:51 -06:00
Jack Christensen
ec5dfb472c Skip domain types for CockroachDB 2021-02-27 09:52:51 -06:00
Jack Christensen
a6d902777d Skip LISTEN / NOTIFY tests for CockroachDB 2021-02-27 09:52:51 -06:00
Jack Christensen
45c2b71377 Skip tests for cidr type for CockroachDB 2021-02-27 09:52:51 -06:00
Jack Christensen
909398127d Skip large objects tests for CockroachDB 2021-02-27 09:52:51 -06:00
Jack Christensen
37884a20e7 Use testify and more forgiving numeric equality
PostgreSQL generate_series can return type int while CockroachDB always
returns bigint.
2021-02-27 09:52:51 -06:00
Jack Christensen
ac2918b9a3 Add BeginFunc and BeginTxFunc
fixes #821
2021-02-20 18:30:18 -06:00
Jack Christensen
373bb84e9d Add *pgxpool.AcquireFunc
refs #821
2021-02-20 17:16:33 -06:00
Jack Christensen
fb88a34cb4 Skip test with known issue on CockroachDB 2021-02-20 16:40:16 -06:00
Jack Christensen
abeb337246 Accept nil *time.Time in Time.Set 2021-02-20 09:28:14 -06:00
Pau Sanchez
9b58ab2db8 Make ScanArgError fields public 2021-02-20 08:43:57 -06:00
Jack Christensen
4bde08d1a6 LRU statement cache tests handle CockroachDB 2021-02-13 11:19:09 -06:00
Jack Christensen
927a15124e Update supported versions 2021-02-13 10:59:00 -06:00
Jack Christensen
d05c52217a Initial CockroachDB testing 2021-02-13 10:47:22 -06:00
Jack Christensen
a78ab5bdcd Test should abort if it cannot set up the database 2021-02-13 09:39:42 -06:00
Pau Sanchez
8ad672475a Make ScanArgError public to allow identification of offending column
fixes #931
2021-02-13 08:56:11 -06:00
Jack Christensen
922508c785 Replace Travis with Github CI 2021-01-30 17:17:42 -06:00
Jack Christensen
9cf5752625 Change Github CI to run on master 2021-01-30 16:48:51 -06:00
Jack Christensen
609cd81d64 Remove obsolete Travis badge 2021-01-30 16:47:51 -06:00
Jack Christensen
ed0090f610 Use race detector on Github CI 2021-01-30 16:44:17 -06:00
Jack Christensen
c10c60cad5 Add build matrix for Go and PG 2021-01-30 16:38:58 -06:00
Jack Christensen
c107f909a2 Create user for Unix domain socket 2021-01-30 16:28:27 -06:00
Jack Christensen
eb32285906 Use native PostgreSQL package
Also remove travis integration.
2021-01-30 16:22:38 -06:00
Jack Christensen
94608a2482 Merge branch 'master' into github-ci-wip 2021-01-30 13:05:59 -06:00
Jack Christensen
74517d7315 Fix test when PGSSLMODE=disable
When PGSSLMODE=disable no fallback config was created which would cause
the check that fallbacks are deep copied to crash on:

copied.Fallbacks[0].Port = uint16(5433)
2021-01-30 13:03:56 -06:00
Jack Christensen
a9c2b5c3cb Revert "Try to debug failing CI test"
This reverts commit 6c2a423dbc25d634270b04ecaac7a1d644037945.
2021-01-30 13:01:27 -06:00
Jack Christensen
6c2a423dbc Try to debug failing CI test 2021-01-30 12:58:25 -06:00
Jack Christensen
63bcdfde61 Fix CI link 2021-01-30 12:48:58 -06:00
Jack Christensen
7d8845a9d8 Initial import from pgtype 2021-01-30 12:47:34 -06:00
Fabrice Aneche
14050e286d fix data map 2021-01-27 16:36:58 -06:00
Fabrice Aneche
9b59dd0346 added a kitlog level log adapter 2021-01-27 16:36:58 -06:00
davidsbond
aa8604b5c2 Add Ping method to pgxpool.Conn
Adds the Ping method to pgxpool.Conn, returning the result of calling Ping on
the underlying pgx.Conn.
2021-01-26 20:46:57 -06:00
Jack Christensen
120139a206 Add link to PG docs for connString format
fixes #62
2021-01-14 18:22:21 -06:00
Stephane Martin
6830cc0984 Fix: also consider \r, \f, \t as whitespace (jackc/pgtype#86) 2021-01-14 18:10:48 -06:00
Stephane Martin
59b79a2e49 Fix: escaped strings when they start or end with a newline char (jackc/pgtype#86) 2021-01-14 18:10:48 -06:00
Vasilii Novikov
1e141d8c32 Add tsrange array type. 2021-01-09 09:40:43 -06:00
Moshe Katz
724bf94515 use proper pgpass location on Windows 2021-01-09 09:36:36 -06:00
Robert Froehlich
210a217818 Add BeforeConnect callback to pgxpool.Config.
This allows for connection settings to be updated without having to create
a new pool. The callback is passed a copy of the pgx.ConnConfig and will
not impact existing live connections.
2021-01-02 15:08:59 -08:00
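A hedged usage sketch of the BeforeConnect hook; the field and function names follow the pgxpool API as described in this commit, and the password-refresh scenario is only an example:

    package example

    import (
        "context"

        "github.com/jackc/pgx/v4"
        "github.com/jackc/pgx/v4/pgxpool"
    )

    // newPool refreshes the password right before each new physical connection
    // is opened; existing connections are left untouched.
    func newPool(ctx context.Context, connString string, getPassword func() string) (*pgxpool.Pool, error) {
        config, err := pgxpool.ParseConfig(connString)
        if err != nil {
            return nil, err
        }
        config.BeforeConnect = func(ctx context.Context, cc *pgx.ConnConfig) error {
            cc.Password = getPassword()
            return nil
        }
        return pgxpool.ConnectConfig(ctx, config)
    }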
Jack Christensen
b23d41c399 Add CI badge 2020-12-28 13:11:36 -06:00
Jack Christensen
6e11216708 Yet another CI tweak 2020-12-28 13:02:34 -06:00
Jack Christensen
be67555d02 Another CI tweak 2020-12-28 12:56:41 -06:00
Jack Christensen
e2115310b7 More CI 2020-12-28 12:50:42 -06:00
Jack Christensen
ea92194719 Add PostgreSQL service to CI 2020-12-28 12:35:01 -06:00
Jack Christensen
97f8f6a25a
Begin CI with Github Actions 2020-12-28 12:22:56 -06:00
Yuli Khodorkovskiy
1213b69774 Add support to ErrorResponse for unlocalized severity
Add missing 'V' field for unlocalized severity added in PG versions 9.6
and greater. See https://www.postgresql.org/docs/current/protocol-error-fields.html
2020-12-28 10:22:55 -06:00
Jack Christensen
e276d9b832 Add more documentation to TxStatus 2020-12-23 12:21:34 -06:00
Jack Christensen
b77cee2a28 Fix scanning int into **sql.Scanner implementor
See https://github.com/jackc/pgx/issues/897.
2020-12-23 11:17:02 -06:00
Musbah Sinno
b664891853 Updated CopyFromSlice example in doc.go
The example had a syntax error and didn't explain what user was to begin with. Fixed it.
2020-12-23 10:42:48 -06:00
Jack Christensen
fc4d50f6c2 Release v4.10.1 2020-12-19 10:18:49 -06:00
Jack Christensen
9b0e57c4a9 Fix panic on query error with nil stmtcache
fixes #895
2020-12-19 10:17:41 -06:00
Jack Christensen
e8f959e0e1 Add QueryFunc
refs #821
2020-12-12 09:39:58 -06:00
Jack Christensen
0cbbf55dde Fix typo 2020-12-12 08:28:55 -06:00
ip.novikov
e0d22c1100 improve regexp
get shortest sequence between : and @
2020-12-05 22:11:52 +03:00
ip.novikov
a581247a12 Add check for url with broken password
replace broken password in parseConfigError message
2020-12-05 15:28:01 +03:00
Jack Christensen
e14638e125 Release v4.10.0 2020-12-03 19:50:07 -06:00
Jack Christensen
8c7667376e Update dependencies 2020-12-03 19:43:27 -06:00
Jack Christensen
82bac82213 stdlib: consider any Ping failure as fatal
refs #672
2020-12-03 19:41:03 -06:00
Jack Christensen
880863b70a Release v1.6.2 2020-12-03 19:20:11 -06:00
Jack Christensen
7a47d60bbd Update missing changelog entries for v1.6.0 and v1.6.1 2020-12-03 19:18:40 -06:00
Jack Christensen
3742d6209e Release v1.8.0 2020-12-03 19:12:18 -06:00
Jack Christensen
0a2b67c5c5 CopyFromSlice should remember error 2020-12-02 09:32:01 -06:00
Egon Elbre
e23c5bec24 add pgx.CopyFromSlice
CopyFromRows can often be inconvenient to use because you would
need to convert a typed slice to an [][]interface{}. Similarly,
implementing a custom CopyFromSource is too verbose for one-off things.

Add CopyFromSlice, which makes it easier to convert a slice to a
CopyFromSource. Example:

    copyCount, err := conn.CopyFrom(
        context.Background(),
        pgx.Identifier{"people"},
        []string{"first_name", "last_name", "age"},
        pgx.CopyFromSlice(len(rows), func(i int) ([]interface{}, error) {
            return []interface{}{rows[i].FirstName, rows[i].LastName, rows[i].Age}, nil
        }),
    )
2020-12-02 11:16:36 +02:00
Jack Christensen
00d516f5c4 Fix panic on assigning empty array to non-slice or array
See https://github.com/jackc/pgx/issues/881
2020-11-27 11:56:21 -06:00
Roman Tkachenko
88b6398594 Add CopyData and CopyDone messages support to Backend 2020-11-17 16:57:05 -06:00
Ethan Pailes
1df45d758d fix stmtcache invalidation
This patch fixes jackc/pgx#841. The meat of the fix lives
in [a PR to the pgconn repo][1]. This change just checks
for errors after executing a prepared statement and informs
the underlying stmtcache about them so that it can properly
clean up. We don't try to get fancy with retries or anything
like that, just return the error and allow the application to handle it.

I had to make [some][1] [changes][2] to the jackc/pgconn package as well
as this package.

Fixes #841

[1]: https://github.com/jackc/pgconn/pull/56
[2]: https://github.com/jackc/pgconn/pull/55
2020-11-12 08:15:13 -05:00
Jack Christensen
cba610c245 StatementErrored does not need context nor return an error 2020-11-11 15:52:59 -06:00
Jack Christensen
426124b32f Add stmtcache.LRU integration test against the database 2020-11-11 15:48:49 -06:00
Ethan Pailes
a885de9c94 stmtcache: add new StatementErrored method
This patch adds a new StatementErrored method to the stmtcache.
This routine MUST be called by users of the cache whenever the
execution of a statement results in an error. This will allow
the cache to make an intelligent decision about whether or not
the statement needs to be purged from the cache.
2020-11-11 11:18:21 -05:00
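A rough sketch of the intended call pattern, assuming a stmtcache.Cache named cache, an established *pgconn.PgConn named pgConn, and parameter slices in scope; the StatementErrored(sql, err) signature follows the simplification noted two entries above (illustration only, not the commit's code):

    sd, err := cache.Get(ctx, sql)
    if err != nil {
        return err
    }
    if _, err := pgConn.ExecPrepared(ctx, sd.Name, paramValues, paramFormats, resultFormats).Close(); err != nil {
        // let the cache decide whether the statement needs to be purged
        cache.StatementErrored(sql, err)
        return err
    }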
Jack Christensen
93c6b60429 Explicitly state pgxpool's concurrency-safety
refs #866
2020-11-09 17:01:51 -06:00
Jack Christensen
740b3a5115 Fix: Text array parsing disambiguates NULL and "NULL".
This solution is a little awkward, but it avoids breaking backwards
compatibility.

fixes #78
2020-11-07 07:41:49 -06:00
Jack Christensen
b82b993fa8 Release v1.7.2 2020-11-03 19:20:03 -06:00
Jack Christensen
0f17ba2cf3 Fix unconstrained data value slices
See https://github.com/jackc/pgx/issues/859
2020-11-03 19:17:52 -06:00
Jack Christensen
c34a8731b6 Data row value slices need to be capacity limited
Otherwise, appending to a slice that came from a data row could
overwrite adjacent memory.
2020-11-03 19:15:07 -06:00
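A self-contained illustration of why the capacity limit matters (not the commit's code): a full slice expression pins the capacity so append cannot write into adjacent memory.

    package main

    import "fmt"

    func main() {
        buf := []byte("abcdefgh")

        row := buf[2:5] // len 3, cap 6: room to grow into buf's remaining bytes
        _ = append(row, 'X')
        fmt.Println(string(buf)) // "abcdeXgh": append clobbered the adjacent byte

        buf = []byte("abcdefgh")
        safe := buf[2:5:5] // len 3, cap 3: the full slice expression caps capacity
        _ = append(safe, 'X')
        fmt.Println(string(buf)) // "abcdefgh": append had to allocate a new array
    }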
Jack Christensen
36a8da55cc Fix Timestamptz.DecodeText with too short text
fixes #74
2020-11-03 08:31:05 -06:00
Jack Christensen
af0ca3a39b Fix simple protocol empty array and original recursive empty array issue
Original issue https://github.com/jackc/pgtype/issues/68

This crash occurred in the recursive assignment system used to support
multidimensional arrays.

This was fixed in 9639a69d451f55456f598c1aa8b93053f8df3088. However,
that fix incorrectly used nil instead of an empty slice.

In hindsight, it appears the fundamental error is that an assignment to
a slice of a type that is not specified is handled with the recursive /
reflection path. Or another way of looking at it is as an unexpected
feature where []T can now be scanned if individual elements are
assignable to T even if []T is not specifically handled.

But this new reflection / recursive path did not handle empty arrays.

This fix handles the reflection path for an empty slice by allocating an
empty slice.
2020-10-31 17:12:16 -05:00
Jack Christensen
9c2888b49e Release v1.7.1 2020-10-31 16:25:01 -05:00
Jack Christensen
340bfece2c Do not asyncClose in response to a FATAL PG error
This will reduce spurious server log messages on authentication
failures. See https://github.com/jackc/pgconn/pull/53.
2020-10-29 21:20:28 -05:00
Feike Steenbergen
f3f5b70a87 Ensure the example code snippet compiles again
There were 2 errors when using the example code:

- not enough arguments in call to pgConn.Close
- no new variables on left side of :=

With these changes, the example works again.
2020-10-29 20:49:03 -05:00
Jack Christensen
9d7fc8e63a AssignTo pointer to pointer to slice and named types
fixes #69
2020-10-24 09:21:42 -05:00
Simo Haasanen
9639a69d45 Adds checks for zero length arrays.
Assigning values from nil or zero-length elements or dimensions now returns immediately, as there are no values to assign.
2020-10-20 19:52:05 +01:00
Tomas Volf
e92478ec70
Fix Inet.Set to handle nil net.IP correctly
When a nil IP is returned from net.ParseIP, it is accepted into the Inet
type but not properly marked as Null. That introduces issues later on,
for example when calling EncodeBinary, since it does not assume this can
happen.

This commit resolves that by properly detecting zero-length net.IP and
setting status to Null if that is the case.
2020-10-13 15:26:09 +02:00
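A small sketch of the case being fixed, using pgtype v1's Inet type (illustration only):

    ip := net.ParseIP("not an ip") // ParseIP returns nil for invalid input
    var inet pgtype.Inet
    if err := inet.Set(ip); err != nil {
        return err
    }
    // with this commit inet.Status is pgtype.Null, so a later EncodeBinary
    // no longer sees a present-but-empty value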
duohedron
2bc8c67e4a Fix misleading names parseString and parseFloat64 in polygon.go 2020-10-08 07:59:44 -05:00
duohedron
b55f972f49 Add comment to Polygon.Set() 2020-10-08 07:59:44 -05:00
duohedron
8aa7211df5 Add tests to Polygon 2020-10-08 07:59:44 -05:00
duohedron
6166c99b77 Add Undefined status to invalid Polygon 2020-10-08 07:59:44 -05:00
duohedron
e09987f1d6 Add tests to Polygon 2020-10-08 07:59:44 -05:00
duohedron
2dca42ee7d Add Set(string|[]Vec2|[]float64) to Polygon 2020-10-08 07:59:44 -05:00
Erik Agsjö
66c36ff24f Support setting infinite timestamps 2020-10-08 07:58:57 -05:00
Jack Christensen
416f037e77 Fix docs for Timeout 2020-10-05 19:39:05 -05:00
Jack Christensen
376361f53d Add tests for Int(2|4|8).Set accepting float(32|64) 2020-10-03 08:36:40 -05:00
lqu3j
909d814f65 support float64, float32 convert to int2, int4, int8 2020-09-29 13:10:38 +08:00
Jack Christensen
116eba4401 Release v1.5.0 2020-09-26 11:48:37 -05:00
Jack Christensen
035868ca0c Release v1.7.0 2020-09-26 11:39:23 -05:00
Jack Christensen
28d24269e9 Upgrade pgproto3 to v2.0.5 2020-09-26 11:35:23 -05:00
Bekmamat
d7f92427ad fixed marshaling and unmarshaling 2020-09-22 08:18:39 -05:00
Jack Christensen
835cf1b068 Fix: Bind.MarshalJSON when ParameterFormatCodes is nil or single element
refs #10
2020-09-17 17:03:30 -05:00
Jack Christensen
fbe354aea1 Remove editor specific .gitignore 2020-09-15 17:21:13 -05:00
bakmataliev
6777e0294b eliminate regex dep 2020-09-15 13:24:17 +03:00
bakmataliev
cd9b888ff6 Remove unnecessary check for null 2020-09-11 16:28:49 +03:00
bakmataliev
d540ca39be New marshalers have been added 2020-09-11 16:24:48 +03:00
Jack Christensen
be69c1c10b Fix parseDSNSettings with bad backslash
fixes #49
2020-09-10 19:40:52 -05:00
Jack Christensen
b6b3a86310 Update CI Go versions 2020-09-05 13:26:56 -05:00
Jack Christensen
0d4f029683 Exec(Params|Prepared) return ResultReader with FieldDescriptions loaded
Previously, it wasn't loaded until NextRow was called the first time.
2020-09-05 13:14:14 -05:00
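A minimal sketch of what this enables, assuming an established *pgconn.PgConn named pgConn (not part of the commit):

    rr := pgConn.ExecParams(ctx, "select 1 as one, 'two' as two", nil, nil, nil, nil)
    // field descriptions are now available immediately, before the first NextRow call
    for _, fd := range rr.FieldDescriptions() {
        fmt.Println(string(fd.Name))
    }
    for rr.NextRow() {
        // process rr.Values()
    }
    if _, err := rr.Close(); err != nil {
        return err
    }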
Jack Christensen
fede0ce5d6 Document that received messages are only valid until the next receive. 2020-09-05 11:30:23 -05:00
Jack Christensen
e7d2b057a7 Text formatted values except bytea can be directly scanned to []byte
This significantly improves performance of scanning text to []byte as it
avoids multiple allocations and copies.
2020-09-05 11:13:53 -05:00
Jack Christensen
9da6afcad7 Fix selecting empty array
Failing test was in pgx: TestReadingValueAfterEmptyArray
2020-09-05 10:56:22 -05:00
Jack Christensen
79b05217d1 Fix JSONBArray to have elements of JSONB 2020-09-04 18:41:34 -05:00
Yuli Khodorkovskiy
08088ecf9a Fix notification response
Notification response was missing the PID in the Encode function
2020-08-31 17:09:20 -05:00
Jack Christensen
6677e2430f
Merge pull request #4 from yulicrunchy/fix-malformed-sasl
Fix malformed SASL messages
2020-08-28 16:49:24 -05:00
Jack Christensen
5847a2671a
Merge pull request #3 from yulicrunchy/gss
Add missing GSSEncRequest
2020-08-28 16:48:40 -05:00
Sebastiaan Mannem
5db484908c Changing SendBytesWithResults to ReceiveResults (that only does the reading). 2020-08-22 20:38:04 -05:00
Sebastiaan Mannem
1debbfeec4 Adding SendBytesWithResults option to receive data after sending a message (used by copy-both) 2020-08-22 20:38:04 -05:00
Jack Christensen
fdfc783345 Rename CleanupChan to CleanupDone 2020-08-20 22:08:40 -05:00
Jack Christensen
3eb5432c47 Add PgConn.CleanupChan 2020-08-20 22:00:21 -05:00
Simo Haasanen
ec14212d30 Add comments to explain the use of reflection after type assertion.
Removes one local variable, which is used twice only in an error.
2020-08-09 09:17:40 +01:00
Simo Haasanen
b90570feb5 Restored more optimised array type conversions for a few select 1D-slice types.
Results of calls to the reflect lib are now stored as local variables for small performance gains.
2020-08-08 19:51:37 +01:00
Simo Haasanen
449a8a4f8e Add multidimensional array and slice support.
Adds array support - previously only slices were supported.
Adds new test cases for multidimensional arrays and slices.
All previous test cases are unmodified and passed (fully backwards compatible).
Removes hard-coded type conversions for arrays; instead relies on the type conversion support of the array element's type.
Less maintenance for arrays: new type conversions are automatically supported when the array's element type gains new type support.
Simplifies typed_array_gen.sh generator script by removing the hard-coded single-dimensional types for arrays.
Only typed_array.go.erb and typed_array_gen.sh have been changed + 1 new auxiliary function in array.go file + additional tests in test files for each array. Other changes are from generated code.
2020-08-07 13:10:32 +01:00
Jack Christensen
c894ca8b7d Update pgproto3 to v2.0.4 2020-08-01 05:49:56 -05:00
Jack Christensen
72288731fc
Merge pull request #8 from mjibson/patch-1
mark CopyDone as frontend too
2020-08-01 05:46:49 -05:00
Matt Jibson
2799a6e9a6
mark CopyDone as frontend too 2020-07-31 16:13:23 -06:00
Jack Christensen
b6e34b44e5 Update pgproto3 2020-07-31 17:04:18 -05:00
Jack Christensen
3443d78a73
Merge pull request #7 from mjibson/patch-1
correctly encode CopyInResponse's format field
2020-07-31 17:02:15 -05:00
Matt Jibson
6d0b4c45e4
correctly encode CopyInResponse's format field 2020-07-31 15:42:06 -06:00
Jack Christensen
f45b4d6b76 Release v1.6.4 2020-07-29 22:17:02 -05:00
Jack Christensen
44079b0d2c Fix panic on parsing DSN with trailing '='
Also correctly return error with leading '='.

fixes #47
2020-07-29 22:11:15 -05:00
Jack Christensen
4e4c4ea541 Fix deadlock on error after CommandComplete but before ReadyForQuery
See: https://github.com/jackc/pgx/issues/800
2020-07-29 21:47:23 -05:00
Jack Christensen
d831ba712a Release v1.4.2 2020-07-22 06:46:27 -05:00
Jack Christensen
7673c8578d Update changelog 2020-07-22 06:45:10 -05:00
Jack Christensen
37c9edc242 Release v1.6.3 2020-07-22 06:43:39 -05:00
Yaz Saito
b939bc8d68 Fix encoding of a large composite data type
If encoding a field caused a buffer reallocation, its length would be
written to the wrong place.
2020-07-21 23:52:20 -07:00
vahid-sohrabloo
271b0ac95e AppendCertsFromPEM doesn't have error and removes pgTLSArgs
AppendCertsFromPEM does not return an error; also remove pgTLSArgs because it is not used.
2020-07-18 08:50:12 -05:00
Jack Christensen
9295bf7483 Update changelog 2020-07-14 12:07:27 -05:00
Jack Christensen
7a3e774a52 Fix ArrayType DecodeBinary empty array breaks future reads 2020-07-14 11:58:10 -05:00
Jack Christensen
12752ce5d6 Update pgservicefile 2020-07-13 19:34:45 -05:00
Jack Christensen
aa245bcc44
Merge pull request #47 from bakape/fix/struct-padding
optimise struct padding
2020-07-13 07:38:09 -05:00
bakape
193ecfec73
optimise struct padding 2020-07-12 13:52:32 +03:00
James Lawrence
5576567c19 support unformatted uuid hex string.
adds the ability to support UUIDs in the form:
000102030405060708090a0b0c0d0e0f
2020-07-06 11:27:15 -04:00
Jack Christensen
efe4704c57 Release v1.4.0 2020-06-27 12:25:17 -05:00
Jack Christensen
c4e2b4bda3 Update changelog 2020-06-27 12:24:46 -05:00
Jack Christensen
503c2b445f Release v1.6.1 2020-06-27 11:51:30 -05:00
Jack Christensen
bd7ffdb480 Update golang.org/x/crypto dependency 2020-06-27 11:48:20 -05:00
Jack Christensen
65717779e4 Fix crash when PGSERVICE not found 2020-06-27 11:46:16 -05:00
Jack Christensen
82c2752e71 Update golang.org/x/text to 0.3.3
golang.org/x/text had a vulnerability:

https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-14040

pgconn does not appear to use the affected code path, but it is still
worth updating away from the vulnerable version.

fixes #44
2020-06-27 11:35:23 -05:00
Jack Christensen
473062b114 Remove NewConfig and add more docs for ParseConfig
refs #42
2020-06-27 11:29:21 -05:00
Jack Christensen
66a0b33655 Rerun typed_array_gen.sh 2020-06-24 08:40:34 -05:00
Andrei Tserakhau
4f940b06eb
Merge branch 'master' into fix-490 2020-06-21 14:23:49 +03:00
tserakhau
44f45c6c62 Use erb for jsonb array generation 2020-06-21 14:21:16 +03:00
Jack Christensen
41a185b611 Allow converting intervals with months and days to duration
It's a lossy conversion but so is numeric to float.

fixes #42
2020-06-20 13:26:06 -05:00
Jack Christensen
3d6abc3c5b
Merge pull request #41 from TrueLevelSA/feat/slice-of-pointers
Create pgtypes (and slices of) from pointers
2020-06-20 13:14:43 -05:00
Jack Christensen
7cf5101bb2 Add NewConfig()
refs #42
2020-06-20 12:59:15 -05:00
tserakhau
35d2873de1 Fix 490: Add jsonb arrays for pgx v4 2020-06-18 17:11:54 +03:00
megaturbo
bc07106f0e Add Code generated notice at the top of the file 2020-06-17 17:04:43 +02:00
megaturbo
066bc77610 Add support for slice of nullable types in array types 2020-06-17 17:04:43 +02:00
megaturbo
3105c6e706 Add support for nullable types in Value.Get implementations 2020-06-17 17:04:43 +02:00
Jack Christensen
6c16d6c00e
Merge pull request #43 from lukedirtwalker/redactPW
redact passwords in parse config errors
2020-06-13 07:19:15 -05:00
Lukas Vogel
f27e874d55 redact passwords in parse config errors
Redact passwords when printing the parseConfigError in a best effort manner.
This prevents people from leaking the password if they just print the error in logs.
2020-06-12 13:01:57 +02:00
Jack Christensen
09efc38390 Update changelog 2020-06-11 21:36:50 -05:00
Jack Christensen
a0bff84d73 Merge branch 'leighhopcroft-numeric-nan-support' 2020-06-11 21:35:49 -05:00
Jack Christensen
7bcd9fbdaf Rename IsNaN to NaN 2020-06-11 21:35:32 -05:00
Jack Christensen
ee66a98ace Merge branch 'numeric-nan-support' of git://github.com/leighhopcroft/pgtype into leighhopcroft-numeric-nan-support 2020-06-11 21:29:49 -05:00
Jack Christensen
a1b9eb4d4e Fix parseServiceSettings not returning error 2020-06-11 20:55:41 -05:00
Jack Christensen
6b254a445e Fix doc for ParseConfig 2020-06-11 20:51:40 -05:00
Jack Christensen
43696815c2
Merge pull request #39 from powersjcb/add-travis-config
Duplicate travis config from pgx
2020-06-11 19:45:24 -05:00
Jacob Powers
25d18b98e5 fix regression 2020-06-10 09:26:59 -07:00
Jacob Powers
de77c70f48 enable hstore extension before running tests 2020-06-10 09:05:14 -07:00
leighhopcroft
0b762c6e26 updated to use boolean IsNaN field on Numeric 2020-06-10 16:59:08 +01:00
Jacob Powers
97e4debcc0 disable test cases that require a binary sql snapshot 2020-06-10 08:27:56 -07:00
Jacob Powers
6d62aec6b1 remove irrelevant test from pgx 2020-06-09 18:31:49 -07:00
Jacob Powers
96f49eb89b copy travis configs over from pgx 2020-06-09 18:16:23 -07:00
Jacob Powers
3e586004db add travis config 2020-06-09 18:08:38 -07:00
Jack Christensen
e32805888d
Merge pull request #35 from georgysavva/scan-into-interface-dst
Make it possible to scan destination of *interface{} type.
2020-06-08 13:14:01 -05:00
georgysavva
a6d42976c6 Make it possible to scan destination of *interface{} type. 2020-06-08 13:18:54 +03:00
Jack Christensen
9b79c87d64 Update changelog 2020-06-06 10:59:27 -05:00
Jack Christensen
59a0074b0a Release v1.6.0 2020-06-06 10:52:55 -05:00
Jack Christensen
6cd2127b96 Update pgproto3 dependency 2020-06-06 10:52:07 -05:00
Jack Christensen
36944b232f Fix hstore with empty string values 2020-06-06 10:26:34 -05:00
Jack Christensen
937aec9841 Fix tests with newest pgx 2020-06-06 09:55:14 -05:00
Jack Christensen
f6355165a9 Remove superfluous argument from ScanPlan 2020-06-06 09:10:11 -05:00
Jack Christensen
91a46ce219 Clarify and normalize Value semantics
Previously, Get implicitly allowed returning a reference to an internal
value (e.g. a []byte) but AssignTo was documented as requiring a deep
copy.

This inconsistency meant that either Get was unsafe or the deep copy in
AssignTo was superfluous. In addition, Scan into a []byte skips going
through Bytea and returns a []byte of the unparsed bytes directly. i.e.
a reference not a copy.

Standardize on allowing Get and AssignTo to return internal references
but require a Value never mutate internal values - only replace them.
2020-06-06 08:34:56 -05:00
Jack Christensen
43e4070cb4 Better CompositeType and ArrayType Get implementation 2020-06-05 13:40:03 -05:00
leighhopcroft
f2a2797a88 support NaN in Numeric encode and decode methods 2020-06-02 20:14:51 +01:00
leighhopcroft
b708c8b985 support NaN in Numeric.AssignTo 2020-06-02 19:07:10 +01:00
leighhopcroft
3cbb81631a added NaN support to Numeric.Set 2020-06-02 18:35:58 +01:00
Jack Christensen
fa742c5248
Merge pull request #41 from georgysavva/add-config-copy
Add Config.Copy() method that returns a smart copy of the config.
2020-06-01 13:11:01 -05:00
georgysavva
a6d9265506 Implement deep copy manually, stop using an external deep copy library. Add comment to the Config.Copy() method. 2020-06-01 20:52:08 +03:00
georgysavva
8d541d0004 Add Config.Copy() method that return a smart copy of the config. 2020-06-01 19:20:17 +03:00
Jack Christensen
2647eff567 Fix ValidateConnect with cancelable context
fixes #40
2020-05-25 11:49:37 -05:00
Jack Christensen
8c33aa2443 Remove CPU wasting empty default statement
fixes #39
2020-05-23 11:47:42 -05:00
Jack Christensen
9d847241cb
Merge pull request #29 from pmorelli92/master
Add the possibility of assigning a TID to a string
2020-05-21 09:46:10 -05:00
Pablo Morelli
afff6abc6c
TID AssignTo string 2020-05-20 15:01:21 +02:00
Jack Christensen
7746f223e0
Merge pull request #1 from felixge/fix-auth-md5
fix: AuthenticationMD5Password AuthType
2020-05-18 08:52:34 -05:00
Jack Christensen
2ccb66fe21 Doc fix 2020-05-16 18:48:05 -05:00
Jack Christensen
18d2604119
Merge pull request #38 from lukedirtwalker/fixIPv6
Handle IPv6 in connection URLs
2020-05-13 08:50:38 -05:00
Jack Christensen
238967ec4e Improve accuracy of numeric to float
fixes #27
2020-05-13 08:05:19 -05:00
Jack Christensen
39f9d6079c Merge branch 'georgysavva-improve-connect-timeout' 2020-05-13 07:43:45 -05:00
Jack Christensen
fafefa6063 Merge branch 'improve-connect-timeout' of git://github.com/georgysavva/pgconn into georgysavva-improve-connect-timeout 2020-05-13 07:43:15 -05:00
Jack Christensen
fb54a80edf
Merge pull request #36 from Eun/patch-1
concludeCommand should not throw away fieldDescriptions
2020-05-13 07:38:08 -05:00
Jack Christensen
6a1a9d05bc Add pgxtype package for simpler type registration 2020-05-13 07:34:10 -05:00
Jack Christensen
f8471ebfa8 ArrayType requires element OID 2020-05-13 07:11:10 -05:00
Jack Christensen
ee0e207ee4 CompositeType fields contain name and oid 2020-05-13 07:09:52 -05:00
Jack Christensen
0e2bc3467a Fix ext/gofrs-uuid AssignTo *uuid.UUID 2020-05-12 21:01:06 -05:00
Jack Christensen
b3e1355a46 CompositeType can assign to struct via reflection 2020-05-12 16:58:16 -05:00
Jack Christensen
9a3923b6e0 EncodeRow is superceded by CompositeFields 2020-05-12 15:51:27 -05:00
Jack Christensen
506ea36835 Do not export quoteCompositeFieldIfNeeded 2020-05-12 15:47:44 -05:00
Jack Christensen
eebc6975de Add EncodeText support for CompositeType 2020-05-12 15:45:16 -05:00
Jack Christensen
e45ef46424 Refactor and add CompositeTextBuilder 2020-05-12 15:42:26 -05:00
Jack Christensen
fcb385dccb Add ScanDecoder and ScanValue to composite scanners.
Rename Scan to Next to disambiguate.
2020-05-12 15:04:14 -05:00
Jack Christensen
e51cb1ef09 Add CompositeBinaryBuilder 2020-05-12 14:04:11 -05:00
Jack Christensen
2186634638 Add CompositeFields encoders 2020-05-12 11:55:24 -05:00
Jack Christensen
e92ee69901 Expose EnumType directly instead of behind interface 2020-05-12 10:41:50 -05:00
Jack Christensen
9cdd928cb8 CompositeType implements TypeValue 2020-05-12 10:40:13 -05:00
Jack Christensen
e5992d0aed TypeValue should include Value 2020-05-12 10:28:13 -05:00
Jack Christensen
682201a4fc Rename CloneTypeValue to NewTypeValue 2020-05-12 10:26:51 -05:00
Jack Christensen
bff2829b0f Move ComposteType.Scan functionality into AssignTo
Also remove adapter functions that are no longer used.
2020-05-12 10:19:41 -05:00
Jack Christensen
247043b597 Merge SetFields functionality into Set 2020-05-12 08:35:45 -05:00
Jack Christensen
c41160bcbb Make CompositeType status private 2020-05-12 08:01:10 -05:00
Jack Christensen
4a6bd41a36 Rename Composite to CompositeType.
This harmonizes the naming with EnumType and ArrayType.
2020-05-12 07:58:10 -05:00
Lukas Vogel
08d071c094 Handle IPv6 in connection URLs
Previously IPv6 addresses were wrongly split, which led to a parse error.
This commit fixes the behavior.
2020-05-12 10:39:35 +02:00
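For example, after this fix a bracketed IPv6 host in a URL-style DSN parses cleanly (a usage sketch, not the commit's test):

    cfg, err := pgconn.ParseConfig("postgres://app_user:secret@[2001:db8::1]:5432/appdb")
    if err != nil {
        return err
    }
    fmt.Println(cfg.Host, cfg.Port) // 2001:db8::1 5432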
Jack Christensen
036101deb5 Allow scanning to nil as no-op 2020-05-11 17:41:20 -05:00
Jack Christensen
36dbbd983d Add CompositeFields type
This adds support for the text format and removes the need for the
ScanRowValue function.
2020-05-11 17:21:21 -05:00
Jack Christensen
1b3d694469 Add ArrayType 2020-05-10 19:37:25 -05:00
Jack Christensen
6cef4638ad Update pgx dependency for tests 2020-05-10 14:11:24 -05:00
Jack Christensen
8cd94a14c7 Allow types to specify preference format result and param formats
This will be useful for array and composite types that may have to
support elements that may not support binary encoding.

It also is slightly more convenient for text-ish types to have a default
format of text.
2020-05-10 14:05:16 -05:00
Jack Christensen
cc4d1eafe0 Doc tweaks and renames 2020-05-10 12:45:12 -05:00
Jack Christensen
a71c179ce3 Extract nullAssignmentError 2020-05-10 12:28:47 -05:00
Jack Christensen
52729c1b77 Back off some aggressive PlanScan optimizations
PlanScan used to require the exact same value be used every time. While
this was great for performance, on further consideration I think it is
too much of a potential foot-gun.

This moves back in the other direction. A plan tolerates a change in
destination. It even detects a change in destination type and falls
back to a new plan.

Perfectly matched hot scan paths (e.g. PG int4 to Go int32) are still
much faster than they were before this set of optimizations. The first
scan of a destination that uses a decoder is faster due to not
allocating. It's a little bit slower on subsequent runs than before
this set of optimizations. But it is preferable to optimize for the
most common scan targets (e.g. *int32, *int64, *string) over generic
decoder destinations.

In addition this frees pgx.connRows.Scan from having to check that
the destination is unchanged.
2020-05-10 09:37:15 -05:00
Jack Christensen
7e66ab1e14 Add scan plan system
This can improve performance now and will be useful for the future
transcoder system.
2020-05-09 23:52:48 -05:00
Jack Christensen
c4e6445cc7 Explicitly test supported Go and PostgreSQL versions 2020-05-09 10:19:39 -05:00
Jack Christensen
97bbe6ae20 Add RegisterDefaultPgType
This allows registering a mapping of a Go type to a PostgreSQL type
name. If the OID of a value to be encoded or decoded is unknown, this
additional mapping will be used to determine a suitable data type.
2020-05-08 16:13:15 -05:00
Jack Christensen
4a50a63f12 Refactor Scan optimization
Instead of hardcoding specific types and skipping type assertions based
on that, only check whether a destination is a sql.Scanner after a failed
AssignTo.

This is slightly slower in the non-decoder case and *very* slightly
faster in the decoder. However, this approach is cleaner and has the
potential for further optimizations.
2020-05-07 19:48:48 -05:00
Jack Christensen
452511dfc5 Rename RecordFieldIter to CompositeBinaryScanner and adjust interface
Use interface similar to bufio.Scanner and pgx.Rows.
2020-05-07 13:38:19 -05:00
Jack Christensen
ff9bc5d68d Merge binary package into pgtype package 2020-05-07 10:15:23 -05:00
Jack Christensen
37e976192b ScanRowValue accepts interface{} dst 2020-05-06 14:56:25 -05:00
Jack Christensen
10838b39f6 Remove vscode settings 2020-05-06 14:45:55 -05:00
Jack Christensen
cce17427e1 Merge branch 'record-expect' of git://github.com/redbaron/pgtype into redbaron-record-expect 2020-05-06 14:43:10 -05:00
Jack Christensen
2938981516 Make EnumType implementation private 2020-05-06 10:30:43 -05:00
Jack Christensen
4d2b5a18c4 Clarify Value.Get() documentation
Specifying behavior for Status Null and Undefined is incorrect because
a Value is not required to have a Status. In addition, standard
behavior is to return nil, not pgtype.Null when the Status is
pgtype.Null.
2020-05-06 09:51:41 -05:00
Jack Christensen
3b7c47a2a7 Add EnumType 2020-05-05 13:23:14 -05:00
Jack Christensen
ab5e597826 Avoid type assertion in Scan
Before:
BenchmarkConnInfoScanInt4IntoBinaryDecoder-16           79859755                14.6 ns/op             0 B/op          0 allocs/op
BenchmarkConnInfoScanInt4IntoGoInt32-16                 38969991                30.0 ns/op             0 B/op          0 allocs/op

After:
BenchmarkConnInfoScanInt4IntoBinaryDecoder-16    	458046958	        13.3 ns/op	       0 B/op	       0 allocs/op
BenchmarkConnInfoScanInt4IntoGoInt32-16          	275791776	        20.6 ns/op	       0 B/op	       0 allocs/op
2020-05-02 20:30:58 -05:00
Jack Christensen
18c64dceee ConnInfo Scan optimizes common native types
This comes at a small expense to scanning into a type that implements
TextDecoder or BinaryDecoder but I think it is a good trade.

Before:
BenchmarkConnInfoScanInt4IntoBinaryDecoder-16    	88181061	        12.4 ns/op	       0 B/op	       0 allocs/op
BenchmarkConnInfoScanInt4IntoGoInt32-16          	30402768	        36.8 ns/op	       0 B/op	       0 allocs/op

After:
BenchmarkConnInfoScanInt4IntoBinaryDecoder-16    	79859755	        14.6 ns/op	       0 B/op	       0 allocs/op
BenchmarkConnInfoScanInt4IntoGoInt32-16          	38969991	        30.0 ns/op	       0 B/op	       0 allocs/op
2020-05-02 20:18:51 -05:00
Jack Christensen
6357d3b3f3 Avoid extra type assertion on native type Scan path
Before:
BenchmarkConnInfoScanInt4IntoBinaryDecoder-16    	89744814	        12.5 ns/op	       0 B/op	       0 allocs/op
BenchmarkConnInfoScanInt4IntoGoInt32-16          	27688370	        41.1 ns/op	       0 B/op	       0 allocs/op

After:
BenchmarkConnInfoScanInt4IntoBinaryDecoder-16    	88181061	        12.4 ns/op	       0 B/op	       0 allocs/op
BenchmarkConnInfoScanInt4IntoGoInt32-16          	30402768	        36.8 ns/op	       0 B/op	       0 allocs/op
2020-05-02 17:31:53 -05:00
Jack Christensen
a4dd4af756 Add benchmarks for scan into native type vs decoder 2020-05-02 17:31:34 -05:00
Jack Christensen
e6c6de9494 Improved ext/shopspring-numeric binary decoding performance
Before:
BenchmarkDecode/Zero-Binary-16       	 3944304	       292 ns/op	     128 B/op	       7 allocs/op
BenchmarkDecode/Small-Binary-16      	 2034132	       585 ns/op	     184 B/op	      13 allocs/op
BenchmarkDecode/Medium-Binary-16     	 1747191	       690 ns/op	     192 B/op	      12 allocs/op
BenchmarkDecode/Large-Binary-16      	 1334006	       899 ns/op	     304 B/op	      14 allocs/op
BenchmarkDecode/Huge-Binary-16       	  702382	      1590 ns/op	     584 B/op	      18 allocs/op

After:
BenchmarkDecode/Zero-Binary-16       	14592645	        80.1 ns/op	      64 B/op	       2 allocs/op
BenchmarkDecode/Small-Binary-16      	 5729318	       212 ns/op	     104 B/op	       7 allocs/op
BenchmarkDecode/Medium-Binary-16     	 4930009	       241 ns/op	      88 B/op	       5 allocs/op
BenchmarkDecode/Large-Binary-16      	 3369573	       344 ns/op	     144 B/op	       7 allocs/op
BenchmarkDecode/Huge-Binary-16       	 2587156	       453 ns/op	     216 B/op	       9 allocs/op
2020-05-02 11:34:14 -05:00
georgysavva
01a7510ae9 Reformat imports 2020-05-02 16:43:02 +03:00
georgysavva
2d5a17beab Add comment. 2020-05-02 16:39:51 +03:00
georgysavva
391e1ef2ce Parse connect timeout setting into Config. Restrict context timeout via Config.ConnectTimeout on .Connect() call. 2020-05-02 16:35:22 +03:00
Maxim Ivanov
63c5d350a3 Add JSON benchmarks 2020-05-02 10:54:19 +01:00
Maxim Ivanov
700df0d05a Request binary format in Composite tests 2020-05-01 23:37:05 +01:00
Tobias Salzmann
8d9293e1e7
Update pgconn.go 2020-04-30 11:27:01 +02:00
Tobias Salzmann
8f3f335b0f
concludeCommand should not throw away fieldDescriptions 2020-04-30 11:22:43 +02:00
Maxim Ivanov
5f0d5f4255 Remove pgtype.Row(), introduce Composite.Scan()
pgtype.Row() was optimized for single-line use without much ceremony,
at the cost of OID registration, which is cumbersome. In practice it is
quite inconvenient to create a new Composite just before making a query.

So now there is just a Composite type and 2 helper methods:

- SetFields sets composite fields to values passed. This assignment
  fails if types passed are not assignable to Values pgtype is
  made of.

- Scan acts exactly like query.Scan, but for a composite value. Passed
  values are set to values from SQL composite.
2020-04-27 00:48:02 +01:00
Maxim Ivanov
e283f322e1 Composite().Row() helper for working with composites without registration 2020-04-20 22:38:20 +00:00
Maxim Ivanov
04ff904ff5 Add binary decoding benchmarks
```
BenchmarkBinaryDecodingManual-4         10479085               106 ns/op              40 B/op          2 allocs/op
BenchmarkBinaryDecodingHelpers-4         4485451               263 ns/op              64 B/op          4 allocs/op
BenchmarkBinaryDecodingRow-4             1999726               587 ns/op              96 B/op          5 allocs/op
```
2020-04-19 15:46:14 +00:00
Maxim Ivanov
72680d61f8 Move value creation outside of encoding benchmark 2020-04-19 11:30:21 +00:00
Maxim Ivanov
53e0f25a4e Make ScanRowValue error message clearer 2020-04-18 19:29:18 +00:00
Maxim Ivanov
b88a3e0765 Tighten ScanRowValue input types
ScanRowValue needs BinaryEncoder, not Value.
2020-04-18 14:08:31 +01:00
Maxim Ivanov
54a03cb143 Add benchmark for various composite encoder implementations
```
BenchmarkBinaryEncodingManual-12   824053234   28.9 ns/op    0 B/op     0 allocs/op
BenchmarkBinaryEncodingHelper-12   76815436    314 ns/op     192 B/op   5 allocs/op
BenchmarkBinaryEncodingRow-12      65302958    364 ns/op     192 B/op   5 allocs/op
```
2020-04-16 22:24:43 +01:00
Maxim Ivanov
2e13f2fe76 Move lowlevel binary routines into own package 2020-04-16 21:34:06 +01:00
Maxim Ivanov
a6747b513f Split composite examples 2020-04-13 17:44:02 +01:00
Maxim Ivanov
3ce29f9e05 Add Composite type for inplace row() values handling
Composite() function returns a private type, which should
be registered with ConnInfo.RegisterDataType for the composite
type's OID.

All subsequent interaction with Composite types is to be done
via Row(...) function. Function return value can be either
passed as a query argument to build SQL composite value out of
individual fields or passed to Scan to read SQL composite value
back.

When passed to Scan, Row() should have first argument of type
*bool to flag NULL values returned from query.
2020-04-13 17:41:44 +01:00
Maxim Ivanov
8ae83b19f7 Add EncodeRow helpers
Also extend example to show how EncodeRow can be used
to create binary encoders for composite type
2020-04-13 00:09:03 +01:00
Maxim Ivanov
368295d3ee Create ROW helper for adhoc decoding of records 2020-04-12 18:40:52 +01:00
Maxim Ivanov
71ed747f3a Add example of CompositeType handling with ScanRowValue helper 2020-04-12 17:36:39 +01:00
Maxim Ivanov
ff95f82f70 Add ScanRowValue helper function
ScanRowValue is useful when reading ROW() values with
known field types as well as composite types. It accepts
pgtype.Value arguments, where ROW() fields are written to
on a successful scan.
2020-04-12 12:26:12 +01:00
Maxim Ivanov
9a869c8359 Refactor record field binary decoder preparation 2020-04-11 11:08:53 +01:00
Maxim Ivanov
087df120bb Refactor lowlevel record field iteration 2020-04-11 10:38:23 +01:00
Jack Christensen
15856c001a
Merge pull request #33 from pjediny/sslmode-require-with-rootca
Fix behavior of sslmode=require with sslrootcert present
2020-04-07 20:00:31 -05:00
Jack Christensen
98c9ec4f7b
Merge pull request #23 from lbcjbb/clean-go-mod-file
Clean go.sum file to remove old version of pgx v3
2020-04-07 19:49:44 -05:00
Jack Christensen
5d2be99c25 Fix panic when closing conn during cancellable query
fixes #29
2020-04-07 19:38:21 -05:00
Petr Jediný
84aee0ab44 Fix behavior of sslmode=require with sslrootcert present
According to PostgreSQL documentation the behavior should be
the same as that of verify-ca sslmode

https://www.postgresql.org/docs/12/libpq-ssl.html
2020-04-08 00:08:53 +02:00
Jean-Baptiste Bronisz
1fcc71410c
Clean go.sum file to remove old version of pgx v3 2020-04-06 19:45:25 +02:00
Jack Christensen
9016875cae Add JSON support to ext/gofrs-uuid 2020-04-02 14:01:16 -05:00
Jack Christensen
ef5f8b54af Update dependencies 2020-03-30 11:30:37 -05:00
Jack Christensen
b26cd22378 Update changelog for v1.3.0 2020-03-30 11:18:27 -05:00
Jack Christensen
e4f3224f4c Update changelog for v1.5.0 2020-03-30 11:15:08 -05:00
Jack Christensen
11d9f4e54f Update golang.org/x/crypto for security fix 2020-03-30 11:09:29 -05:00
Jack Christensen
d3d80cd2de Merge branch 'rwelin-rw_format' 2020-03-27 16:10:54 -05:00
Jack Christensen
65bb544ba9 Merge branch 'rw_format' of git://github.com/rwelin/pgtype into rwelin-rw_format 2020-03-27 16:10:37 -05:00
Jack Christensen
523cdad66f Truncate nanoseconds in EncodeText for Timestamptz and Timestamp
PostgreSQL has microsecond precision. If more than this precision is
supplied in the text format it is rounded. This was inconsistent with
the binary format.

See https://github.com/jackc/pgx/issues/699 for original issue.
2020-03-27 15:59:54 -05:00
Robert Welin
43bf713180 Use correct format verb for unknown type error 2020-03-27 13:20:33 +00:00
Jack Christensen
87c8ddd0d1
Merge pull request #32 from gcurtis/verify-ca
Implement "verify-ca" SSL mode
2020-03-21 11:13:55 -05:00
Greg Curtis
4ed48d05d2 Implement "verify-ca" SSL mode
ParseConfig currently treats the libpq "verify-ca" SSL mode as
"verify-full". This is okay from a security standpoint because
"verify-full" performs certificate verification and hostname
verification, whereas "verify-ca" only performs certificate
verification.

The downside to this approach is that checking the hostname is
unnecessary when the server's certificate has been signed by a private
CA. It can also cause the SSL handshake to fail when connecting to an
instance by IP. For example, a Google Cloud SQL instance typically
doesn't have a hostname and uses its own private CA to sign its
server and client certs.

This change uses the tls.Config.VerifyPeerCertificate function to
perform certificate verification without checking the hostname when the
"verify-ca" SSL mode is set. This brings pgconn's behavior closer to
that of libpq.

See https://github.com/golang/go/issues/21971#issuecomment-332693931
and https://pkg.go.dev/crypto/tls?tab=doc#example-Config-VerifyPeerCertificate
for more details on how this is implemented.
2020-03-17 23:36:06 -07:00
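A sketch of the hostname-less verification pattern described above and in the linked Go issue, using crypto/tls, crypto/x509, and errors; it illustrates the technique rather than reproducing pgconn's exact code:

    // verifyCATLSConfig verifies the server's certificate chain against rootCAs
    // but skips hostname verification, roughly what sslmode=verify-ca asks for.
    func verifyCATLSConfig(rootCAs *x509.CertPool) *tls.Config {
        return &tls.Config{
            // Disable the default verification, which would also check the hostname...
            InsecureSkipVerify: true,
            // ...and replace it with chain-only verification.
            VerifyPeerCertificate: func(rawCerts [][]byte, _ [][]*x509.Certificate) error {
                if len(rawCerts) == 0 {
                    return errors.New("server presented no certificates")
                }
                certs := make([]*x509.Certificate, len(rawCerts))
                for i, raw := range rawCerts {
                    cert, err := x509.ParseCertificate(raw)
                    if err != nil {
                        return err
                    }
                    certs[i] = cert
                }
                opts := x509.VerifyOptions{
                    Roots:         rootCAs, // pool loaded from sslrootcert
                    Intermediates: x509.NewCertPool(),
                }
                for _, cert := range certs[1:] {
                    opts.Intermediates.AddCert(cert)
                }
                _, err := certs[0].Verify(opts)
                return err
            },
        }
    }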
Jack Christensen
9e700ff067 Date.Set parses string 2020-03-09 10:40:40 -05:00
Jack Christensen
ccf634cf2e Release 1.4.0 2020-03-07 13:21:51 -06:00
Jack Christensen
cfbd2519e3 Add PGSERVICE and PGSERVICEFILE support 2020-03-07 13:17:39 -06:00
Jack Christensen
911e727d78 ExecParams and ExecPrepared handle empty query
An empty query does not return CommandComplete. Instead it returns
EmptyQueryResponse.
2020-03-07 10:55:29 -06:00
Jack Christensen
8117205a75 Range types Set method supports its own type, string, and nil
Previously Set would always return an error when called on a range type.
Now it will accept an instance of itself, a pointer to an instance of
itself, a string, or nil. Strings are parsed with the same logic as
DecodeText.
2020-03-03 15:25:57 -06:00
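A small usage sketch with one of the range types, assuming pgtype v1's Int4range (illustration only; the same applies to the other range types):

    var r pgtype.Int4range
    if err := r.Set("[1,10)"); err != nil { // strings are parsed with the DecodeText logic
        return err
    }
    var r2 pgtype.Int4range
    if err := r2.Set(nil); err != nil { // nil sets the status to Null
        return err
    }
    if err := r2.Set(r); err != nil { // an instance of itself is also accepted
        return err
    }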
Jack Christensen
55a56add23 Set will call Get on src if possible 2020-02-19 11:58:49 -06:00
Jack Christensen
666bd514e2 Add standard nil test to gofrs-uuid.UUID.Set 2020-02-19 10:50:58 -06:00
Jack Christensen
f3816bd1c0 Get implemented on T instead of *T
Methods defined on T are also available on *T. Though this technically
changes the interface, because *T will be automatically dereferenced as
needed, it shouldn't be a breaking change.

See a8802b16cc593842f5c69b0f7cfb0de11d5cd3a8 for similar change.
2020-02-19 10:48:09 -06:00
Jack Christensen
6db848c6fc Update chunkreader to v2.0.1 2020-02-14 17:56:59 -06:00
Jack Christensen
3c4a99247c
Merge pull request #1 from furdarius/increase-buffer
Increase buffer size to 8KB
2020-02-14 17:52:59 -06:00
Jack Christensen
ac364e7a43 Use writeError for Write error 2020-02-07 15:40:50 -06:00
Jack Christensen
06c4e181b1 go mod tidy 2020-02-05 11:49:40 -06:00
Jack Christensen
282b7936a2 Release 1.2.0 2020-02-05 11:10:17 -06:00
Jack Christensen
406afa0eb7 Release v1.3.1 2020-02-05 11:06:09 -06:00
Jack Christensen
0ab69ce885 Merge branch 'freb-json_marshaling' 2020-01-29 09:26:45 -06:00
Jeffrey Stiles
5f363cb1f0 Add JSON marshalling for Bool, Date, JSON/B, Timestamptz 2020-01-27 16:19:43 -08:00
Jack Christensen
c9abb86f21 Ensure write failure in CopyFrom closes connection 2020-01-25 20:40:21 -06:00
Jack Christensen
67f2418279 Make copyErrChan buffered so goroutine can always terminate
It is possible the goroutine that is reading from copyErrChan will not
read in case of error.
2020-01-25 20:39:18 -06:00
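The general Go pattern at play, as a small sketch (not the commit's code): with an unbuffered channel the sending goroutine blocks forever if the receiver has already given up, while a buffer of one lets it always complete its send and exit; doCopy below is a hypothetical stand-in for the copy work:

    copyErr := make(chan error, 1) // buffered so the goroutine can always send
    go func() {
        copyErr <- doCopy()
    }()

    select {
    case err := <-copyErr:
        return err
    case <-ctx.Done():
        // the wait is abandoned, but the goroutine can still send and terminate
        return ctx.Err()
    }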
Jack Christensen
139342081e Fix CopyFrom deadlock when multiple NoticeResponse received during copy
fixes #21
2020-01-25 20:32:42 -06:00
Jack Christensen
53a5c14d50
Merge pull request #17 from freb/null_unmarshaljson
Support Null Status in UnmarshalJSON
2020-01-25 14:13:00 -06:00
Jeffrey Stiles
06942241c4 Support Null Status in UnmarshalJSON 2020-01-24 16:38:15 -08:00
Jack Christensen
cf87e34792 Add JSON to shopspring-numeric extension 2020-01-24 17:07:41 -06:00
Jack Christensen
b01b35f466 Fix typo in docs 2020-01-24 14:58:59 -06:00
Jack Christensen
0bbaad1348 Add zeronull package for easier NULL <-> zero conversion 2020-01-24 11:23:28 -06:00
Jack Christensen
6124b07bb1 Update changelog 2020-01-23 20:57:13 -06:00
Jack Christensen
f909a64ff5 Update pgproto3 to v2.0.1 2020-01-23 20:55:52 -06:00
Jack Christensen
a4375eb53f Add test that Hijack'ed conn is no longer usable. 2020-01-17 17:42:20 -06:00
Jack Christensen
5952524511 Add Hijack and Construct
fixes #9
2020-01-17 17:38:56 -06:00
Jack Christensen
8be01d690f Make Host comment more precise 2020-01-17 17:38:07 -06:00
Jack Christensen
595780be0f Map io.EOF errors to io.ErrUnexpectedEOF
io.EOF is never expected during valid usage. In addition, database/sql
uses io.EOF as a sentinel value indicating that all rows from a query have been
received.

See https://github.com/jackc/pgx/issues/662.
2020-01-17 16:55:05 -06:00
Jack Christensen
e7dd01e064 Update changelog 2020-01-13 08:48:32 -06:00
Jack Christensen
2582879459 Fix typo - rename ayncClose to asyncClose 2020-01-12 16:28:56 -06:00
Jack Christensen
0df97353b8 Fix racy usage of pgConn.contextWatcher in ayncClose 2020-01-12 16:27:46 -06:00
Jack Christensen
186f4b3539 Update changelog 2020-01-11 19:15:23 -06:00
Jack Christensen
a48e9bf63c Update changelog 2020-01-11 19:07:39 -06:00
Jack Christensen
fd2093cef8 Add statement type convenience methods to CommandTag and optimize
Added convenient way to check whether a statement was a select, insert,
update, or delete. These methods do not allocate.

RowsAffected now does not allocate even when a large number of rows are
affected. It also is multiple times faster, though the absolute change
is inconsequential.
2020-01-11 18:42:31 -06:00
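A usage sketch of the new convenience methods, assuming tag is a pgconn.CommandTag returned from executing a statement (not part of the commit):

    if tag.Delete() {
        // neither call allocates; both operate directly on the tag bytes
        fmt.Printf("deleted %d rows\n", tag.RowsAffected())
    }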
Jack Christensen
b6669ae6dd Add PgError.SQLState method
fixes #15
2020-01-11 18:23:41 -06:00
Jack Christensen
70bb7ab6cb Merge branch 'bakape-master' 2020-01-11 18:10:33 -06:00
Jack Christensen
ed1391568c Merge branch 'master' of git://github.com/bakape/pgconn into bakape-master 2020-01-11 18:08:34 -06:00
bakape
9decdbc2ec Revert nil context support 2020-01-11 16:53:50 +02:00
Jack Christensen
98b3c57584 Try to cancel any in-progress query when a conn is closed by ctx cancel
See https://github.com/jackc/pgx/issues/659
2020-01-08 10:03:54 -06:00
bakape
9372218107 Don't synchronize with context.Background() 2020-01-01 19:34:56 +02:00
bakape
4d345164f1 Branch tests for nil context 2020-01-01 14:36:38 +02:00
bakape
7196234521 Benchmark nil context execution 2020-01-01 14:01:30 +02:00
bakape
89416dd805 Enable passing nil context 2020-01-01 13:09:50 +02:00
Jack Christensen
3e503b7b1a Add PostgreSQL 11 and 12 to the Travis build matrix 2019-12-21 14:41:09 -06:00
Jack Christensen
5fc867a833 Remove unused travis environment variable 2019-12-21 14:40:30 -06:00
Jack Christensen
18d1ed5ee5 Remove PostgreSQL 9.3 from Travis build matrix
PostgreSQL 9.3 is EOL, so it doesn't make sense for pgconn to support it
specifically. There are no known incompatibilities, but it will no longer
be tested.
2019-12-21 14:37:09 -06:00
Jack Christensen
dd53b7488d Restart signalMessage when receiving non-error message in CopyFrom
fixes #21
2019-12-21 11:52:45 -06:00
Jack Christensen
bd0ce203e9 CopyFrom not table test was failing with syntax error 2019-12-21 10:31:27 -06:00
Jack Christensen
c7502af68b Add PostgreSQL time type support
fixes #15
2019-12-19 21:35:35 -06:00
Yuli Khodorkovskiy
1c20e7d36e Fix malformed SASL messages
Per the PG documentation [0], an AuthenticationSASLContinue message has:

    AuthenticationSASLContinue (B)
	Byte1('R')
	    Identifies the message as an authentication request.
	Int32
	    Length of message contents in bytes, including self.
	Int32(11)
	    Specifies that this message contains a SASL challenge.
	Byten
	    SASL data, specific to the SASL mechanism being used.

The current implementation was mistakenly adding the length of msg bytes
in between the Int32(11) and Byten. There was a similar issue for
AuthenticationSASLFinal.

[0] https://www.postgresql.org/docs/current/protocol-message-formats.html
2019-12-17 20:28:01 -05:00
Yuli Khodorkovskiy
e6b823d649 Add missing GSSEncRequest 2019-12-17 20:21:57 -05:00
Jack Christensen
038f263a44 Add remaining int array conversions 2019-11-27 20:23:43 -06:00
Jack Christensen
52cb969ea1 Merge branch 'JohnnyQQQQ-master' 2019-11-27 20:17:12 -06:00
Jean-Philippe Quéméner
9ff83bc41c feat: add tests for less stricter numeric conversion 2019-11-26 17:31:13 +01:00
Jean-Philippe Quéméner
01ae643a48 feat: make conversion between numeric values and arrays less strict
closes https://github.com/jackc/pgx/issues/642
2019-11-26 17:11:54 +01:00
Jack Christensen
32350bd1dc TestConnectCustomLookup must test with TCP connection
Test (correctly) fails if run on a Unix domain socket.
2019-11-18 07:29:57 -06:00
Jack Christensen
eb81d2926b Ignore errors sending Terminate message while closing connection
This mimics the behavior of libpq PGfinish.

refs #637
2019-11-18 07:29:57 -06:00
Jack Christensen
7e1301257e Release 1.0.3 2019-11-16 11:10:32 -06:00
Jack Christensen
be36a7e14b Fix test and avoid change to array signatures
typed_array.go.erb was not updated back in
a8802b16cc593842f5c69b0f7cfb0de11d5cd3a8 when Value, EncodeBinary,
EncodeText, and MarshalJSON were changed to be defined on T instead of
*T. This has been corrected.
2019-11-14 20:40:41 -06:00
Alex Gaynor
0079108e29 Fixes #11 -- support initializing Array types from a slice of the value 2019-11-08 14:59:19 -05:00
Jack Christensen
f711de3591 Release 1.0.2 2019-10-22 20:45:14 -05:00
Jack Christensen
3bc1f8ac57
Merge pull request #10 from jaltavilla/pointer-to-custom-type
Scan into nullable custom types (pointers to pointers).
2019-10-22 20:42:19 -05:00
jaltavilla
af517d68fc Scan into nullable custom types (pointers to pointers). 2019-10-21 17:21:42 -04:00
Jack Christensen
f395b32fa6 Added failing test for pointer to custom type 2019-10-19 11:43:24 -05:00
Jack Christensen
9449f4b081
Merge pull request #17 from skipcloud/sgibson/fix-comment
config: fix ValidateConnect comment
2019-10-16 09:21:38 -05:00
Skip Gibson
81b6ad72f6 config: fix ValidateConnect comment 2019-10-16 10:01:16 +01:00
Jack Christensen
4df62cf3d0 Release v1.1.0 2019-10-12 11:23:48 -05:00
Jack Christensen
fcfd7d09a9 Add PgConn.IsBusy() method 2019-10-12 10:21:46 -05:00
Jack Christensen
0077ff0474
Merge pull request #16 from F21/patch-1
Fix minor errors and reword some sentences for readability
2019-10-07 13:37:34 -05:00
Francis Chuang
6c195c17b2
Fix minor errors and reword some sentences for readability 2019-10-03 09:49:12 +10:00
Jack Christensen
fa5c331c78 Add text format support to bit
fixes #7
2019-09-26 21:12:42 -05:00
Jack Christensen
90d22fb483 Add basic README.md 2019-09-26 21:08:20 -05:00
Jack Christensen
51e58f842b
Merge pull request #5 from quillchat/master
Add tstzrange data type
2019-09-26 21:00:43 -05:00
Jack Christensen
d3b475212c
Merge pull request #4 from alex/patch-1
Added a license -- fixes #3
2019-09-25 09:21:56 -05:00
Alex Gaynor
eb20ab8219 Added a license -- fixes #3 2019-09-20 10:14:48 -04:00
Jack Christensen
3f377acc1e
Merge pull request #14 from furdarius/resolve-hostnames-into-addrs
Validate all addresses resolved from hostname
2019-09-20 08:51:22 -05:00
Jack Christensen
9dc453458c Release v1.0.1 2019-09-19 21:57:09 -05:00
Jack Christensen
52ae698572 Fix daterange oid 2019-09-19 21:43:18 -05:00
Jack Christensen
d6b0287fcd Release v1.0.1 2019-09-19 21:41:20 -05:00
Jack Christensen
f5eead90fc Fix statement cache reuse bug 2019-09-19 21:04:14 -05:00
Andrew Huang
f517670ba5 Add tstzrange data type 2019-09-18 15:01:37 -07:00
Jack Christensen
bbc7f67a6f Update to pgproto3 v2.0.0 2019-09-14 20:22:50 -05:00
Jack Christensen
cf8fe4a477 uuid extension switched to gofrs from satori
Do not encourage use of a library that has a serious outstanding bug:
https://github.com/satori/go.uuid/issues/73
2019-09-14 19:58:54 -05:00
Jack Christensen
99f22ac8e4 Port DSN parser from pgx v3
Original implementation: 2d9d8dc52ac211c6191c08e050c03588aa633038 by
Joshua Barone <joshua.barone@gmail.com>.

Also changed DSN tests to use "dbname" as key rather than "database" as
that is what the PostgreSQL documentation specifies. "database" still
actually works but it should not be encouraged as it is non-standard.
2019-09-14 18:37:33 -05:00
Artemiy Ryabinkov
17d3d592e9
add test for custom lookup func
Signed-off-by: Artemiy Ryabinkov <getlag@ya.ru>
2019-09-14 19:11:26 +03:00
Artemiy Ryabinkov
e538885fa7
skip resolve for unix sockets
Signed-off-by: Artemiy Ryabinkov <getlag@ya.ru>
2019-09-13 17:52:01 +03:00
Artemiy Ryabinkov
b2ca5d8f52
validate all addresses resolved from hostname
Signed-off-by: Artemiy Ryabinkov <getlag@ya.ru>
2019-09-13 17:26:09 +03:00
Artemiy Ryabinkov
51cf0d5480
Merge pull request #2 from jackc/master
Sync
2019-09-13 17:20:09 +03:00
Jack Christensen
f8be2b60ce go.sum changes 2019-09-10 17:25:25 -05:00
Jack Christensen
a8362ef96d Parse postgresql:// protocol 2019-09-10 17:14:04 -05:00
Felix Geisendörfer
a90ef7ed5b fix: AuthenticationMD5Password AuthType 2019-09-08 17:29:06 +02:00
Jack Christensen
eca1e51822 Add more pgfortune output 2019-09-07 11:41:31 -05:00
Jack Christensen
80f2cbce25 Add pgfortune example 2019-09-07 11:37:43 -05:00
Jack Christensen
2f6b8f3f56 Fix context timeout on connect 2019-08-31 17:01:54 -05:00
Jack Christensen
4c03ce451f Add MarshalJSON for FieldDescription 2019-08-31 16:00:41 -05:00
Jack Christensen
2fabfa3c18 Update to newest pgproto3 2019-08-31 15:44:54 -05:00
Jack Christensen
0d1ceed7a6 Refactor authentication message handling 2019-08-31 15:43:07 -05:00
Jack Christensen
439ea11d47 NewFrontend and NewBackend cannot fail 2019-08-31 14:49:55 -05:00
Jack Christensen
6bba3c4810 Update pgproto3 2019-08-31 11:55:02 -05:00
Jack Christensen
2bc8f2e6af Remove pkg/errors package 2019-08-31 11:53:26 -05:00
Jack Christensen
1ba5dcbe01 Support SSLRequest and CancelRequest 2019-08-31 11:48:01 -05:00
Jack Christensen
76538434cf MarshalJSON should be defined on T not *T
Otherwise "%v" format would be used by json.Marshal(T).
2019-08-27 21:13:45 -05:00
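A self-contained illustration of the receiver issue (not pgtype code): with a pointer receiver, json.Marshal of a value falls back to the default encoding because the value stored in the interface is not addressable:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type Price struct{ Cents int }

    // Pointer receiver: only *Price implements json.Marshaler.
    func (p *Price) MarshalJSON() ([]byte, error) {
        return []byte(fmt.Sprintf(`"%d.%02d"`, p.Cents/100, p.Cents%100)), nil
    }

    func main() {
        byValue, _ := json.Marshal(Price{Cents: 199})    // method not used: {"Cents":199}
        byPointer, _ := json.Marshal(&Price{Cents: 199}) // method used: "1.99"
        fmt.Println(string(byValue), string(byPointer))
    }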
Jack Christensen
a8802b16cc Value, EncodeBinary, EncodeText, and MarshalJSON on T instead of *T
Methods defined on T are also available on *T. This change makes Value
consistent with database/sql Value implementations. It also makes Value,
EncodeBinary, and EncodeText more convenient to use because you can
pass T or *T as an argument to a query.

The MarshalJSON change is even more significant because without it
json.Marshal would generate the "%v" format instead of the implemented
MarshalJSON.

Thought this technically changes the interface, because *T will be
automatically dereferenced as needed it shouldn't be a breaking change.

See: https://github.com/jackc/pgx/issues/538 for initial discussion.
2019-08-27 20:46:16 -05:00
Jack Christensen
66aaed7c9e Remove public fields from PgConn
- Access TxStatus via method
- Make Config private

fixes #7
2019-08-27 18:11:50 -05:00
Jack Christensen
138254da5b Refactor errors
- Use strongly typed errors internally
- SafeToRetry(error) streamlines retry logic over ErrNoBytesSent
- Timeout(error) removes the need to choose between returning a context
  and an i/o error
2019-08-27 18:05:50 -05:00
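A sketch of how the new helpers are meant to be used, assuming an established *pgconn.PgConn named pgConn (illustration only):

    _, err := pgConn.Exec(ctx, "select 1").ReadAll()
    if err != nil {
        if pgconn.SafeToRetry(err) {
            // no bytes were sent to the server, so the statement can be retried safely
        }
        if pgconn.Timeout(err) {
            // the failure was a timeout or context deadline, not a server error
        }
        return err
    }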
Jack Christensen
b1e25e4ea4 Add format code helpers to ConnInfo 2019-08-25 00:32:11 -05:00
Jack Christensen
e6cf51b304 Expose min_read_buffer_size config param 2019-08-25 00:22:32 -05:00
Jack Christensen
595d09d6f1 Build fully operational Frontend 2019-08-24 23:57:24 -05:00
Jack Christensen
6feea0c1c5 Replace IsAlive with IsClosed
IsAlive is ambiguous because the connection may be dead and we do not
know it. It implies the possibility of a ping. IsClosed is clearer -- it
does not promise the connection is alive only that it hasn't been
closed.

fixes #2
2019-08-24 23:43:26 -05:00
Jack Christensen
da9fc85c44 Rename PreparedStatementDescription to StatementDescription
PreparedStatementDescription was too long. It also no longer entirely
represents its purpose now that it is also intended for use with
described statements.
2019-08-24 20:39:03 -05:00
Jack Christensen
78abbdf1d7 Rename LRUCache to LRU 2019-08-24 19:48:43 -05:00
Jack Christensen
bcd6b9244a Rename pscache to stmtcache 2019-08-24 19:46:14 -05:00
Jack Christensen
beba629bb5 Fix result reader returned by locked conn 2019-08-24 17:18:29 -05:00
Jack Christensen
2209d2e36a Rename mode constants 2019-08-24 16:27:54 -05:00
Jack Christensen
797a44bf04 Rename BuildFrontendFunc to BuildFrontend
For consistency with other functions supplied in Config.
2019-08-24 16:18:04 -05:00
Jack Christensen
e6bd739067 Add pscache package 2019-08-24 16:02:27 -05:00
Jack Christensen
e540a05760 Fix typo in docs 2019-08-24 14:16:38 -05:00
Jack Christensen
7d83f9ba53 Update pgx for tests
Finish previous go mod dependency bounce.
2019-08-24 13:59:25 -05:00
Jack Christensen
ab885b375b OID type should only be used for scanning and encoding values
It was a mistake to use it in other contexts. This made interop
difficult between packages that depended on pgtype, such as pgx, and
packages that did not, like pgconn and pgproto3. In particular this was
awkward for prepared statements.

Because pgx depends on pgtype and the tests for pgtype depend on pgx
this change will require a couple back and forth commits to get the
go.mod dependencies correct.
2019-08-24 13:49:12 -05:00
Jack Christensen
760dd75542 Require Config to be created by ParseConfig 2019-08-24 09:28:44 -05:00
Jack Christensen
1558987979 ReceiveMessage returns context error instead of io error on cancel 2019-08-22 20:11:27 -05:00
Jack Christensen
4cf1c44817 Fix unknown OID scanning into string and []byte 2019-08-22 18:20:36 -05:00
Jack Christensen
11255efe7a Make ErrorResponseToPgError public 2019-08-20 15:49:57 -05:00
Jack Christensen
d364370a31 Add SendBytes and ReceiveMessage 2019-08-20 14:12:07 -05:00
Jack Christensen
9010c554ed Port 251e6b7730c7b31b600e6fe06162e541f3032604 from pgx v3
commit 251e6b7730c7b31b600e6fe06162e541f3032604
Author: Nicholas Wilson <nicholas.wilson@realvnc.com>
Date:   Wed Jul 24 12:32:43 2019 +0100

    Tidying: make underlyingTimeType consistent with other underlyingFooType

    The first return value is ignored when returning false - so there's no
    point returning an empty time.Time when it can be nil.
2019-08-17 13:33:34 -05:00
Jack Christensen
bcc139a365 Port fc020c24ac9590f6547f8ad1d291fc75b4873a84 from pgx v3
commit fc020c24ac9590f6547f8ad1d291fc75b4873a84
Author: Nicholas Wilson <nicholas.wilson@realvnc.com>
Date:   Wed Jul 24 12:32:18 2019 +0100

    Add support for pgtype.UUID to write into any [16]byte type
2019-08-17 13:30:41 -05:00
Jack Christensen
0a2ed72cf7
Merge pull request #10 from furdarius/configurable-chunkreader-buf
Configurable chunkreader buffer size
2019-08-08 15:49:17 -05:00
Artemiy Ryabinkov
e204afcc8c
Add explanation for default buffer size
Signed-off-by: Artemiy Ryabinkov <getlag@ya.ru>
2019-08-08 13:43:26 +03:00
Artemiy Ryabinkov
f76af93c21
Increase buffer size to 8KB
Signed-off-by: Artemiy Ryabinkov <getlag@ya.ru>
2019-08-08 13:41:51 +03:00
Artemiy Ryabinkov
c9660e30c8
Use go mod download to install deps on travis-ci. Add cache for travis-ci.
Signed-off-by: Artemiy Ryabinkov <getlag@ya.ru>
2019-08-08 13:12:27 +03:00
Artemiy Ryabinkov
dbb7aa8fd5
Add GOPROXY to travis builds to mitigate problems with GitHub, etc.
Signed-off-by: Artemiy Ryabinkov <getlag@ya.ru>
2019-08-08 12:52:04 +03:00
Artemiy Ryabinkov
0a99b543c0
Add BuildFrontendFunc in Config
Signed-off-by: Artemiy Ryabinkov <getlag@ya.ru>
2019-08-08 11:46:25 +03:00
Jack Christensen
f0b479097a Fix missing deferred constraint violations in certain conditions
See https://github.com/jackc/pgx/issues/570.
2019-08-06 17:07:11 -05:00
Artemiy Ryabinkov
fa7e06489b
Add MinReadBufferSize option to Config
Signed-off-by: Artemiy Ryabinkov <getlag@ya.ru>
2019-07-26 11:14:07 +03:00
Artemiy Ryabinkov
b599a26399
Merge pull request #1 from jackc/master
Sync
2019-07-26 10:58:16 +03:00
Jack Christensen
3dec184811 Split ValidateConnect from AfterConnect
This avoids the foot-gun of ParseConfig setting AfterConnect because of
target_session_attrs and the user inadvertently overriding it with an
AfterConnect designed to setup the connection.

Now target_session_attrs will be handled with ValidateConnect.
2019-07-13 10:22:09 -05:00
Jack Christensen
59941377c8 Rename Config.AfterConnectFunc to AfterConnect
No need to include the type in the name.
2019-07-13 09:52:22 -05:00
Jack Christensen
d2440c7fe6 Improve documentation 2019-06-22 16:54:10 -05:00
Jack Christensen
731f1eadf5
Merge pull request #1 from furdarius/linters-fixes
Fix linters notifications
2019-06-22 16:33:39 -04:00
Artemiy Ryabinkov
07904bd774
Remove unnecessary ctx cancel
Signed-off-by: Artemiy Ryabinkov <getlag@ya.ru>
2019-06-22 20:09:55 +03:00
Artemiy Ryabinkov
54ce9c6bb8
Update pgproto3 dependency
Signed-off-by: Artemiy Ryabinkov <getlag@ya.ru>
2019-06-22 14:35:17 +03:00
Artemiy Ryabinkov
529805557f
Fix linters notifications
Signed-off-by: Artemiy Ryabinkov <getlag@ya.ru>
2019-06-22 10:41:01 +03:00
Jack Christensen
432c2951c7 Add a lot of documentation 2019-06-08 19:38:34 -05:00
Jack Christensen
bf3a27ae3f Update to github.com/jackc/chunkreader/v2 2019-06-08 18:34:35 -05:00
Jack Christensen
2c463c0e7d Release v2 2019-06-08 18:32:30 -05:00
Jack Christensen
21088f2cb5 Improve documentation 2019-06-08 18:29:13 -05:00
Jack Christensen
4e6b8011b6 Shorten constructor function names 2019-06-08 18:10:49 -05:00
Jack Christensen
ecdcf4a367 Rename Option to Config 2019-06-08 18:06:29 -05:00
Jack Christensen
4e0ed911f5 Import Fix for -0 numeric
From pgx: d678216f468d1fe4dc28649feacd4b30a176769e
2019-06-08 11:45:47 -05:00
Jack Christensen
18e7e777be Import PortalSuspended from pgx v3
0ab6f80f9929384a8cf6cfc299b43233534eb705
2019-06-08 10:26:26 -05:00
Jack Christensen
a97dd2f9f6 Update test envvar and docs 2019-06-01 09:59:04 -05:00
Jack Christensen
71ec1f7821 Update xerrors package 2019-05-28 06:54:20 -05:00
Jack Christensen
de87e8be96 Fix: Use fallback config TLS config 2019-05-27 12:50:27 -05:00
Jack Christensen
3294a8cf1f Allow empty hstore keys
See pgx commit: 56f4f0b9d319a910016ce044a53f52fcf986ddc6
2019-05-20 16:26:58 -05:00
Jack Christensen
a340d5f15f CopyFail should be frontend message 2019-05-17 13:27:11 -05:00
Jack Christensen
d30cf1c19f Adjust buffer size for CopyFrom 2019-05-09 15:15:40 -05:00
Jack Christensen
1baf0ef57e Refactor context handling into ctxwatch package 2019-05-07 18:05:06 -05:00
Jack Christensen
4acc0f54c6 Import fixes from pgx/pgproto3
Import and adapt commit: fbb8cce
2019-05-03 14:07:55 -05:00
Jack Christensen
1e3961bd0e Fix flickering test 2019-04-24 16:49:52 -05:00
Jack Christensen
23a91ebc90 auth_scram.go file comment should not be part of docs 2019-04-24 16:08:12 -05:00
Jack Christensen
99fd636b8e Finish mod changes for split 2019-04-20 19:20:51 -05:00
Jack Christensen
4ed0de4755 Splitting pgtype into own repo 2019-04-20 19:14:08 -05:00
Jack Christensen
f25878662d Use golang.org/x/xerrors 2019-04-20 17:43:44 -05:00
Jack Christensen
8502a12ac7 Fix go modules
Wow. This is fun. Sure is easy to get modules wrong when upgrading a v2+
project.
2019-04-20 17:41:08 -05:00
Jack Christensen
7e0022ef6b Tag errors if no bytes sent to server 2019-04-20 16:48:24 -05:00
Jack Christensen
0f8e1d30e2 Link context errors and underlying conn errors
Using golang.org/x/xerrors type errors both errors can be exposed.
2019-04-20 15:53:30 -05:00
Jack Christensen
f3b5f6b275 Allow skipping TestConnExecBatchHuge in short mode 2019-04-20 15:34:49 -05:00
Jack Christensen
7a520059d9 Update to remove pgprotov3 ref 2019-04-20 13:01:59 -05:00
Jack Christensen
c116219b62 Update tests to use v2 2019-04-20 13:01:11 -05:00
Jack Christensen
cd629965e6 Use golang.org/x/xerrors 2019-04-20 12:57:52 -05:00
Jack Christensen
6161728ff9 Prepare takes context
Also remove PrepareEx. Its primary usage was for context. Supplying
parameter OIDs is unnecessary when you can type cast in the query SQL.
If it does become necessary or desirable to add options back, it can be
done in a backwards compatible way by adding a varargs as the last
argument.
2019-04-20 11:47:16 -05:00
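A minimal, hedged sketch of the idea; the pgconn-era Prepare API is assumed, and the statement name, table, and column are hypothetical:

```go
package example

import (
	"context"

	"github.com/jackc/pgconn"
)

// prepareWidgetsByTags is a hypothetical helper: passing nil for paramOIDs lets the
// server infer parameter types, while the cast in the SQL text pins $1 to text[]
// without supplying OIDs on the client side.
func prepareWidgetsByTags(ctx context.Context, conn *pgconn.PgConn) (*pgconn.StatementDescription, error) {
	return conn.Prepare(ctx, "widgets_by_tags",
		"select id from widgets where tags @> $1::text[]", nil)
}
```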
Jack Christensen
39e6ff5766 Prevent deadlock with huge batches 2019-04-20 11:11:09 -05:00
Jack Christensen
9f774761ba Fix TestConnLocking 2019-04-20 10:59:50 -05:00
Jack Christensen
3710e52a9a Add named error for conn busy 2019-04-19 16:16:55 -05:00
Jack Christensen
7bb6c2f3e9 Unify locked and closed into status
No longer panic on locking busy conn
2019-04-19 15:52:12 -05:00
Jack Christensen
16412e56e2 0 alloc context to deadline 2019-04-19 14:43:09 -05:00
Jack Christensen
2383561e4d Use 0-alloc pgproto3/v2 2019-04-18 23:17:28 -05:00
Jack Christensen
9b6a681f50 Update go.mod version 2019-04-18 23:15:44 -05:00
Jack Christensen
76e904a5a4 CommandComplete.CommandTag is now []byte
Avoid allocation
2019-04-18 23:12:18 -05:00
Jack Christensen
8d43b38287 RowDescription.Name is now []byte
Avoid allocation
2019-04-18 23:12:00 -05:00
Jack Christensen
9d30dad837 Do not buffer results in benchmarks 2019-04-18 22:52:07 -05:00
Jack Christensen
b6e5b74e2c Reuse one MultiResultReader per connection
Using a PgConn while it is locked now panics, i.e. you must Close any
ResultReader or MultiResultReader first.
2019-04-18 22:50:36 -05:00
Jack Christensen
2acb7b6d4e Reduce mallocs in RowDescription.Decode 2019-04-18 22:33:11 -05:00
Jack Christensen
bc139fadb5 Reuse one ResultReader per connection 2019-04-18 22:01:47 -05:00
Jack Christensen
e948dc3246 Reuse buffer for writing 2019-04-18 21:51:58 -05:00
Jack Christensen
0174907e04 Fix travis unix domain socket test 2019-04-16 20:58:10 -05:00
Jack Christensen
244e114435 Add SCRAM authentication 2019-04-16 20:41:38 -05:00
Jack Christensen
b2a540ca81 Add sufficient support for SCRAM 2019-04-16 20:30:55 -05:00
Jack Christensen
78eda7d567 Remove unused scan float into numeric 2019-04-13 18:06:09 -05:00
Jack Christensen
4e79a104f7 Test domains when registered and unregistered
Fix bug assigning to unknown type.
2019-04-13 17:09:51 -05:00
Jack Christensen
bd85fe870d Hard code standard PostgreSQL types
Instead of needing to introspect the database on connection, preload the
standard OID / type map. Types from extensions (like hstore) and custom
types can be registered by the application developer. Otherwise, they
will be treated as strings.
2019-04-13 16:45:52 -05:00
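A hedged sketch of registering an extension type under the later pgx v4 / pgtype v1 API shape; the OID lookup query and the hstore example are illustrative and not taken from this commit:

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/jackc/pgtype"
	"github.com/jackc/pgx/v4"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, os.Getenv("DATABASE_URL"))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	// Standard types are preloaded; extension types such as hstore are not, so look
	// up the OID on this particular server and register it. Unregistered types would
	// otherwise be treated as strings.
	var hstoreOID uint32
	if err := conn.QueryRow(ctx, "select 'hstore'::regtype::oid").Scan(&hstoreOID); err != nil {
		log.Fatal(err)
	}
	conn.ConnInfo().RegisterDataType(pgtype.DataType{
		Value: &pgtype.Hstore{},
		Name:  "hstore",
		OID:   hstoreOID,
	})
}
```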
Jack Christensen
a0f487bc09 More transcoding type tests
Test every combination of text and binary arguments and text and binary
results.
2019-04-13 14:17:04 -05:00
Jack Christensen
ea65a92de9 Fix long standing text array text format null bug 2019-04-13 14:06:01 -05:00
Jack Christensen
7fbae064bb Remove simple protocol and one round trip query options
It is impossible to guarantee that a query executed with the simple
protocol will behave the same as with the extended protocol. This is
because the normal pgx path relies on knowing the OID of query
parameters. Without this, how to encode a value can only be determined
from the value itself instead of from the combination of value and
PostgreSQL type. For example, how should a []int32 be encoded? It might
be encoded into a PostgreSQL int4[] or json.

Removal also simplifies the core query path.

The primary reason for the simple protocol is for servers like PgBouncer
that may not be able to support normal prepared statements. After
further research it appears that issuing a "flush" instead of a "sync"
after preparing the unnamed statement would allow PgBouncer to work.

The one round trip mode can be better handled with prepared statements.

As a last resort, all original server functionality can still be accessed by
dropping down to PgConn.
2019-04-13 11:39:01 -05:00
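A brief, hedged illustration of the ambiguity described above, assuming a pgx v4-era API; the query and helper name are hypothetical. The cast names the PostgreSQL type in the SQL itself, so the choice between int4[] and json never arises:

```go
package example

import (
	"context"

	"github.com/jackc/pgx/v4"
)

// sumIDs is a hypothetical helper: without a known parameter OID, []int32 could
// plausibly be sent as int4[] or as json. The explicit cast makes the server-side
// type unambiguous regardless of protocol.
func sumIDs(ctx context.Context, conn *pgx.Conn, ids []int32) (int64, error) {
	var sum int64
	err := conn.QueryRow(ctx,
		"select coalesce(sum(n), 0) from unnest($1::int4[]) as n", ids).Scan(&sum)
	return sum, err
}
```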
Jack Christensen
698bd4bf5a Use defer to unlock pgConn in Prepare 2019-04-13 10:30:49 -05:00
Jack Christensen
f779b05f36 Extract scan value to pgtype 2019-04-12 21:31:59 -05:00
Jack Christensen
59003afe8c Fix encode empty value 2019-04-12 21:23:57 -05:00
Jack Christensen
fcbd9e93fa Initial pass at fixing pgtype tests
Many still failing, but at least it compiles now.
2019-04-12 16:58:42 -05:00
Jack Christensen
0ebe322ac3 Extract common code from ExecParams and ExecPrepared 2019-04-05 16:10:11 -05:00
Jack Christensen
7ad3625edd unlock connection when context is pre-canceled 2019-04-05 12:06:59 -05:00
Jack Christensen
408837dcb1 Handle extended protocol with too many arguments 2019-04-05 11:47:31 -05:00
Jack Christensen
c745509c59 Rename test 2019-04-05 11:27:04 -05:00
Jack Christensen
0ac82007fb Use extracted packages with Go modules 2019-04-05 10:59:47 -05:00
Jack Christensen
ed7d91dc98 Force Go modules for Travis 2019-03-30 17:13:23 -05:00
Jack Christensen
3d9e42d74c Replace chan based conn locking with bool
This is conceptually simpler and will lead to error messages instead of
deadlocks.
2019-03-30 17:09:39 -05:00
Jack Christensen
444bd6deaf Context cancellation is fatal during query 2019-03-30 16:44:20 -05:00
Jack Christensen
b2fc69d32f Import pgx travis config 2019-03-30 13:03:28 -05:00
Jack Christensen
08fcc7f273 Add license and readme 2019-03-30 12:59:04 -05:00
Jack Christensen
fbdfccf1f9 Use Go modules 2019-03-30 12:55:56 -05:00
Jack Christensen
97a0ac4ddc Clarify ChunkReader.Next contract 2019-03-30 12:52:55 -05:00
Jack Christensen
bb06e6b3ff Decouple github.com/jackc/chunkreader 2019-03-30 12:46:56 -05:00
Jack Christensen
127e997696 Add travis CI 2019-03-30 12:33:04 -05:00
Jack Christensen
b9d0da5558 Add readme, license, and docs 2019-03-30 12:32:39 -05:00
Jack Christensen
16176b5151 Add go module support 2019-03-30 12:26:24 -05:00
Jack Christensen
517cfde605 Add Travis CI 2019-03-30 12:21:36 -05:00
Jack Christensen
811a7d92d6 Add Go module support 2019-03-30 12:21:06 -05:00
Jack Christensen
65a3248f5c Add license and readme 2019-03-30 12:20:18 -05:00
Jack Christensen
e2207bfbaf Add some documentation 2019-03-30 12:19:49 -05:00
Jack Christensen
8d9c2a3daf Add travis ci 2019-03-30 12:04:38 -05:00
Jack Christensen
8abf4a9eaa Fix links in readme 2019-03-30 12:04:23 -05:00
Jack Christensen
715eaaf2ed Add go module support 2019-03-30 12:03:34 -05:00
Jack Christensen
2a11259555 Add readme and license 2019-03-30 12:03:04 -05:00
Jack Christensen
79ffab9836 All writes errors are fatal 2019-01-28 23:13:03 -06:00
Jack Christensen
9229e03d06 Partial conversion of pgx to use pgconn 2019-01-26 16:46:30 -06:00
Jack Christensen
b59437f6ec writeAll dies on permanent net errors 2019-01-26 16:45:06 -06:00
Jack Christensen
f5aecdd499 Extract writeAll 2019-01-26 12:33:51 -06:00
Jack Christensen
440fbf1581 Include missed changes 2019-01-26 12:21:54 -06:00
Jack Christensen
96c85cf0c3 Recover from context cancellation during CopyFrom 2019-01-26 12:20:36 -06:00
Jack Christensen
38671ea106 Properly abort CopyFrom on reader error 2019-01-26 10:21:16 -06:00
Jack Christensen
01b54c7cb6 Properly abort CopyFrom on reader error 2019-01-26 10:21:16 -06:00
Jack Christensen
3683e4a0a1 Move CopyFrom to pgconn 2019-01-19 17:24:48 -06:00
Jack Christensen
c9f985c1e4 Add PgConn.EscapeString 2019-01-19 15:44:03 -06:00
Jack Christensen
e15528c419 Remove obsolete comment 2019-01-19 15:41:42 -06:00
Jack Christensen
c447ff4e79 Use NoError instead of Nil for assertions 2019-01-19 14:51:07 -06:00
Jack Christensen
19ef57ad9a Add PgConn.CopyTo 2019-01-19 14:49:39 -06:00
Jack Christensen
e441d4828c Fix doc typo 2019-01-19 14:49:26 -06:00
David Bariod
738f3a1027 support binding of []int type to array integer 2019-01-19 11:44:30 -06:00
Josh Leverette
66af2227c0 Fix encoding of ErrorResponse 2019-01-19 11:42:30 -06:00
Jack Christensen
edfd837ba4 Add PgConn.WaitForNotification 2019-01-14 20:51:53 -06:00
Jack Christensen
cd4b0025c3 Add listen/notify to pgconn 2019-01-14 20:39:10 -06:00
Jack Christensen
b3cde6830f Fix die on receive message error 2019-01-14 20:27:34 -06:00
Jack Christensen
9c36fa1e50 Fix prepare failure 2019-01-12 16:16:47 -06:00
Jack Christensen
bd777fe20c Add custom context cancellation hook 2019-01-12 11:37:13 -06:00
Jack Christensen
c6a73a469a Add example 2019-01-05 18:47:50 -06:00
Jack Christensen
406e95650a Add more docs 2019-01-05 18:40:33 -06:00
Jack Christensen
2959411c41 CommandTag is string 2019-01-05 18:06:25 -06:00
Jack Christensen
2c8971b382 Rename some types and methods 2019-01-05 18:01:57 -06:00
Jack Christensen
379be3508b Add some docs for batch 2019-01-05 17:46:47 -06:00
Jack Christensen
04ee3b8cbd Remove Pg prefix for a couple types 2019-01-05 17:41:43 -06:00
Jack Christensen
cddf011806 Big restructure to better handle context cancel 2019-01-05 17:37:28 -06:00
Jack Christensen
64e80f1f72 Add benchmarks when cancellable 2019-01-02 18:16:20 -06:00
Jack Christensen
a24d764440 Back out of some over optimization 2019-01-02 18:16:08 -06:00
Jack Christensen
fa5e1d3ec4 Back out of some over optimization 2019-01-02 18:16:08 -06:00
Jack Christensen
ec622237e9 Extract startOperation 2019-01-02 14:56:24 -06:00
Jack Christensen
de2b9bb301 Tweak RecoverFromTimeout docs 2019-01-02 14:20:10 -06:00
Jack Christensen
475720d172 Fix typo 2019-01-02 14:10:57 -06:00
Jack Christensen
b213299a92 Add ensureReadyForQuery to pgconn 2019-01-02 13:59:00 -06:00
Jack Christensen
460946d662 Move notice handling to pgconn 2019-01-02 13:15:26 -06:00
Jack Christensen
6d2fa9c5cf Handle empty query response 2019-01-02 12:28:11 -06:00
Jack Christensen
d545e0704e Prepare returns description 2019-01-01 18:03:20 -06:00
Jack Christensen
547741ae6a Fix bug with ready for query counter 2019-01-01 17:08:56 -06:00
Jack Christensen
7986e2726d pgx uses pgconn.CommandTag instead of own definition 2019-01-01 16:55:48 -06:00
Jack Christensen
fdbf2ba728 Use pgproto3 instead of custom message encoders 2019-01-01 14:32:42 -06:00
Jack Christensen
11964a6ec3 Add non-buffered benchmark 2019-01-01 14:17:17 -06:00
Jack Christensen
bd2a5d97d0 Add benchmark to pgconn 2019-01-01 14:10:24 -06:00
Jack Christensen
914766af9b Use result readers in next/get fashion 2019-01-01 14:10:16 -06:00
Jack Christensen
9af9f57f15 Remove another allocation 2019-01-01 13:56:09 -06:00
Jack Christensen
7bd9b776cd Remove another allocation 2019-01-01 13:52:04 -06:00
Jack Christensen
4f00c6aebd Add pgconn stress test 2019-01-01 13:49:12 -06:00
Jack Christensen
f225b3d4a1 Avoid allocating strings in common message types 2019-01-01 13:47:37 -06:00
Jack Christensen
8df3f2010f Avoid allocating strings in common message types 2019-01-01 13:47:37 -06:00
Jack Christensen
b793875c1f Extract bufferLastResult
Buffered exec methods need to read until the count of pending ReadyForQuery
messages is 0. Factor this common logic out.

Add stress test for PgConn.
2019-01-01 13:16:50 -06:00
Jack Christensen
51d654d32a Format code constants already in pgproto3 2019-01-01 11:35:39 -06:00
Jack Christensen
54df8c6918 Add ExecPrepared 2019-01-01 11:32:56 -06:00
Jack Christensen
13323df0dd Add batched query test 2018-12-31 20:08:11 -06:00
Jack Christensen
5f69253174 Added ExecParams 2018-12-31 19:59:32 -06:00
Jack Christensen
650aa7059a Fix broken tests 2018-12-31 18:45:51 -06:00
Jack Christensen
2f156c7add Access PID and SecretKey via method 2018-12-31 18:03:55 -06:00
Jack Christensen
f5faed6568 Access underlying net.Conn via method
Also remove some dead code.
2018-12-31 18:00:08 -06:00
Jack Christensen
49c9674102 PG error type is *pgconn.PgError 2018-12-31 17:46:56 -06:00
Jack Christensen
bcc3da490c Run tests in parallel 2018-12-31 17:34:44 -06:00
Jack Christensen
53175a7bad Add cancel request to PgConn
RecoverFromTimeout automatically tries to cancel in progress requests.
2018-12-31 17:32:04 -06:00
Jack Christensen
4ee6fef452 Add context to potentially blocking methods 2018-12-31 17:17:11 -06:00
Jack Christensen
4e12c08b04 Use buffered exec 2018-12-31 14:14:40 -06:00
Jack Christensen
b419493e5c Add pgconn.Exec 2018-12-31 13:32:26 -06:00
Jack Christensen
8c574c39f8 Add support for libpq target_session_attrs
Generalize AcceptConnFunc into AfterConnectFunc.
2018-12-31 12:15:29 -06:00
Jack Christensen
5ae6310b05 Add AcceptConnFunc for filtering HA connections 2018-12-31 11:39:22 -06:00
Jack Christensen
1836f7be46 Support comma separated hosts and ports like libpq
Also add test and fix the fallback config implementation.
2018-12-31 11:14:13 -06:00
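A hedged sketch combining this with the target_session_attrs support added above; host names and credentials are illustrative. Hosts and ports are paired positionally, as in libpq, and each host is tried in order:

```go
package main

import (
	"context"
	"log"

	"github.com/jackc/pgconn"
)

func main() {
	// Each host is tried in turn until one accepts the connection and, with
	// target_session_attrs=read-write, passes the read-write check.
	connString := "host=pg1.example.com,pg2.example.com port=5432,5433 " +
		"user=app password=secret dbname=app target_session_attrs=read-write"

	conn, err := pgconn.Connect(context.Background(), connString)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(context.Background())
}
```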
Jack Christensen
c4080cce35 Move connection tests to pgconn 2018-12-30 21:10:06 -06:00
Jack Christensen
beeb69ff0b Restructure connect process
- Moved lots of connection logic to pgconn from pgx
- Extracted pgpassfile package
2018-12-30 16:55:56 -06:00
Jack Christensen
5d17ec4156 Rename base package to pgconn 2018-12-28 17:09:56 -06:00
maxarchx
f9440700e5 Apply UUID string length check before parsing 2018-11-30 15:13:43 +05:00
Jack Christensen
64b1ecf96f Type modifier should be int32 not uint32 2018-09-22 07:43:18 -05:00
Jack Christensen
6c9b75d49b Merge pull request #454 from regeda/macaddr-array
macaddr array is introduced
2018-09-01 22:46:13 -04:00
Jack Christensen
8f7c03a47f Fix: do not silently ignore assign NULL to *string
AssignTo can only assign NULL to a **string. Previous code tried to
assign nil to a *string, which did nothing. Correct behavior is to
detect this as an error.
2018-09-01 18:40:42 -05:00
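A short conceptual sketch in plain Go of why the destination must be a **string; no pgtype API is implied and the helper is hypothetical:

```go
package main

import "fmt"

// assignNullable mimics the behavior described above: NULL is represented by
// setting a *string to nil, which requires receiving a **string.
func assignNullable(dst **string, value string, isNull bool) {
	if isNull {
		*dst = nil // representable: the caller's *string becomes nil
		return
	}
	s := value
	*dst = &s
}

func main() {
	var name *string
	assignNullable(&name, "", true)
	fmt.Println(name == nil) // true: NULL is visible to the caller

	// With only a *string there is nowhere to record NULL; silently doing nothing
	// (the old behavior) hides the problem, so treating it as an error is correct.
	assignNullable(&name, "alice", false)
	fmt.Println(*name) // alice
}
```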
Anthony Regeda
88d317af97 macaddr-array macaddr array is introduced 2018-09-01 16:06:20 +03:00
Murat Kabilov
5f39bbaf35 Add *Conn.CopyFromTextual, CopyToTextual, which use textual format for copying data 2018-07-31 08:57:53 +02:00
Damir Vandic
79ba0275de Add the type of the value in all decode error messages 2018-06-04 21:02:20 +02:00
Tarik Demirci
3ec4c6ca23 Allow setting nil to pgtype.Bool 2018-05-17 12:22:48 +02:00
Anthony Regeda
5524d654d3 numeric_with_uint64 numeric array supports both types int64 and uint64 2018-04-24 16:31:31 +03:00
Jack Christensen
9bb19fd8e7 pgtype.JSON(B).Value now returns []byte
Allows scanning jsonb column into *json.RawMessage.

fixes #409
2018-04-14 09:17:56 -05:00
Jack Christensen
46d0f7e1c8 Fix precision loss for text format geometric types
fixes #399
2018-03-17 10:26:03 -05:00
Jack Christensen
898fc86e25 Skip line test of PG 9.3 2018-03-08 08:05:54 -05:00
Jack Christensen
7ed0a8732c Update shopspring decimal integration test
New version of shopspring/decimal improves precision. This broke a test.
2018-03-08 07:40:25 -05:00
Jack Christensen
f078754e05 Skip test based on missing line type
Instead of explicit server version checking. The Ubuntu-installed version
string is not parsable by go-version, e.g.

10.2 (Ubuntu 10.2-1.pgdg16.04+1)
2018-02-16 21:39:19 -06:00
Jack Christensen
44bb11de82 Import encoding/json package 2018-01-13 18:14:42 -06:00
Jack Christensen
c01e12e351 Merge branch 'patch-1' of https://github.com/eruca/pgx into eruca-patch-1 2018-01-13 18:12:35 -06:00
Iurii Krasnoshchok
91bb74b526 Add support for bpchar type 2018-01-02 12:29:40 +01:00
eruca
fbc0fc7e3e UnmarshalJSON for Int8 missing 2017-12-29 21:09:22 +08:00
Jack Christensen
81b3e79897 Merge pull request #371 from ferhatelmas/gofmt-simplify
Run gofmt with simplify flag
2017-12-23 10:32:44 -06:00
Jack Christensen
5bd04dc568 Add test for record with unknown OID 2017-12-23 10:24:09 -06:00
ferhat elmas
645e646183 Run gofmt with simplify flag 2017-12-21 23:45:26 +01:00
Iurii Krasnoshchok
e22e7e67ec Return error on unknown oid while decoding record instead of panic 2017-12-20 14:47:52 +01:00
Jack Christensen
b3d0cbd0e6 Fix reading interrupted messages
When a message is received and a timeout occurs after reading the
header but before reading the entire body, the connection state could
be corrupted because the header has already been consumed. The next
read would treat the body of the previous message as the header of the next.

fixes #348
2017-12-16 13:45:22 -06:00
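A conceptual sketch of the failure mode, not pgx code: once the 5-byte header has been consumed, a timeout mid-body leaves the stream misaligned unless the partially read message is resumed or the connection is abandoned.

```go
package example

import (
	"encoding/binary"
	"fmt"
	"io"
)

// readMessage reads one length-prefixed message: a 1-byte type, a 4-byte big-endian
// length (which includes itself), then the body.
func readMessage(r io.Reader) (msgType byte, body []byte, err error) {
	var header [5]byte
	if _, err = io.ReadFull(r, header[:]); err != nil {
		return 0, nil, err
	}
	msgType = header[0]
	bodyLen := int(binary.BigEndian.Uint32(header[1:])) - 4

	body = make([]byte, bodyLen)
	if _, err = io.ReadFull(r, body); err != nil {
		// The header was consumed but the body is incomplete: any naive future read
		// would treat leftover body bytes as a new header, so the error must be
		// treated as fatal (or the partial state retained and resumed, which is what
		// the fix above does).
		return 0, nil, fmt.Errorf("connection corrupted mid-message: %w", err)
	}
	return msgType, body, nil
}
```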
Jack Christensen
a01653c3df Add support for bit type 2017-11-18 21:13:34 -06:00
Jack Christensen
4e6de12a62 Fix missing interval mapping 2017-11-17 09:37:57 -06:00
Jack Christensen
3f02d66ae0 Detect erroneous JSON(B) encoding
JSON(B) automatically marshals any value. Avoid marshalling values of
pgtype.JSON and pgtype.JSONB. The caller certainly meant to call on a
pointer.

See https://github.com/jackc/pgx/issues/350 for discussion.

refs #350
2017-11-04 19:09:24 -05:00
Jack Christensen
4e334054dd Fix ranges with text format where end is unbounded
fixes #342
2017-11-04 14:03:46 -05:00
Jack Christensen
5ab54cb24f Add String method to pgtype.BoundType
Character representation is much easier to read than numeric.
2017-11-04 13:47:03 -05:00
Jack Christensen
6618ea669e Use named value instead of literal 2017-11-04 13:37:47 -05:00
Jack Christensen
5ba28cf2c5 Add support for array of enum
fixes #338
2017-10-17 20:31:11 -05:00
Jack Christensen
3453586e89 Add UnmarshalJSON to a few types 2017-09-29 15:26:37 -05:00
Jack Christensen
2e630dddf9 Fix decoding row with same type values
Row decoding was reusing and returning connection-owned values. Instead,
allocate a new value each time.

fixes #313
2017-08-29 15:38:45 -05:00
Jack Christensen
703ce85513 Generate UUIDArray from template
- Fix error in Set
- Specifically handle untyped nil
2017-08-29 14:33:25 -05:00
Kelsey Francis
2dfcf74f62 Add UUIDArray type
Also change UUID.Set() to convert nil to NULL in order for
UUIDArray.Set() to support converting [][]byte slices that contain nil.
2017-08-27 19:36:53 -07:00
Jack Christensen
9ee71598ee Merge pull request #306 from cyberdelia/rows
Add more ColumnType support
2017-08-22 14:21:47 -05:00
Timothée Peignier
43c2b979d0 Add more ColumnType support 2017-08-18 18:22:08 -07:00
Wei Congrui
f18a22e066 Fix numeric EncodeBinary bug 2017-08-18 15:20:39 +08:00
Jack Christensen
10fa3a6497 Return error on MarshalJSON of status Undefined
Previously "undefined" was returned as a value. While this is a
valid JavaScript value, it is not valid JSON.
2017-08-12 16:40:18 -05:00
Jack Christensen
ffa9ff2213 Use github.com/pkg/errors 2017-06-04 21:30:03 -05:00
Jack Christensen
a5f166bd21 Use github.com/pkg/errors 2017-06-04 21:30:03 -05:00
Jack Christensen
654adbdd4a Use Go casing convention for CID/TID/XID/CIDR 2017-06-03 12:01:49 -05:00
Jack Christensen
01fa5960b2 Use Go casing convention for ACLItem 2017-06-03 11:58:40 -05:00
Jack Christensen
aab8b77215 Use Go casing convention for JSON(B) 2017-06-03 11:57:14 -05:00
Jack Christensen
496c5a4dff Use Go casing convention for UUID 2017-06-03 11:54:57 -05:00
Jack Christensen
2140814606 Use Go casing convention for OID 2017-06-03 11:53:49 -05:00
Jack Christensen
8e404a02a3 Ensure pgproto3.Parse.Decode overwrites itself entirely 2017-05-29 11:24:49 -05:00
Jack Christensen
d6312305ae Replace MarshalBinary with Encode
This new approach can avoid allocations.
2017-05-26 17:00:44 -05:00
Jack Christensen
071de0b674 Fix shopspring-numeric test 2017-05-20 09:46:06 -05:00
Jack Christensen
1f1677ba5e Ensure shopspring-numeric tests run 2017-05-20 09:44:15 -05:00
Jack Christensen
a3e05ea29f Fix TestHstoreArrayTranscode 2017-05-20 08:42:39 -05:00
Jack Christensen
97a927bb03 Fix TestIntervalNormalize 2017-05-20 08:39:53 -05:00
Jack Christensen
6ba93d4e54 Fix TestNumericNormalize 2017-05-20 08:38:27 -05:00
Jack Christensen
4c51d6af82 Test &pgtype.QChar 2017-05-20 08:36:40 -05:00
Jack Christensen
fe36df4fff Uncomment Hstore tests 2017-05-20 08:34:20 -05:00
Jack Christensen
b1934ad4c2 Add flush and close messages to pgproto3 2017-05-19 17:31:22 -05:00
Jack Christensen
e45a42c7ef Do not create empty slices in Bind.Decode 2017-05-19 15:50:27 -05:00
Jack Christensen
c6aef15181 Add basic pgmock support
Primarily useful for testing pgx itself. Design is still subject to change.
2017-05-13 17:56:54 -05:00
Jack Christensen
80edb27dee Fix Bind Decode to advance rp 2017-05-13 16:19:55 -05:00
Jack Christensen
45b67f9b95 Fix issues identified by go vet 2017-05-06 19:48:03 -05:00
Jack Christensen
2d209bd579 Remove read functions from pgio and update docs 2017-05-06 08:54:47 -05:00
Jack Christensen
61d4386931 Update pgproto3 to enable pgmock 2017-05-06 08:48:40 -05:00
Jack Christensen
6f398d8bb5 Update pgproto3 to enable pgmock 2017-05-06 08:48:40 -05:00
Jack Christensen
6b906ca870 Refactor pgio and types to append buffers 2017-05-02 20:38:26 -05:00
Jack Christensen
d4fe3edf84 Refactor pgio and types to append buffers 2017-05-02 20:38:26 -05:00
Jack Christensen
eb9fc6e7a5 Fix queries with more than 32 columns
fixes #270
2017-05-01 19:46:37 -05:00
Jack Christensen
d25abf5674 Add pgproto3.Backend 2017-05-01 18:11:55 -05:00
Jack Christensen
ab21bc4ec7 pgtype DecodeText and DecodeBinary do not copy
They now take ownership of the src argument.

Needed to change Scan to make a copy of []byte arguments as lib/pq apparently
gives Scan a shared memory buffer.
2017-04-29 12:23:51 -05:00
Jack Christensen
61026b7c21 Reduce allocations and copies in pgproto3
Altered chunkreader to never reuse memory.

Altered pgproto3 to not copy memory when decoding. Renamed UnmarshalBinary to
Decode because of changed semantics.
2017-04-29 11:55:14 -05:00
Jack Christensen
eff55451cf Reduce allocations and copies in pgproto3
Altered chunkreader to never reuse memory.

Altered pgproto3 to not copy memory when decoding. Renamed UnmarshalBinary to
Decode because of changed semantics.
2017-04-29 11:55:14 -05:00
Jack Christensen
de9bb7e6d8 Use flyweight pattern for pgproto3 messages 2017-04-29 11:01:54 -05:00
Jack Christensen
4e2900b774 Introduce pgproto3 package
pgproto3 will wrap the message encoding and decoding for the PostgreSQL
frontend/backend protocol version 3.
2017-04-29 10:02:38 -05:00
Jack Christensen
fa68e44e5f Use pgx.ParseConnectionString in test helper
This allows using URI or DSN for database connection information. DSN allows
using unix domain sockets.
2017-04-14 17:21:32 -05:00
Jack Christensen
851479b0d3 Replace DATABASE_URL with PGX_TEST_DATABASE
PGX_TEST_DATABASE is much less likely to collide with another environment
variable. This is especially valuable when using direnv to automatically set
environment variables.
2017-04-14 17:11:39 -05:00
Jack Christensen
f0e9337d8f Add satori-uuid type
Make pgtype.EncodeValueText public
2017-04-14 16:46:39 -05:00
Jack Christensen
d94f8daeb1 Use pointer methods for all struct pgtypes
Now there is no need to know whether certain interfaces are implemented by the
struct or a pointer to the struct.
2017-04-14 13:08:05 -05:00
Jack Christensen
e380de7cd1 Finish extraction of pgtype test helpers 2017-04-14 12:38:33 -05:00
Jack Christensen
b49035fdc1 Add shopspring.Numeric
This adds PostgreSQL numeric mapping to and from
github.com/shopspring/decimal.

Makes pgtype.NullAssignTo public as external types need this functionality.

Begin extraction of pgtype testing functionality so it can easily be used by
external types.
2017-04-14 12:24:44 -05:00
Jack Christensen
92474ef292 Add MarshalJSON to a few types 2017-04-13 21:58:19 -05:00
Jack Christensen
e76cf5617f Skip line tests when server version < PG 9.4 2017-04-11 20:16:41 -05:00
Jack Christensen
7ff405ff84 Add simple protocol support with (Query|Exec)Ex 2017-04-10 08:58:51 -05:00
Jack Christensen
68fd815778 Add pgtype.Varbit 2017-04-05 07:54:41 -05:00
Jack Christensen
c31fe24693 Fix pgtype.Inet.AssignTo assigning reference
AssignTo should always assign copy.
Added documentation for AssignTo interface.
2017-04-04 21:13:00 -05:00
Jack Christensen
3631b076fe Add pgtype.Macaddr 2017-04-04 21:07:27 -05:00
Jack Christensen
d99d09b0d1 Add pgtype.Circle
Also rename Point.Vec2 to Point.P to conform to rest of geometric types.
2017-04-04 20:39:48 -05:00
Jack Christensen
6a0b41e50a Add pgtype.Polygon 2017-04-04 20:30:04 -05:00
Jack Christensen
8cbf667b8e Add pgtype.Uuid 2017-04-04 20:24:01 -05:00
Jack Christensen
f4bdd8300f Add path 2017-04-04 08:40:41 -05:00
Jack Christensen
d8a778811e Add pgtype.Lseg 2017-04-04 08:16:02 -05:00
Jack Christensen
2fc89c69e9 Add pgtype.Line 2017-04-04 08:04:54 -05:00
Jack Christensen
dccbbc6a40 Add pgtype.Box 2017-04-03 19:47:36 -05:00
Jack Christensen
0079bd5095 Add pgtype.Point 2017-04-03 17:53:32 -05:00
Jack Christensen
cc873a0bcf Add pgtype.NumericArray 2017-04-03 07:46:45 -05:00
Jack Christensen
066562fc89 Add pgtype.Numrange 2017-04-03 07:35:19 -05:00
Jack Christensen
f7191d3a56 Add pgtype.Numeric 2017-04-01 23:33:04 -05:00
Jack Christensen
d25c346d6d Add interval type 2017-03-31 20:11:18 -05:00
Jack Christensen
94971db9e2 Add daterange, tsrange, and tstzrange 2017-03-24 14:17:49 -05:00
Jack Christensen
a021a7717a Add Int8range
Add code generation for ranges
2017-03-24 13:36:10 -05:00
Jack Christensen
be04ad7b21 Add int4range 2017-03-23 18:41:52 -05:00
Jack Christensen
0e51991aaa Skip jsonb test if no jsonb type 2017-03-20 08:58:28 -05:00
Jack Christensen
4645475800 Run goimports as part of array gen script 2017-03-20 08:00:43 -05:00
Jack Christensen
6f9ef694d0 Add database/sql support to pgtype 2017-03-18 21:11:43 -05:00
Jack Christensen
3acd3d8546 Optionally generate binary array format 2017-03-18 17:38:58 -05:00
Jack Christensen
85f7df1e81 Factor out duplication in AssignTo 2017-03-18 16:54:08 -05:00
Jack Christensen
0f92da1f24 Remove unneeded idea file 2017-03-18 15:51:16 -05:00
Jack Christensen
d516894475 Simplify []byte scanning 2017-03-18 14:42:36 -05:00
Jack Christensen
df8f8e17cf Add pgtype.HstoreArray
This required restructuring array types to look up the OID of the element
instead of hard-coding it, because hstore has a variable OID.
2017-03-18 12:40:54 -05:00
Jack Christensen
6e21cb00fe Add pgtype.Record and prerequisite restructuring
Because reading a record type requires the decoder to be able to look up the
OID-to-type mapping, and because types such as hstore have OIDs that are not
fixed between different PostgreSQL servers, it was necessary to restructure the
pgtype system so all encoders and decoders take a *ConnInfo that includes
OID/name/type information.
2017-03-18 12:01:16 -05:00
Jack Christensen
b31d409dc2 Move not null Oid to pgtype
In preparation to ConnInfo implementation.
2017-03-13 21:34:38 -05:00
Jack Christensen
937368fd5f Fix error message for hstore 2017-03-13 20:23:17 -05:00
Jack Christensen
f9e5879072 Move hstore to pgtype
Also implement binary format
2017-03-12 17:06:06 -05:00
Jack Christensen
45b33519d7 Add pgtype GenericText and GenericBinary
Rows.Values uses this for unknown types.
2017-03-11 20:28:14 -06:00
Jack Christensen
a79b498533 Remove Set self support from pgtype
Set having the capability to assign an object of the same type was
inconsistently implemented. Some places it was not implemented at all, some
places it was a shallow copy, some places a deep copy. Given that it doesn't
seem likely to ever be used, and that if it is needed it is easy enough to do
outside of the library, this code has been removed.
2017-03-11 20:18:56 -06:00
Jack Christensen
b94ccae4c9 Document that Decode* must not keep src
- Also fix Bytea.DecodeBinary to not keep src.
2017-03-11 20:12:47 -06:00
Jack Christensen
9b9361848d Expand pgtype.Value interface
- Include and rename ConvertFrom to Set
- Add Get
- Include AssignTo
2017-03-11 19:53:45 -06:00
Jack Christensen
7985ca5f87 Add json/jsonb to pgtype 2017-03-11 18:46:51 -06:00
Jack Christensen
666af9ead5 Name PG types as words
Though this doesn't follow Go naming conventions exactly, it makes names more
consistent with PostgreSQL and easier to read. For example, TIDOID becomes
TidOid. In addition, this is one less breaking change in the move to V3.
2017-03-11 17:03:23 -06:00
Jack Christensen
44e206ab5b Rename array files 2017-03-11 16:53:07 -06:00
Jack Christensen
a231c5461f Move Tid to pgtype 2017-03-11 16:48:37 -06:00
Jack Christensen
2f63514c47 Move ACLItem to pgtype 2017-03-11 16:13:05 -06:00
Jack Christensen
86620c5e91 Add pgtype.ByteaArray
Also fix up quoting array elements for text arrays.
2017-03-11 13:32:32 -06:00
Jack Christensen
e654d1f0fc pgtype.Encode(Binary|Text) do not write length
To aid in composability, these methods no longer write their own length. This
is especially useful for text formatted arrays and may be useful for future
database/sql compatibility. It also makes the code a little simpler as the
types no longer have to compute their own size.

Along with this, these methods no longer encode NULL themselves. They now
return a boolean indicating whether the value is NULL. This also benefits text
array encoding, as numeric arrays require NULL to be exactly `NULL` while
string arrays require NULL to be `"NULL"`.
2017-03-11 12:45:30 -06:00
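A hedged, conceptual sketch of the composability argument; the interface shape is illustrative rather than the exact pgtype signature:

```go
package example

import (
	"bytes"
	"io"
)

// textEncoder writes a value's text representation to w and reports whether the
// value is NULL, leaving any length prefix or NULL spelling to the caller.
type textEncoder interface {
	EncodeText(w io.Writer) (null bool, err error)
}

// writeArrayElement shows why the element not framing itself helps: as the commit
// above notes, the required spelling of a NULL element differs by array type, and
// only the container knows which spelling it needs.
func writeArrayElement(buf *bytes.Buffer, e textEncoder, quotedNull bool) error {
	var payload bytes.Buffer
	null, err := e.EncodeText(&payload)
	if err != nil {
		return err
	}
	if null {
		if quotedNull {
			buf.WriteString(`"NULL"`)
		} else {
			buf.WriteString("NULL")
		}
		return nil
	}
	buf.Write(payload.Bytes())
	return nil
}
```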
Jack Christensen
361a54abb7 Decode(Text|Binary) now accepts []byte instead of io.Reader 2017-03-10 16:08:47 -06:00
Jack Christensen
bb7122d4a8 Fix typed_array_gen.sh typo 2017-03-09 21:09:36 -06:00
Jack Christensen
8fa9afbb36 Add bytea 2017-03-09 21:07:40 -06:00
Jack Christensen
eea6e5a64c Move "char" to pgtype 2017-03-07 19:39:57 -06:00
Jack Christensen
94612427ed Move Name to pgtype 2017-03-06 17:55:20 -06:00
Jack Christensen
b139307f5b Move OID to pgtype 2017-03-05 13:05:49 -06:00
Jack Christensen
6f9aef67c7 Fix comment on XID 2017-03-05 09:18:50 -06:00
Jack Christensen
603d829611 Extract pguint32 2017-03-05 09:13:25 -06:00
Jack Christensen
8922421ad6 Move XID to pgtype 2017-03-05 09:07:07 -06:00
Jack Christensen
3aad9c08d5 Generalize array template 2017-03-05 08:59:26 -06:00
Jack Christensen
0437c9f5d6 Move cid to pgtype 2017-03-04 22:12:03 -06:00
Jack Christensen
4254e5f2d2 Add text to pgtype 2017-03-04 21:20:56 -06:00
Jack Christensen
93e1715082 Add inet and cidr to pgtype 2017-03-04 17:33:41 -06:00
Jack Christensen
0f115477de Add float4, float8 and arrays 2017-03-04 13:29:04 -06:00
Jack Christensen
39b60605ae Add timestamp to pgtype 2017-03-04 12:36:24 -06:00
Jack Christensen
34c5070371 Add arrays to all other pgtypes 2017-03-04 11:48:53 -06:00
Jack Christensen
a2843aba53 Add tests for pgtype.Int2Array 2017-03-03 19:19:31 -06:00
Jack Christensen
c4e08dab42 Add pgtype error cases 2017-03-03 18:39:52 -06:00
Jack Christensen
db69aa6f72 Add tests to more pgtypes
Int4, Int8, Date, Timestamptz
2017-03-03 18:23:26 -06:00
Jack Christensen
3d54c9a958 Add test for pgtype.Int2.AssignTo 2017-03-03 17:59:26 -06:00
Jack Christensen
890708967c Standardize receiver variable name for pgtype
Conversion functions now use standardized src and dst depending on their role.
2017-03-03 17:35:02 -06:00
Jack Christensen
a1e4efe14e Add more tests for pgtype.Bool 2017-03-03 17:15:05 -06:00
Jack Christensen
579b6cd612 Initial proof-of-concept for pgtype
Squashed commit of the following:

commit c19454582b335ce5bdda6320f7e4e8c76cfeaf44
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Fri Mar 3 15:24:47 2017 -0600

    Add AssignTo to pgtype.Timestamptz

    Also handle infinity for pgtype.Date

commit 7329933610b38f4bc15731b1f7c55c520b49e300
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Fri Mar 3 15:12:18 2017 -0600

    Implement AssignTo for most pgtypes

commit cc3d1e4af896d34ec98c3bf2e982d0367451f21c
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Thu Mar 2 21:19:07 2017 -0600

    Use pgtype.Int2Array in pgx

commit 36da5cc2178d1a31a56dc6e6f128843bd80dea0b
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Tue Feb 28 21:45:33 2017 -0600

    Add text array transcoding

commit 1b0f18d99f38b69f8c2db26388815e67b2b03d59
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Mon Feb 27 19:28:55 2017 -0600

    Add ParseUntypedTextArray

commit 0f50ce3e833fc38495d333228daf04f5142be676
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Mon Feb 27 18:54:20 2017 -0600

    wip

commit d934f273627d79997035c282416db922f2fbe87a
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sun Feb 26 17:14:32 2017 -0600

    WIP - beginning text format array parsing

commit 7276ad33ce7fa9c250745a3ed909998f3dae4a32
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Feb 25 22:50:11 2017 -0600

    Beginning binary arrays

commit 917faa5a3175d376222423c10aca297a20f96448
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Feb 25 19:36:35 2017 -0600

    Fix incomplete tests

commit de8c140cfb98b7b047d53c5718ccbf12eaf813a1
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Feb 25 19:32:22 2017 -0600

    Add timestamptz null and infinity

commit 7d9f954de4e071a1eccac762248079b90dbeb53f
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Feb 25 18:19:38 2017 -0600

    Add infinity to pgtype.Date

commit 7bf783ae20ba05571c2fb9f661183233c95eab41
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Feb 25 17:19:55 2017 -0600

    Add Status to pgtype.Date

commit 984500455c9b9a4b6221758540d248e6410d93a4
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Feb 25 16:54:01 2017 -0600

    Add status to Int4 and Int8

commit 6fe76fcfc2de31552790db3b093480a9d5b2a742
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Feb 25 16:40:27 2017 -0600

    Extract testSuccessfulTranscode

commit 001647c1da03f796014cf21f41c9a7fd2cfadfde
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Feb 25 16:15:51 2017 -0600

    Add Status to pgtype.Int2

commit 720451f06d13d9c9fa2a0482e010f24bf4627c2a
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Feb 25 15:56:44 2017 -0600

    Add status to pgtype.Bool

commit 325f700b6edff215a692b10bc5b94cdfe1100769
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Fri Feb 24 17:28:15 2017 -0600

    Add date to conversion system

commit 4a9343e45d3897f59eab98a0009d2ddbe07e02d7
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Fri Feb 24 16:28:35 2017 -0600

    Add bool to oid based encoding

commit d984fcafab1476cf84852485b6711f4b2069eb6d
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Fri Feb 24 16:15:38 2017 -0600

    Add pgtype interfaces

commit 0f93bfc2de4023b069b966c0988bf7f0469d1809
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Fri Feb 24 14:48:34 2017 -0600

    Begin introduction of Convert

commit e5707023cac7c07342b8c910e480d09a1caaf6ee
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Fri Feb 24 14:10:56 2017 -0600

    Move bool to pgtype

commit bb764d2129efe7fb21e841dbb35e6d0dc7586d37
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Fri Feb 24 13:45:05 2017 -0600

    Add Int2 test

commit 08c49437f455a32f7c3f0a524cd21a895d440301
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Fri Feb 24 13:44:09 2017 -0600

    Add Int4 test

commit 16722952222fd15c53c8fa84974645504a6d0dc0
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Fri Feb 24 08:56:59 2017 -0600

    Add int8 tests

commit 83a5447cd2c46b58d0880023cc4e9af0c84988a2
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Wed Feb 22 18:08:05 2017 -0600

    wip

commit 0ca0ee72068a72b016729b01fccef22474595285
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Mon Feb 20 18:56:52 2017 -0600

    wip

commit d2c2baf4ea2cd0793d68c7094c425217df952bec
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Mon Feb 20 18:46:10 2017 -0600

    wip

commit f78371da0098356527b193fd496a338da5fe414b
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Mon Feb 20 17:43:39 2017 -0600

    wip

commit 3366699bea62ec0110db05f3cb2998d58ac9ce5d
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Mon Feb 20 14:07:47 2017 -0600

    wip

commit 66b79e940870fd0133ebb10ac1547e1d4d7d0b51
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Mon Feb 20 13:35:37 2017 -0600

    Extract pgio

commit 8b07d97d1305ed98fd76db6e306a289c0af92d56
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Mon Feb 20 13:20:00 2017 -0600

    wip

commit 62f1adb3427f4317b708da075dce50c4d4daff7b
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Mon Feb 20 12:08:46 2017 -0600

    wip

commit a712d2546933a5a8433c65eef0ff2ee135077c87
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Mon Feb 20 09:30:52 2017 -0600

    wip

commit 4faf97cc588126dda160fc360680719572a23105
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Fri Feb 17 22:20:18 2017 -0600

    wip
2017-03-03 15:33:34 -06:00
Jack Christensen
ac2414449c Initial proof-of-concept for pgtype
Squashed commit of the following:

commit c19454582b335ce5bdda6320f7e4e8c76cfeaf44
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Fri Mar 3 15:24:47 2017 -0600

    Add AssignTo to pgtype.Timestamptz

    Also handle infinity for pgtype.Date

commit 7329933610b38f4bc15731b1f7c55c520b49e300
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Fri Mar 3 15:12:18 2017 -0600

    Implement AssignTo for most pgtypes

commit cc3d1e4af896d34ec98c3bf2e982d0367451f21c
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Thu Mar 2 21:19:07 2017 -0600

    Use pgtype.Int2Array in pgx

commit 36da5cc2178d1a31a56dc6e6f128843bd80dea0b
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Tue Feb 28 21:45:33 2017 -0600

    Add text array transcoding

commit 1b0f18d99f38b69f8c2db26388815e67b2b03d59
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Mon Feb 27 19:28:55 2017 -0600

    Add ParseUntypedTextArray

commit 0f50ce3e833fc38495d333228daf04f5142be676
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Mon Feb 27 18:54:20 2017 -0600

    wip

commit d934f273627d79997035c282416db922f2fbe87a
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sun Feb 26 17:14:32 2017 -0600

    WIP - beginning text format array parsing

commit 7276ad33ce7fa9c250745a3ed909998f3dae4a32
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Feb 25 22:50:11 2017 -0600

    Beginning binary arrays

commit 917faa5a3175d376222423c10aca297a20f96448
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Feb 25 19:36:35 2017 -0600

    Fix incomplete tests

commit de8c140cfb98b7b047d53c5718ccbf12eaf813a1
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Feb 25 19:32:22 2017 -0600

    Add timestamptz null and infinity

commit 7d9f954de4e071a1eccac762248079b90dbeb53f
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Feb 25 18:19:38 2017 -0600

    Add infinity to pgtype.Date

commit 7bf783ae20ba05571c2fb9f661183233c95eab41
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Feb 25 17:19:55 2017 -0600

    Add Status to pgtype.Date

commit 984500455c9b9a4b6221758540d248e6410d93a4
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Feb 25 16:54:01 2017 -0600

    Add status to Int4 and Int8

commit 6fe76fcfc2de31552790db3b093480a9d5b2a742
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Feb 25 16:40:27 2017 -0600

    Extract testSuccessfulTranscode

commit 001647c1da03f796014cf21f41c9a7fd2cfadfde
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Feb 25 16:15:51 2017 -0600

    Add Status to pgtype.Int2

commit 720451f06d13d9c9fa2a0482e010f24bf4627c2a
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Sat Feb 25 15:56:44 2017 -0600

    Add status to pgtype.Bool

commit 325f700b6edff215a692b10bc5b94cdfe1100769
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Fri Feb 24 17:28:15 2017 -0600

    Add date to conversion system

commit 4a9343e45d3897f59eab98a0009d2ddbe07e02d7
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Fri Feb 24 16:28:35 2017 -0600

    Add bool to oid based encoding

commit d984fcafab1476cf84852485b6711f4b2069eb6d
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Fri Feb 24 16:15:38 2017 -0600

    Add pgtype interfaces

commit 0f93bfc2de4023b069b966c0988bf7f0469d1809
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Fri Feb 24 14:48:34 2017 -0600

    Begin introduction of Convert

commit e5707023cac7c07342b8c910e480d09a1caaf6ee
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Fri Feb 24 14:10:56 2017 -0600

    Move bool to pgtype

commit bb764d2129efe7fb21e841dbb35e6d0dc7586d37
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Fri Feb 24 13:45:05 2017 -0600

    Add Int2 test

commit 08c49437f455a32f7c3f0a524cd21a895d440301
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Fri Feb 24 13:44:09 2017 -0600

    Add Int4 test

commit 16722952222fd15c53c8fa84974645504a6d0dc0
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Fri Feb 24 08:56:59 2017 -0600

    Add int8 tests

commit 83a5447cd2c46b58d0880023cc4e9af0c84988a2
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Wed Feb 22 18:08:05 2017 -0600

    wip

commit 0ca0ee72068a72b016729b01fccef22474595285
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Mon Feb 20 18:56:52 2017 -0600

    wip

commit d2c2baf4ea2cd0793d68c7094c425217df952bec
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Mon Feb 20 18:46:10 2017 -0600

    wip

commit f78371da0098356527b193fd496a338da5fe414b
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Mon Feb 20 17:43:39 2017 -0600

    wip

commit 3366699bea62ec0110db05f3cb2998d58ac9ce5d
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Mon Feb 20 14:07:47 2017 -0600

    wip

commit 66b79e940870fd0133ebb10ac1547e1d4d7d0b51
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Mon Feb 20 13:35:37 2017 -0600

    Extract pgio

commit 8b07d97d1305ed98fd76db6e306a289c0af92d56
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Mon Feb 20 13:20:00 2017 -0600

    wip

commit 62f1adb3427f4317b708da075dce50c4d4daff7b
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Mon Feb 20 12:08:46 2017 -0600

    wip

commit a712d2546933a5a8433c65eef0ff2ee135077c87
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Mon Feb 20 09:30:52 2017 -0600

    wip

commit 4faf97cc588126dda160fc360680719572a23105
Author: Jack Christensen <jack@jackchristensen.com>
Date:   Fri Feb 17 22:20:18 2017 -0600

    wip
2017-03-03 15:33:34 -06:00
Jack Christensen
e1eda90e29 Add ChunkReader 2017-02-12 21:46:15 -06:00
315 changed files with 58515 additions and 4700 deletions

54
.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file
View File

@@ -0,0 +1,54 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: bug
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
If possible, please provide runnable example such as:
```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/jackc/pgx/v5"
)

func main() {
	conn, err := pgx.Connect(context.Background(), os.Getenv("DATABASE_URL"))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(context.Background())

	// Your code here...
}
```
Please run your example with the race detector enabled. For example, `go run -race main.go` or `go test -race`.
**Expected behavior**
A clear and concise description of what you expected to happen.
**Actual behavior**
A clear and concise description of what actually happened.
**Version**
- Go: `$ go version` -> [e.g. go version go1.18.3 darwin/amd64]
- PostgreSQL: `$ psql --no-psqlrc --tuples-only -c 'select version()'` -> [e.g. PostgreSQL 14.4 on x86_64-apple-darwin21.5.0, compiled by Apple clang version 13.1.6 (clang-1316.0.21.2.5), 64-bit]
- pgx: `$ grep 'github.com/jackc/pgx/v[0-9]' go.mod` -> [e.g. v4.16.1]
**Additional context**
Add any other context about the problem here.

View File

@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.

10
.github/ISSUE_TEMPLATE/other-issues.md vendored Normal file
View File

@@ -0,0 +1,10 @@
---
name: Other issues
about: Any issue that is not a bug or a feature request
title: ''
labels: ''
assignees: ''
---
Please describe the issue in detail. If this is a question about how to use pgx please use discussions instead.

156
.github/workflows/ci.yml vendored Normal file
View File

@@ -0,0 +1,156 @@
name: CI
on:
push:
branches: [master]
pull_request:
branches: [master]
jobs:
test:
name: Test
runs-on: ubuntu-22.04
strategy:
matrix:
go-version: ["1.23", "1.24"]
pg-version: [13, 14, 15, 16, 17, cockroachdb]
include:
- pg-version: 13
pgx-test-database: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
pgx-test-unix-socket-conn-string: "host=/var/run/postgresql dbname=pgx_test"
pgx-test-tcp-conn-string: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
pgx-test-scram-password-conn-string: "host=127.0.0.1 user=pgx_scram password=secret dbname=pgx_test"
pgx-test-md5-password-conn-string: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
pgx-test-plain-password-conn-string: "host=127.0.0.1 user=pgx_pw password=secret dbname=pgx_test"
pgx-test-tls-conn-string: "host=localhost user=pgx_ssl password=secret sslmode=verify-full sslrootcert=/tmp/ca.pem dbname=pgx_test"
pgx-ssl-password: certpw
pgx-test-tls-client-conn-string: "host=localhost user=pgx_sslcert sslmode=verify-full sslrootcert=/tmp/ca.pem sslcert=/tmp/pgx_sslcert.crt sslkey=/tmp/pgx_sslcert.key dbname=pgx_test"
- pg-version: 14
pgx-test-database: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
pgx-test-unix-socket-conn-string: "host=/var/run/postgresql dbname=pgx_test"
pgx-test-tcp-conn-string: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
pgx-test-scram-password-conn-string: "host=127.0.0.1 user=pgx_scram password=secret dbname=pgx_test"
pgx-test-md5-password-conn-string: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
pgx-test-plain-password-conn-string: "host=127.0.0.1 user=pgx_pw password=secret dbname=pgx_test"
pgx-test-tls-conn-string: "host=localhost user=pgx_ssl password=secret sslmode=verify-full sslrootcert=/tmp/ca.pem dbname=pgx_test"
pgx-ssl-password: certpw
pgx-test-tls-client-conn-string: "host=localhost user=pgx_sslcert sslmode=verify-full sslrootcert=/tmp/ca.pem sslcert=/tmp/pgx_sslcert.crt sslkey=/tmp/pgx_sslcert.key dbname=pgx_test"
- pg-version: 15
pgx-test-database: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
pgx-test-unix-socket-conn-string: "host=/var/run/postgresql dbname=pgx_test"
pgx-test-tcp-conn-string: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
pgx-test-scram-password-conn-string: "host=127.0.0.1 user=pgx_scram password=secret dbname=pgx_test"
pgx-test-md5-password-conn-string: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
pgx-test-plain-password-conn-string: "host=127.0.0.1 user=pgx_pw password=secret dbname=pgx_test"
pgx-test-tls-conn-string: "host=localhost user=pgx_ssl password=secret sslmode=verify-full sslrootcert=/tmp/ca.pem dbname=pgx_test"
pgx-ssl-password: certpw
pgx-test-tls-client-conn-string: "host=localhost user=pgx_sslcert sslmode=verify-full sslrootcert=/tmp/ca.pem sslcert=/tmp/pgx_sslcert.crt sslkey=/tmp/pgx_sslcert.key dbname=pgx_test"
- pg-version: 16
pgx-test-database: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
pgx-test-unix-socket-conn-string: "host=/var/run/postgresql dbname=pgx_test"
pgx-test-tcp-conn-string: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
pgx-test-scram-password-conn-string: "host=127.0.0.1 user=pgx_scram password=secret dbname=pgx_test"
pgx-test-md5-password-conn-string: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
pgx-test-plain-password-conn-string: "host=127.0.0.1 user=pgx_pw password=secret dbname=pgx_test"
pgx-test-tls-conn-string: "host=localhost user=pgx_ssl password=secret sslmode=verify-full sslrootcert=/tmp/ca.pem dbname=pgx_test"
pgx-ssl-password: certpw
pgx-test-tls-client-conn-string: "host=localhost user=pgx_sslcert sslmode=verify-full sslrootcert=/tmp/ca.pem sslcert=/tmp/pgx_sslcert.crt sslkey=/tmp/pgx_sslcert.key dbname=pgx_test"
- pg-version: 17
pgx-test-database: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
pgx-test-unix-socket-conn-string: "host=/var/run/postgresql dbname=pgx_test"
pgx-test-tcp-conn-string: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
pgx-test-scram-password-conn-string: "host=127.0.0.1 user=pgx_scram password=secret dbname=pgx_test"
pgx-test-md5-password-conn-string: "host=127.0.0.1 user=pgx_md5 password=secret dbname=pgx_test"
pgx-test-plain-password-conn-string: "host=127.0.0.1 user=pgx_pw password=secret dbname=pgx_test"
pgx-test-tls-conn-string: "host=localhost user=pgx_ssl password=secret sslmode=verify-full sslrootcert=/tmp/ca.pem dbname=pgx_test"
pgx-ssl-password: certpw
pgx-test-tls-client-conn-string: "host=localhost user=pgx_sslcert sslmode=verify-full sslrootcert=/tmp/ca.pem sslcert=/tmp/pgx_sslcert.crt sslkey=/tmp/pgx_sslcert.key dbname=pgx_test"
- pg-version: cockroachdb
pgx-test-database: "postgresql://root@127.0.0.1:26257/pgx_test?sslmode=disable&experimental_enable_temp_tables=on"
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v4
- name: Set up Go ${{ matrix.go-version }}
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Setup database server for testing
run: ci/setup_test.bash
env:
PGVERSION: ${{ matrix.pg-version }}
# - name: Setup upterm session
# uses: lhotari/action-upterm@v1
# with:
# ## limits ssh access and adds the ssh public key for the user which triggered the workflow
# limit-access-to-actor: true
# env:
# PGX_TEST_DATABASE: ${{ matrix.pgx-test-database }}
# PGX_TEST_UNIX_SOCKET_CONN_STRING: ${{ matrix.pgx-test-unix-socket-conn-string }}
# PGX_TEST_TCP_CONN_STRING: ${{ matrix.pgx-test-tcp-conn-string }}
# PGX_TEST_SCRAM_PASSWORD_CONN_STRING: ${{ matrix.pgx-test-scram-password-conn-string }}
# PGX_TEST_MD5_PASSWORD_CONN_STRING: ${{ matrix.pgx-test-md5-password-conn-string }}
# PGX_TEST_PLAIN_PASSWORD_CONN_STRING: ${{ matrix.pgx-test-plain-password-conn-string }}
# PGX_TEST_TLS_CONN_STRING: ${{ matrix.pgx-test-tls-conn-string }}
# PGX_SSL_PASSWORD: ${{ matrix.pgx-ssl-password }}
# PGX_TEST_TLS_CLIENT_CONN_STRING: ${{ matrix.pgx-test-tls-client-conn-string }}
- name: Check formatting
run: |
gofmt -l -s -w .
git status
git diff --exit-code
- name: Test
# parallel testing is disabled because somehow parallel testing causes Github Actions to kill the runner.
run: go test -parallel=1 -race ./...
env:
PGX_TEST_DATABASE: ${{ matrix.pgx-test-database }}
PGX_TEST_UNIX_SOCKET_CONN_STRING: ${{ matrix.pgx-test-unix-socket-conn-string }}
PGX_TEST_TCP_CONN_STRING: ${{ matrix.pgx-test-tcp-conn-string }}
PGX_TEST_SCRAM_PASSWORD_CONN_STRING: ${{ matrix.pgx-test-scram-password-conn-string }}
PGX_TEST_MD5_PASSWORD_CONN_STRING: ${{ matrix.pgx-test-md5-password-conn-string }}
PGX_TEST_PLAIN_PASSWORD_CONN_STRING: ${{ matrix.pgx-test-plain-password-conn-string }}
# TestConnectTLS fails. However, it succeeds if I connect to the CI server with upterm and run it. Give up on that test for now.
# PGX_TEST_TLS_CONN_STRING: ${{ matrix.pgx-test-tls-conn-string }}
PGX_SSL_PASSWORD: ${{ matrix.pgx-ssl-password }}
PGX_TEST_TLS_CLIENT_CONN_STRING: ${{ matrix.pgx-test-tls-client-conn-string }}
test-windows:
name: Test Windows
runs-on: windows-latest
strategy:
matrix:
go-version: ["1.23", "1.24"]
steps:
- name: Setup PostgreSQL
id: postgres
uses: ikalnytskyi/action-setup-postgres@v4
with:
database: pgx_test
- name: Check out code into the Go module directory
uses: actions/checkout@v4
- name: Set up Go ${{ matrix.go-version }}
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Initialize test database
run: |
psql -f testsetup/postgresql_setup.sql pgx_test
env:
PGSERVICE: ${{ steps.postgres.outputs.service-name }}
shell: bash
- name: Test
# parallel testing is disabled because somehow parallel testing causes Github Actions to kill the runner.
run: go test -parallel=1 -race ./...
env:
PGX_TEST_DATABASE: ${{ steps.postgres.outputs.connection-uri }}

3
.gitignore vendored
View File

@@ -22,3 +22,6 @@ _testmain.go
*.exe
.envrc
/.testdb
.DS_Store

21
.golangci.yml Normal file
View File

@@ -0,0 +1,21 @@
# See for configurations: https://golangci-lint.run/usage/configuration/
version: 2
# See: https://golangci-lint.run/usage/formatters/
formatters:
default: none
enable:
- gofmt # https://pkg.go.dev/cmd/gofmt
- gofumpt # https://github.com/mvdan/gofumpt
settings:
gofmt:
simplify: true # Simplify code: gofmt with `-s` option.
gofumpt:
# Module path which contains the source code being formatted.
# Default: ""
module-path: github.com/jackc/pgx/v5 # Should match with module in go.mod
# Choose whether to use the extra rules.
# Default: false
extra-rules: true

View File

@@ -1,33 +0,0 @@
language: go
go:
- 1.15.x
- 1.14.x
- tip
# Derived from https://github.com/lib/pq/blob/master/.travis.yml
before_install:
- ./travis/before_install.bash
env:
global:
- GO111MODULE=on
- PGX_TEST_DATABASE=postgres://pgx_md5:secret@127.0.0.1/pgx_test
matrix:
- CRATEVERSION=2.1 PGX_TEST_CRATEDB_CONN_STRING="host=127.0.0.1 port=6543 user=pgx database=pgx_test"
- PGVERSION=12
- PGVERSION=11
- PGVERSION=10
- PGVERSION=9.6
- PGVERSION=9.5
before_script:
- ./travis/before_script.bash
script:
- ./travis/script.bash
matrix:
allow_failures:
- go: tip

View File

@@ -1,165 +1,462 @@
# 4.9.2 (November 3, 2020)
# 5.7.5 (May 17, 2025)
The underlying library updates fix an issue where appending to a scanned slice could corrupt other data.
* Support sslnegotiation connection option (divyam234)
* Update golang.org/x/crypto to v0.37.0. This placates security scanners that were unable to see that pgx did not use the behavior affected by https://pkg.go.dev/vuln/GO-2025-3487.
* TraceLog now logs Acquire and Release at the debug level (dave sinclair)
* Add support for PGTZ environment variable
* Add support for PGOPTIONS environment variable
* Unpin memory used by Rows quicker
* Remove PlanScan memoization. This resolves a rare issue where scanning could be broken for one type by first scanning another. The problem was in the memoization system and benchmarking revealed that memoization was not providing any meaningful benefit.
* Update pgconn to v1.7.2
* Update pgproto3 to v2.0.6
# 5.7.4 (March 24, 2025)
# 4.9.1 (October 31, 2020)
* Fix / revert change to scanning JSON `null` (Felix Röhrich)
* Update pgconn to v1.7.1
* Update pgtype to v1.6.1
* Fix SendBatch of all prepared statements with statement cache disabled
# 5.7.3 (March 21, 2025)
# 4.9.0 (September 26, 2020)
* Expose EmptyAcquireWaitTime in pgxpool.Stat (vamshiaruru32)
* Improve SQL sanitizer performance (ninedraft)
* Fix Scan confusion with json(b), sql.Scanner, and automatic dereferencing (moukoublen, felix-roehrich)
* Fix Values() for xml type always returning nil instead of []byte
* Add ability to send Flush message in pipeline mode (zenkovev)
* Fix pgtype.Timestamp's JSON behavior to match PostgreSQL (pconstantinou)
* Better error messages when scanning structs (logicbomb)
* Fix handling of error on batch write (bonnefoa)
* Match libpq's connection fallback behavior more closely (felix-roehrich)
* Add MinIdleConns to pgxpool (djahandarie)
* pgxpool now waits for connection cleanup to finish before making room in pool for another connection. This prevents temporarily exceeding max pool size.
* Fix when scanning a column to nil to skip it on the first row but scanning it to a real value on a subsequent row.
* Fix prefer simple protocol with prepared statements. (Jinzhu)
* Fix FieldDescriptions not being available on Rows before calling Next the first time.
* Various minor fixes in updated versions of pgconn, pgtype, and puddle.
# 5.7.2 (December 21, 2024)
# 4.8.1 (July 29, 2020)
* Fix prepared statement already exists on batch prepare failure
* Add commit query to tx options (Lucas Hild)
* Fix pgtype.Timestamp json unmarshal (Shean de Montigny-Desautels)
* Add message body size limits in frontend and backend (zene)
* Add xid8 type
* Ensure planning encodes and scans cannot infinitely recurse
* Implement pgtype.UUID.String() (Konstantin Grachev)
* Switch from ExecParams to Exec in ValidateConnectTargetSessionAttrs functions (Alexander Rumyantsev)
* Update golang.org/x/crypto
* Fix json(b) columns prefer sql.Scanner interface like database/sql (Ludovico Russo)
* Update pgconn to v1.6.4
* Fix deadlock on error after CommandComplete but before ReadyForQuery
* Fix panic on parsing DSN with trailing '='
# 5.7.1 (September 10, 2024)
# 4.8.0 (July 22, 2020)
* Fix data race in tracelog.TraceLog
* Update puddle to v2.2.2. This removes the import of nanotime via linkname.
* Update golang.org/x/crypto and golang.org/x/text
* All argument types supported by native pgx should now also work through database/sql
* Update pgconn to v1.6.3
* Update pgtype to v1.4.2
# 5.7.0 (September 7, 2024)
# 4.7.2 (July 14, 2020)
* Add support for sslrootcert=system (Yann Soubeyrand)
* Add LoadTypes to load multiple types in a single SQL query (Nick Farrell)
* Add XMLCodec supports encoding + scanning XML column type like json (nickcruess-soda)
* Add MultiTrace (Stepan Rabotkin)
* Add TraceLogConfig with customizable TimeKey (stringintech)
* pgx.ErrNoRows wraps sql.ErrNoRows to aid in database/sql compatibility with native pgx functions (merlin)
* Support scanning binary formatted uint32 into string / TextScanner (jennifersp)
* Fix interval encoding to allow 0s and avoid extra spaces (Carlos Pérez-Aradros Herce)
* Update pgservicefile - fixes panic when parsing invalid file
* Better error message when reading past end of batch
* Don't print url when url.Parse returns an error (Kevin Biju)
* Fix snake case name normalization collision in RowToStructByName with db tag (nolandseigler)
* Fix: Scan and encode types with underlying types of arrays
* Improve performance of Columns() (zikaeroh)
* Fix fatal Commit() failure not being considered fatal
* Update pgconn to v1.6.2
* Update pgtype to v1.4.1
# 5.6.0 (May 25, 2024)
# 4.7.1 (June 29, 2020)
* Add StrictNamedArgs (Tomas Zahradnicek)
* Add support for macaddr8 type (Carlos Pérez-Aradros Herce)
* Add SeverityUnlocalized field to PgError / Notice
* Performance optimization of RowToStructByPos/Name (Zach Olstein)
* Allow customizing context canceled behavior for pgconn
* Add ScanLocation to pgtype.Timestamp[tz]Codec
* Add custom data to pgconn.PgConn
* Fix ResultReader.Read() to handle nil values
* Do not encode interval microseconds when they are 0 (Carlos Pérez-Aradros Herce)
* pgconn.SafeToRetry checks for wrapped errors (tjasko)
* Failed connection attempts include all errors
* Optimize LargeObject.Read (Mitar)
* Add tracing for connection acquire and release from pool (ngavinsir)
* Fix encode driver.Valuer not called when nil
* Add support for custom JSON marshal and unmarshal (Mitar)
* Use Go default keepalive for TCP connections (Hans-Joachim Kliemeck)
* Fix stdlib decoding error with certain order and combination of fields
# 5.5.5 (March 9, 2024)
# 4.7.0 (June 27, 2020)
Use spaces instead of parentheses for SQL sanitization.
* Update pgtype to v1.4.0
* Update pgconn to v1.6.1
* Update puddle to v1.1.1
* Fix context propagation with Tx commit and Rollback (georgysavva)
* Add lazy connect option to pgxpool (georgysavva)
* Fix connection leak if pgxpool.BeginTx() fail (Jean-Baptiste Bronisz)
* Add native Go slice support for strings and numbers to simple protocol
* stdlib add default timeouts for Conn.Close() and Stmt.Close() (georgysavva)
* Assorted performance improvements especially with large result sets
* Fix close pool on not lazy connect failure (Yegor Myskin)
* Add Config copy (georgysavva)
* Support SendBatch with Simple Protocol (Jordan Lewis)
* Better error logging on rows close (Igor V. Kozinov)
* Expose stdlib.Conn.Conn() to enable database/sql.Conn.Raw()
* Improve unknown type support for database/sql
* Fix transaction commit failure closing connection
This still solves the problem of negative numbers creating a line comment, but this avoids breaking edge cases such as
`set foo to $1` where the substitution is taking place in a location where an arbitrary expression is not allowed.
# 4.6.0 (March 30, 2020)
# 5.5.4 (March 4, 2024)
* stdlib: Bail early if preloading rows.Next() results in rows.Err() (Bas van Beek)
* Sanitize time to microsecond accuracy (Andrew Nicoll)
* Update pgtype to v1.3.0
* Update pgconn to v1.5.0
* Update golang.org/x/crypto for security fix
* Implement "verify-ca" SSL mode
Fix CVE-2024-27304
# 4.5.0 (March 7, 2020)
SQL injection can occur if an attacker can cause a single query or bind message to exceed 4 GB in size. An integer
overflow in the calculated message size can cause the one large message to be sent as multiple messages under the
attacker's control.
* Update to pgconn v1.4.0
* Fixes QueryRow with empty SQL
* Adds PostgreSQL service file support
* Add Len() to *pgx.Batch (WGH)
* Better logging for individual batch items (Ben Bader)
Thanks to Paul Gerste for reporting this issue.
# 4.4.1 (February 14, 2020)
* Fix behavior of CollectRows to return empty slice if Rows are empty (Felix)
* Fix simple protocol encoding of json.RawMessage
* Fix *Pipeline.getResults should close pipeline on error
* Fix panic in TryFindUnderlyingTypeScanPlan (David Kurman)
* Fix deallocation of invalidated cached statements in a transaction
* Handle invalid sslkey file
* Fix scan float4 into sql.Scanner
* Fix pgtype.Bits not making copy of data from read buffer. This would cause the data to be corrupted by future reads.
* Update pgconn to v1.3.2 - better default read buffer size
* Fix race in CopyFrom
# 5.5.3 (February 3, 2024)
# 4.4.0 (February 5, 2020)
* Fix: prepared statement already exists
* Improve CopyFrom auto-conversion of text-ish values
* Add ltree type support (Florent Viel)
* Make some properties of Batch and QueuedQuery public (Pavlo Golub)
* Add AppendRows function (Edoardo Spadolini)
* Optimize convert UUID [16]byte to string (Kirill Malikov)
* Fix: LargeObject Read and Write of more than ~1GB at a time (Mitar)
* Update puddle to v1.1.0 - fixes possible deadlock when acquire is cancelled
* Update pgconn to v1.3.1 - fixes CopyFrom deadlock when multiple NoticeResponse received during copy
* Update pgtype to v1.2.0
* Add MaxConnIdleTime to pgxpool (Patrick Ellul)
* Add MinConns to pgxpool (Patrick Ellul)
* Fix: stdlib.ReleaseConn closes connections left in invalid state
# 5.5.2 (January 13, 2024)
# 4.3.0 (January 23, 2020)
* Allow NamedArgs to start with underscore
* pgproto3: Maximum message body length support (jeremy.spriet)
* Upgrade golang.org/x/crypto to v0.17.0
* Add snake_case support to RowToStructByName (Tikhon Fedulov)
* Fix: update description cache after exec prepare (James Hartig)
* Fix: pipeline checks if it is closed (James Hartig and Ryan Fowler)
* Fix: normalize timeout / context errors during TLS startup (Samuel Stauffer)
* Add OnPgError for easier centralized error handling (James Hartig)
* Fix Rows.Values panic when unable to decode
* Add Rows.Values support for unknown types
* Add DriverContext support for stdlib (Alex Gaynor)
* Update pgproto3 to v2.0.1 to never return an io.EOF as it would be misinterpreted by database/sql. Instead return io.UnexpectedEOF.
# 5.5.1 (December 9, 2023)
# 4.2.1 (January 13, 2020)
* Add CopyFromFunc helper function. (robford)
* Add PgConn.Deallocate method that uses PostgreSQL protocol Close message.
* pgx uses new PgConn.Deallocate method. This allows deallocating statements to work in a failed transaction. This fixes a case where the prepared statement map could become invalid.
* Fix: Prefer driver.Valuer over json.Marshaler for json fields. (Jacopo)
* Fix: simple protocol SQL sanitizer previously panicked if an invalid $0 placeholder was used. This now returns an error instead. (maksymnevajdev)
* Add pgtype.Numeric.ScanScientific (Eshton Robateau)
* Update pgconn to v1.2.1 (fixes context cancellation data race introduced in v1.2.0)
# 5.5.0 (November 4, 2023)
# 4.2.0 (January 11, 2020)
* Add CollectExactlyOneRow. (Julien GOTTELAND)
* Add OpenDBFromPool to create *database/sql.DB from *pgxpool.Pool. (Lev Zakharov)
* Prepare can automatically choose statement name based on sql. This makes it easier to explicitly manage prepared statements.
* Statement cache now uses deterministic, stable statement names.
* database/sql prepared statement names are deterministically generated.
* Fix: SendBatch wasn't respecting context cancellation.
* Fix: Timeout error from pipeline is now normalized.
* Fix: database/sql encoding json.RawMessage to []byte.
* CancelRequest: Wait for the cancel request to be acknowledged by the server. This should improve PgBouncer compatibility. (Anton Levakin)
* stdlib: Use Ping instead of CheckConn in ResetSession
* Add json.Marshaler and json.Unmarshaler for Float4, Float8 (Kirill Mironov)
* Update pgconn to v1.2.0.
* Update pgtype to v1.1.0.
* Return error instead of panic when wrong number of arguments passed to Exec. (malstoun)
* Fix large objects functionality when PreferSimpleProtocol = true.
* Restore GetDefaultDriver which existed in v3. (Johan Brandhorst)
* Add RegisterConnConfig to stdlib which replaces the removed RegisterDriverConfig from v3.
# 5.4.3 (August 5, 2023)
# 4.1.2 (October 22, 2019)
* Fix: QCharArrayOID was defined with the wrong OID (Christoph Engelbert)
* Fix: connect_timeout for sslmode=allow|prefer (smaher-edb)
* Fix: pgxpool: background health check cannot overflow pool
* Fix: Check for nil in defer when sending batch (recover properly from panic)
* Fix: json scan of non-string pointer to pointer
* Fix: zeronull.Timestamptz should use pgtype.Timestamptz
* Fix: NewConnsCount was not correctly counting connections created by Acquire directly. (James Hartig)
* RowTo(AddrOf)StructByPos ignores fields with "-" db tag
* Optimization: improve text format numeric parsing (horpto)
* Fix dbSavepoint.Begin recursive self call
* Upgrade pgtype to v1.0.2 - fix scan pointer to pointer
# 5.4.2 (July 11, 2023)
# 4.1.1 (October 21, 2019)
* Fix: RowScanner errors are fatal to Rows
* Fix: Enable failover efforts when pg_hba.conf disallows non-ssl connections (Brandon Kauffman)
* Hstore text codec internal improvements (Evan Jones)
* Fix: Stop timers for background reader when not in use. Fixes memory leak when closing connections (Adrian-Stefan Mares)
* Fix: Stop background reader as soon as possible.
* Add PgConn.SyncConn(). This combined with the above fix makes it safe to directly use the underlying net.Conn.
* Fix pgxpool Rows.CommandTag() infinite loop / typo
# 5.4.1 (June 18, 2023)
# 4.1.0 (October 12, 2019)
* Fix: concurrency bug with pgtypeDefaultMap and simple protocol (Lev Zakharov)
* Add TxOptions.BeginQuery to allow overriding the default BEGIN query
## Potentially Breaking Changes
# 5.4.0 (June 14, 2023)
Technically, two changes are breaking changes, but in practice these are extremely unlikely to break existing code.
* Replace platform specific syscalls for non-blocking IO with more traditional goroutines and deadlines. This returns to the v4 approach with some additional improvements and fixes. This restores the ability to use a pgx.Conn over an ssh.Conn as well as other non-TCP or Unix socket connections. In addition, it is a significantly simpler implementation that is less likely to have cross platform issues.
* Optimization: The default type registrations are now shared among all connections. This saves about 100KB of memory per connection. `pgtype.Type` and `pgtype.Codec` values are now required to be immutable after registration. This was already necessary in most cases but wasn't documented until now. (Lev Zakharov)
* Fix: Ensure pgxpool.Pool.QueryRow.Scan releases connection on panic
* CancelRequest: don't try to read the reply (Nicola Murino)
* Fix: correctly handle bool type aliases (Wichert Akkerman)
* Fix: pgconn.CancelRequest: Fix unix sockets: don't use RemoteAddr()
* Fix: pgx.Conn memory leak with prepared statement caching (Evan Jones)
* Add BeforeClose to pgxpool.Pool (Evan Cordell)
* Fix: various hstore fixes and optimizations (Evan Jones)
* Fix: RowToStructByPos with embedded unexported struct
* Support different bool string representations (Lev Zakharov)
* Fix: error when using BatchResults.Exec on a select that returns an error after some rows.
* Fix: pipelineBatchResults.Exec() not returning error from ResultReader
* Fix: pipeline batch results not closing pipeline when error occurs while reading directly from results instead of using
a callback.
* Fix: scanning a table type into a struct
* Fix: scan array of record to pointer to slice of struct
* Fix: handle null for json (Cemre Mengu)
* Batch Query callback is called even when there is an error
* Add RowTo(AddrOf)StructByNameLax (Audi P. Risa P)
* Conn.Begin and Conn.BeginTx return a Tx interface instead of the internal dbTx struct. This is necessary for the Conn.Begin method to have the same signature as other methods that begin a transaction.
* Add Conn() to Tx interface. This is necessary to allow code using a Tx to access the *Conn (and pgconn.PgConn) on which the Tx is executing.
# 5.3.1 (February 27, 2023)
## Fixes
* Fix: Support v4 and v5 stdlib in same program (Tomáš Procházka)
* Fix: sql.Scanner not being used in certain cases
* Add text format jsonpath support
* Fix: fake non-blocking read adaptive wait time
* Releasing a busy connection closes the connection instead of returning an unusable connection to the pool
* Do not mutate config.Config.OnNotification in connect
# 5.3.0 (February 11, 2023)
# 4.0.1 (September 19, 2019)
* Fix: json values work with sql.Scanner
* Fixed / improved error messages (Mark Chambers and Yevgeny Pats)
* Fix: support scan into single dimensional arrays
* Fix: MaxConnLifetimeJitter setting actually jitter (Ben Weintraub)
* Fix: driver.Value representation of bytea should be []byte not string
* Fix: better handling of unregistered OIDs
* CopyFrom can use query cache to avoid extra round trip to get OIDs (Alejandro Do Nascimento Mora)
* Fix: encode to json ignoring driver.Valuer
* Support sql.Scanner on renamed base type
* Fix: pgtype.Numeric text encoding of negative numbers (Mark Chambers)
* Fix: connect with multiple hostnames when one can't be resolved
* Upgrade puddle to remove dependency on uber/atomic and fix alignment issue on 32-bit platform
* Fix: scanning json column into **string
* Multiple reductions in memory allocations
* Fake non-blocking read adapts its max wait time
* Improve CopyFrom performance and reduce memory usage
* Fix: encode []any to array
* Fix: LoadType for composite with dropped attributes (Felix Röhrich)
* Support v4 and v5 stdlib in same program
* Fix: text format array decoding with string of "NULL"
* Prefer binary format for arrays
* Fix statement cache cleanup.
* Corrected daterange OID.
* Fix Tx when committing or rolling back multiple times in certain cases.
* Improve documentation.
# 5.2.0 (December 5, 2022)
# 4.0.0 (September 14, 2019)
* `tracelog.TraceLog` implements the pgx.PrepareTracer interface. (Vitalii Solodilov)
* Optimize creating begin transaction SQL string (Petr Evdokimov and ksco)
* `Conn.LoadType` supports range and multirange types (Vitalii Solodilov)
* Fix scan `uint` and `uint64` `ScanNumeric`. This resolves a PostgreSQL `numeric` being incorrectly scanned into `uint` and `uint64`.
v4 is a major release with many significant changes some of which are breaking changes. The most significant are
included below.
# 5.1.1 (November 17, 2022)
* Simplified establishing a connection with a connection string.
* All potentially blocking operations now require a context.Context. The non-context aware functions have been removed.
* OIDs are hard-coded for known types. This saves the query on connection.
* Context cancellations while network activity is in progress is now always fatal. Previously, it was sometimes recoverable. This led to increased complexity in pgx itself and in application code.
* Go modules are required.
* Errors are now implemented in the Go 1.13 style.
* `Rows` and `Tx` are now interfaces.
* The connection pool has been decoupled from pgx and is now a separate, included package (github.com/jackc/pgx/v4/pgxpool).
* pgtype has been spun off to a separate package (github.com/jackc/pgtype).
* pgproto3 has been spun off to a separate package (github.com/jackc/pgproto3/v2).
* Logical replication support has been spun off to a separate package (github.com/jackc/pglogrepl).
* Lower level PostgreSQL functionality is now implemented in a separate package (github.com/jackc/pgconn).
* Tests are now configured with environment variables.
* Conn has an automatic statement cache by default.
* Batch interface has been simplified.
* QueryArgs has been removed.
* Fix simple query sanitizer where query text contains a Unicode replacement character.
* Remove erroneous `name` argument from `DeallocateAll()`. Technically, this is a breaking change, but given that method was only added 5 days ago this change was accepted. (Bodo Kaiser)
# 5.1.0 (November 12, 2022)
* Update puddle to v2.1.2. This resolves a race condition and a deadlock in pgxpool.
* `QueryRewriter.RewriteQuery` now returns an error. Technically, this is a breaking change for any external implementers, but given the minimal likelihood that there are actually any external implementers this change was accepted.
* Expose `GetSSLPassword` support to pgx.
* Fix encode `ErrorResponse` unknown field handling. This would only affect pgproto3 being used directly as a proxy with a non-PostgreSQL server that included additional error fields.
* Fix date text format encoding with 5 digit years.
* Fix date values passed to a `sql.Scanner` as `string` instead of `time.Time`.
* DateCodec.DecodeValue can return `pgtype.InfinityModifier` instead of `string` for infinite values. This now matches the behavior of the timestamp types.
* Add domain type support to `Conn.LoadType()`.
* Add `RowToStructByName` and `RowToAddrOfStructByName`. (Pavlo Golub)
* Add `Conn.DeallocateAll()` to clear all prepared statements including the statement cache. (Bodo Kaiser)
# 5.0.4 (October 24, 2022)
* Fix: CollectOneRow prefers PostgreSQL error over pgx.ErrNoRows
* Fix: some reflect Kind checks to first check for nil
* Bump golang.org/x/text dependency to placate snyk
* Fix: RowToStructByPos on structs with multiple anonymous sub-structs (Baptiste Fontaine)
* Fix: Exec checks if tx is closed
# 5.0.3 (October 14, 2022)
* Fix `driver.Valuer` handling edge cases that could cause infinite loop or crash
# v5.0.2 (October 8, 2022)
* Fix date encoding in text format to always use 2 digits for month and day
* Prefer driver.Valuer over wrap plans when encoding
* Fix scan to pointer to pointer to renamed type
* Allow scanning NULL even if PG and Go types are incompatible
# v5.0.1 (September 24, 2022)
* Fix 32-bit atomic usage
* Add MarshalJSON for Float8 (yogipristiawan)
* Add `[` and `]` to text encoding of `Lseg`
* Fix sqlScannerWrapper NULL handling
# v5.0.0 (September 17, 2022)
## Merged Packages
`github.com/jackc/pgtype`, `github.com/jackc/pgconn`, and `github.com/jackc/pgproto3` are now included in the main
`github.com/jackc/pgx` repository. Previously there was confusion as to where issues should be reported, additional
release work due to releasing multiple packages, and less clear changelogs.
## pgconn
`CommandTag` is now an opaque type instead of directly exposing an underlying `[]byte`.
The return value `ResultReader.Values()` is no longer safe to retain a reference to after a subsequent call to `NextRow()` or `Close()`.
`Trace()` method adds low level message tracing similar to the `PQtrace` function in `libpq`.
pgconn now uses non-blocking IO. This is a significant internal restructuring, but it should not cause any visible changes on its own. However, it is important in implementing other new features.
`CheckConn()` checks a connection's liveness by doing a non-blocking read. This can be used to detect database restarts or network interruptions without executing a query or a ping.
pgconn now supports pipeline mode.
`*PgConn.ReceiveResults` removed. Use pipeline mode instead.
`Timeout()` no longer considers `context.Canceled` as a timeout error. `context.DeadlineExceeded` still is considered a timeout error.
## pgxpool
`Connect` and `ConnectConfig` have been renamed to `New` and `NewWithConfig` respectively. The `LazyConnect` option has been removed. Pools always lazily connect.
## pgtype
The `pgtype` package has been significantly changed.
### NULL Representation
Previously, types had a `Status` field that could be `Undefined`, `Null`, or `Present`. This has been changed to a
`Valid` `bool` field to harmonize with how `database/sql` represents `NULL` and to make the zero value useable.
Previously, a type that implemented `driver.Valuer` would have the `Value` method called even on a nil pointer. All nils
whether typed or untyped now represent `NULL`.
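As a minimal sketch of the new representation (not taken from the changelog itself), the zero value of a pgtype value is `NULL` and a present value sets `Valid` explicitly:

```go
package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	var missing pgtype.Int8                        // zero value: Valid == false, i.e. NULL
	present := pgtype.Int8{Int64: 42, Valid: true} // non-NULL value

	fmt.Println(missing.Valid, present.Valid) // false true
}
```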
### Codec and Value Split
Previously, the type system combined decoding and encoding values with the value types. e.g. Type `Int8` both handled
encoding and decoding the PostgreSQL representation and acted as a value object. This caused some difficulties when
there was not an exact 1 to 1 relationship between the Go types and the PostgreSQL types. For example, scanning a
PostgreSQL binary `numeric` into a Go `float64` was awkward (see https://github.com/jackc/pgtype/issues/147). These
concepts have been separated. A `Codec` only has responsibility for encoding and decoding values. Value types are
generally defined by implementing an interface that a particular `Codec` understands (e.g. `PointScanner` and
`PointValuer` for the PostgreSQL `point` type).
### Array Types
All array types are now handled by `ArrayCodec` instead of using code generation for each new array type. This also
means that less common array types such as `point[]` are now supported. `Array[T]` supports PostgreSQL multi-dimensional
arrays.
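As an illustrative sketch (assuming an existing `ctx` and `conn *pgx.Conn`; the queries are arbitrary), one-dimensional arrays scan directly into Go slices, and `pgtype.Array[T]` can be used when dimension metadata matters:

```go
func scanArrays(ctx context.Context, conn *pgx.Conn) error {
	// One-dimensional arrays scan directly into plain Go slices.
	var tags []string
	if err := conn.QueryRow(ctx, `select array['a','b','c']`).Scan(&tags); err != nil {
		return err
	}

	// pgtype.Array[T] retains dimension information, e.g. for a 2x2 int4 array.
	var grid pgtype.Array[int32]
	return conn.QueryRow(ctx, `select '{{1,2},{3,4}}'::int4[]`).Scan(&grid)
}
```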
### Composite Types
Composite types must be registered before use. `CompositeFields` may still be used to construct and destruct composite
values, but any type may now implement `CompositeIndexGetter` and `CompositeIndexScanner` to be used as a composite.
### Range Types
Range types are now handled with types `RangeCodec` and `Range[T]`. This allows additional user defined range types to
easily be handled. Multirange types are handled similarly with `MultirangeCodec` and `Multirange[T]`.
### pgxtype
`LoadDataType` moved to `*Conn` as `LoadType`.
### Bytea
The `Bytea` and `GenericBinary` types have been replaced. Use the following instead:
* `[]byte` - For normal usage directly use `[]byte`.
* `DriverBytes` - Uses driver memory only available until next database method call. Avoids a copy and an allocation.
* `PreallocBytes` - Uses preallocated byte slice to avoid an allocation.
* `UndecodedBytes` - Avoids any decoding. Allows working with raw bytes.
### Dropped lib/pq Support
`pgtype` previously supported and was tested against [lib/pq](https://github.com/lib/pq). While it will continue to work
in most cases this is no longer supported.
### database/sql Scan
Previously, most `Scan` implementations would convert `[]byte` to `string` automatically to decode a text value. Now
only `string` is handled. This is to allow the possibility of future binary support in `database/sql` mode by
considering `[]byte` to be binary format and `string` text format. This change should have no effect for any use with
`pgx`. The previous behavior was only necessary for `lib/pq` compatibility.
Added `*Map.SQLScanner` to create a `sql.Scanner` for types such as `[]int32` and `Range[T]` that do not implement
`sql.Scanner` directly.
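A short sketch of how that might look through `database/sql` (the `db` handle and the query are assumed):

```go
func loadIDs(db *sql.DB) ([]int32, error) {
	m := pgtype.NewMap()

	var ids []int32
	// SQLScanner wraps &ids so database/sql can scan a PostgreSQL int4[] into it.
	err := db.QueryRow(`select '{1,2,3}'::int4[]`).Scan(m.SQLScanner(&ids))
	return ids, err
}
```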
### Number Type Fields Include Bit size
`Int2`, `Int4`, `Int8`, `Float4`, `Float8`, and `Uint32` fields now include bit size. e.g. `Int` is renamed to `Int64`.
This matches the convention set by `database/sql`. In addition, for comparable types like `pgtype.Int8` and
`sql.NullInt64` the structures are identical. This means they can be directly converted one to another.
### 3rd Party Type Integrations
* Extracted integrations with https://github.com/shopspring/decimal and https://github.com/gofrs/uuid to
https://github.com/jackc/pgx-shopspring-decimal and https://github.com/jackc/pgx-gofrs-uuid respectively. This trims
the pgx dependency tree.
### Other Changes
* `Bit` and `Varbit` are both replaced by the `Bits` type.
* `CID`, `OID`, `OIDValue`, and `XID` are replaced by the `Uint32` type.
* `Hstore` is now defined as `map[string]*string`.
* `JSON` and `JSONB` types removed. Use `[]byte` or `string` directly.
* `QChar` type removed. Use `rune` or `byte` directly.
* `Inet` and `Cidr` types removed. Use `netip.Addr` and `netip.Prefix` directly. These types are more memory efficient than the previous `net.IPNet`.
* `Macaddr` type removed. Use `net.HardwareAddr` directly.
* Renamed `pgtype.ConnInfo` to `pgtype.Map`.
* Renamed `pgtype.DataType` to `pgtype.Type`.
* Renamed `pgtype.None` to `pgtype.Finite`.
* `RegisterType` now accepts a `*Type` instead of `Type`.
* Assorted array helper methods and types made private.
## stdlib
* Removed `AcquireConn` and `ReleaseConn` as that functionality has been built in since Go 1.13.
## Reduced Memory Usage by Reusing Read Buffers
Previously, the connection read buffer would allocate large chunks of memory and never reuse them. This allowed
transferring ownership to anything such as scanned values without incurring an additional allocation and memory copy.
However, this came at the cost of overall increased memory allocation size. But worse, it was also possible to pin large
chunks of memory by retaining a reference to a small value that originally came directly from the read buffer. Now
ownership remains with the read buffer and anything needing to retain a value must make a copy.
## Query Execution Modes
Control over automatic prepared statement caching and simple protocol use are now combined into query execution mode.
See documentation for `QueryExecMode`.
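For example, the mode can be set connection-wide via the config; this is only a sketch, the `DATABASE_URL` environment variable and the choice of the simple protocol are arbitrary. A mode can also be passed as the first argument to an individual query call to override the default.

```go
func connectSimple(ctx context.Context) (*pgx.Conn, error) {
	cfg, err := pgx.ParseConfig(os.Getenv("DATABASE_URL"))
	if err != nil {
		return nil, err
	}
	// Connection-wide default; the simple protocol is chosen here only for illustration.
	cfg.DefaultQueryExecMode = pgx.QueryExecModeSimpleProtocol
	return pgx.ConnectConfig(ctx, cfg)
}
```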
## QueryRewriter Interface and NamedArgs
pgx now supports named arguments with the `NamedArgs` type. This is implemented via the new `QueryRewriter` interface which
allows arbitrary rewriting of query SQL and arguments.
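A small usage sketch (the `widgets` table and its columns are hypothetical; an existing `ctx` and `conn` are assumed):

```go
func ownedWidgets(ctx context.Context, conn *pgx.Conn, owner int64) (pgx.Rows, error) {
	// @owner is rewritten to a positional parameter before the query is sent.
	return conn.Query(ctx,
		"select id, name from widgets where owner_id = @owner",
		pgx.NamedArgs{"owner": owner},
	)
}
```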
## RowScanner Interface
The `RowScanner` interface allows a single argument to Rows.Scan to scan the entire row.
## Rows Result Helpers
* `CollectRows` and `RowTo*` functions simplify collecting results into a slice.
* `CollectOneRow` collects one row using `RowTo*` functions.
* `ForEachRow` simplifies scanning each row and executing code using the scanned values. `ForEachRow` replaces `QueryFunc`.
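For instance, collecting a result set into a slice of structs might look like the following sketch (the `Widget` type and `widgets` table are hypothetical):

```go
type Widget struct {
	ID   int64
	Name string
}

func loadWidgets(ctx context.Context, conn *pgx.Conn) ([]Widget, error) {
	rows, err := conn.Query(ctx, "select id, name from widgets")
	if err != nil {
		return nil, err
	}
	// CollectRows drains rows, checks rows.Err(), and maps each row to a Widget.
	return pgx.CollectRows(rows, pgx.RowToStructByName[Widget])
}
```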
## Tx Helpers
Rather than every type that implemented `Begin` or `BeginTx` methods also needing to implement `BeginFunc` and
`BeginTxFunc`, these methods have been converted to functions that take a db that implements `Begin` or `BeginTx`.
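A sketch of the function form (the statement and table are arbitrary); `BeginFunc` commits if the callback returns nil and rolls back otherwise:

```go
func renameWidget(ctx context.Context, conn *pgx.Conn) error {
	return pgx.BeginFunc(ctx, conn, func(tx pgx.Tx) error {
		_, err := tx.Exec(ctx, "update widgets set name = $1 where id = $2", "lamp", 1)
		return err
	})
}
```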
## Improved Batch Query Ergonomics
Previously, the code for building a batch went in one place before the call to `SendBatch`, and the code for reading the
results went in one place after the call to `SendBatch`. This could make it difficult to match up the query and the code
to handle the results. Now `Queue` returns a `QueuedQuery` which has methods `Query`, `QueryRow`, and `Exec` which can
be used to register a callback function that will handle the result. Callback functions are called automatically when
`BatchResults.Close` is called.
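A sketch of the callback style (the queries and the `widgets` table are hypothetical; an existing `ctx` and `conn` are assumed):

```go
func insertAndCount(ctx context.Context, conn *pgx.Conn) (int64, error) {
	var n int64

	batch := &pgx.Batch{}
	batch.Queue("insert into widgets(name) values ($1)", "lamp")
	batch.Queue("select count(*) from widgets").QueryRow(func(row pgx.Row) error {
		return row.Scan(&n)
	})

	// Close reads every result and runs the callbacks registered above.
	err := conn.SendBatch(ctx, batch).Close()
	return n, err
}
```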
## SendBatch Uses Pipeline Mode When Appropriate
Previously, a batch with 10 unique parameterized statements executed 100 times would entail 11 network round trips: 1
for each prepare / describe and 1 for executing them all. Now pipeline mode is used to prepare / describe all statements
in a single network round trip. So it would only take 2 round trips.
## Tracing and Logging
Internal logging support has been replaced with tracing hooks. This allows custom tracing integration with tools like OpenTelemetry. Package tracelog provides an adapter for pgx v4 loggers to act as a tracer.
All integrations with 3rd party loggers have been extracted to separate repositories. This trims the pgx dependency
tree.
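A sketch of wiring a tracelog tracer (package `github.com/jackc/pgx/v5/tracelog`) into a connection config; the logging function and the `DATABASE_URL` environment variable are only illustrative:

```go
func connectWithTracing(ctx context.Context) (*pgx.Conn, error) {
	cfg, err := pgx.ParseConfig(os.Getenv("DATABASE_URL"))
	if err != nil {
		return nil, err
	}
	cfg.Tracer = &tracelog.TraceLog{
		Logger: tracelog.LoggerFunc(func(ctx context.Context, level tracelog.LogLevel, msg string, data map[string]any) {
			log.Printf("%s %s %v", level, msg, data)
		}),
		LogLevel: tracelog.LogLevelDebug,
	}
	return pgx.ConnectConfig(ctx, cfg)
}
```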

121
CONTRIBUTING.md Normal file

@ -0,0 +1,121 @@
# Contributing
## Discuss Significant Changes
Before you invest a significant amount of time on a change, please create a discussion or issue describing your
proposal. This will help to ensure your proposed change has a reasonable chance of being merged.
## Avoid Dependencies
Adding a dependency is a big deal. While on occasion a new dependency may be accepted, the default answer to any change
that adds a dependency is no.
## Development Environment Setup
pgx tests naturally require a PostgreSQL database. They will connect to the database specified in the `PGX_TEST_DATABASE`
environment variable. The `PGX_TEST_DATABASE` environment variable can either be a URL or key-value pairs. In addition,
the standard `PG*` environment variables will be respected. Consider using [direnv](https://github.com/direnv/direnv) to
simplify environment variable handling.
### Using an Existing PostgreSQL Cluster
If you already have a PostgreSQL development server this is the quickest way to start and run the majority of the pgx
test suite. Some tests will be skipped that require server configuration changes (e.g. those testing different
authentication methods).
Create and setup a test database:
```
export PGDATABASE=pgx_test
createdb
psql -c 'create extension hstore;'
psql -c 'create extension ltree;'
psql -c 'create domain uint64 as numeric(20,0);'
```
Ensure a `postgres` user exists. This happens by default in normal PostgreSQL installs, but some installation methods
such as Homebrew do not.
```
createuser -s postgres
```
Ensure your `PGX_TEST_DATABASE` environment variable points to the database you just created and run the tests.
```
export PGX_TEST_DATABASE="host=/private/tmp database=pgx_test"
go test ./...
```
This will run the vast majority of the tests, but some tests will be skipped (e.g. those testing different connection methods).
### Creating a New PostgreSQL Cluster Exclusively for Testing
The following environment variables need to be set both for initial setup and whenever the tests are run. (direnv is
highly recommended). Depending on your platform, you may need to change the host for `PGX_TEST_UNIX_SOCKET_CONN_STRING`.
```
export PGPORT=5015
export PGUSER=postgres
export PGDATABASE=pgx_test
export POSTGRESQL_DATA_DIR=postgresql
export PGX_TEST_DATABASE="host=127.0.0.1 database=pgx_test user=pgx_md5 password=secret"
export PGX_TEST_UNIX_SOCKET_CONN_STRING="host=/private/tmp database=pgx_test"
export PGX_TEST_TCP_CONN_STRING="host=127.0.0.1 database=pgx_test user=pgx_md5 password=secret"
export PGX_TEST_SCRAM_PASSWORD_CONN_STRING="host=127.0.0.1 user=pgx_scram password=secret database=pgx_test"
export PGX_TEST_MD5_PASSWORD_CONN_STRING="host=127.0.0.1 database=pgx_test user=pgx_md5 password=secret"
export PGX_TEST_PLAIN_PASSWORD_CONN_STRING="host=127.0.0.1 user=pgx_pw password=secret"
export PGX_TEST_TLS_CONN_STRING="host=localhost user=pgx_ssl password=secret sslmode=verify-full sslrootcert=`pwd`/.testdb/ca.pem"
export PGX_SSL_PASSWORD=certpw
export PGX_TEST_TLS_CLIENT_CONN_STRING="host=localhost user=pgx_sslcert sslmode=verify-full sslrootcert=`pwd`/.testdb/ca.pem database=pgx_test sslcert=`pwd`/.testdb/pgx_sslcert.crt sslkey=`pwd`/.testdb/pgx_sslcert.key"
```
Create a new database cluster.
```
initdb --locale=en_US -E UTF-8 --username=postgres .testdb/$POSTGRESQL_DATA_DIR
echo "listen_addresses = '127.0.0.1'" >> .testdb/$POSTGRESQL_DATA_DIR/postgresql.conf
echo "port = $PGPORT" >> .testdb/$POSTGRESQL_DATA_DIR/postgresql.conf
cat testsetup/postgresql_ssl.conf >> .testdb/$POSTGRESQL_DATA_DIR/postgresql.conf
cp testsetup/pg_hba.conf .testdb/$POSTGRESQL_DATA_DIR/pg_hba.conf
cd .testdb
# Generate CA, server, and encrypted client certificates.
go run ../testsetup/generate_certs.go
# Copy certificates to server directory and set permissions.
cp ca.pem $POSTGRESQL_DATA_DIR/root.crt
cp localhost.key $POSTGRESQL_DATA_DIR/server.key
chmod 600 $POSTGRESQL_DATA_DIR/server.key
cp localhost.crt $POSTGRESQL_DATA_DIR/server.crt
cd ..
```
Start the new cluster. This will be necessary whenever you are running pgx tests.
```
postgres -D .testdb/$POSTGRESQL_DATA_DIR
```
Set up the test database in the new cluster.
```
createdb
psql --no-psqlrc -f testsetup/postgresql_setup.sql
```
### PgBouncer
There are tests specific for PgBouncer that will be executed if `PGX_TEST_PGBOUNCER_CONN_STRING` is set.
### Optional Tests
pgx supports multiple connection types and means of authentication. These tests are optional. They will only run if the
appropriate environment variables are set. In addition, there may be tests specific to particular PostgreSQL versions,
non-PostgreSQL servers (e.g. CockroachDB), or connection poolers (e.g. PgBouncer). Run `go test ./... -v | grep SKIP` to see
if any tests are being skipped.


@ -1,4 +1,4 @@
Copyright (c) 2013 Jack Christensen
Copyright (c) 2013-2021 Jack Christensen
MIT License
@ -19,4 +19,4 @@ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

197
README.md

@ -1,20 +1,17 @@
[![](https://godoc.org/github.com/jackc/pgx?status.svg)](https://pkg.go.dev/github.com/jackc/pgx/v4)
[![Build Status](https://travis-ci.org/jackc/pgx.svg)](https://travis-ci.org/jackc/pgx)
[![Go Reference](https://pkg.go.dev/badge/github.com/jackc/pgx/v5.svg)](https://pkg.go.dev/github.com/jackc/pgx/v5)
[![Build Status](https://github.com/jackc/pgx/actions/workflows/ci.yml/badge.svg)](https://github.com/jackc/pgx/actions/workflows/ci.yml)
# pgx - PostgreSQL Driver and Toolkit
pgx is a pure Go driver and toolkit for PostgreSQL.
pgx aims to be low-level, fast, and performant, while also enabling PostgreSQL-specific features that the standard `database/sql` package does not allow for.
The driver component of pgx can be used alongside the standard `database/sql` package.
The pgx driver is a low-level, high performance interface that exposes PostgreSQL-specific features such as `LISTEN` /
`NOTIFY` and `COPY`. It also includes an adapter for the standard `database/sql` interface.
The toolkit component is a related set of packages that implement PostgreSQL functionality such as parsing the wire protocol
and type mapping between PostgreSQL and Go. These underlying packages can be used to implement alternative drivers,
proxies, load balancers, logical replication clients, etc.
The current release of `pgx v4` requires Go modules. To use the previous version, checkout and vendor the `v3` branch.
## Example Usage
```go
@ -25,10 +22,11 @@ import (
"fmt"
"os"
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v5"
)
func main() {
// urlExample := "postgres://username:password@localhost:5432/database_name"
conn, err := pgx.Connect(context.Background(), os.Getenv("DATABASE_URL"))
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err)
@ -50,135 +48,58 @@ func main() {
See the [getting started guide](https://github.com/jackc/pgx/wiki/Getting-started-with-pgx) for more information.
## Choosing Between the pgx and database/sql Interfaces
It is recommended to use the pgx interface if:
1. The application only targets PostgreSQL.
2. No other libraries that require `database/sql` are in use.
The pgx interface is faster and exposes more features.
The `database/sql` interface only allows the underlying driver to return or receive the following types: `int64`,
`float64`, `bool`, `[]byte`, `string`, `time.Time`, or `nil`. Handling other types requires implementing the
`database/sql.Scanner` and the `database/sql/driver.Valuer` interfaces, which require transmission of values in text format. The binary format can be substantially faster, which is what the pgx interface uses.
## Features
pgx supports many features beyond what is available through `database/sql`:
* Support for approximately 70 different PostgreSQL types
* Automatic statement preparation and caching
* Batch queries
* Single-round trip query mode
* Full TLS connection control
* Binary format support for custom types (allows for much quicker encoding/decoding)
* Copy protocol support for faster bulk data loads
* Extendable logging support including built-in support for `log15adapter`, [`logrus`](https://github.com/sirupsen/logrus), [`zap`](https://github.com/uber-go/zap), and [`zerolog`](https://github.com/rs/zerolog)
* `COPY` protocol support for faster bulk data loads
* Tracing and logging support
* Connection pool with after-connect hook for arbitrary connection setup
* Listen / notify
* `LISTEN` / `NOTIFY`
* Conversion of PostgreSQL arrays to Go slice mappings for integers, floats, and strings
* Hstore support
* JSON and JSONB support
* Maps `inet` and `cidr` PostgreSQL types to `net.IPNet` and `net.IP`
* `hstore` support
* `json` and `jsonb` support
* Maps `inet` and `cidr` PostgreSQL types to `netip.Addr` and `netip.Prefix`
* Large object support
* NULL mapping to Null* struct or pointer to pointer
* NULL mapping to pointer to pointer
* Supports `database/sql.Scanner` and `database/sql/driver.Valuer` interfaces for custom types
* Notice response handling
* Simulated nested transactions with savepoints
## Performance
## Choosing Between the pgx and database/sql Interfaces
There are three areas in particular where pgx can provide a significant performance advantage over the standard
`database/sql` interface and other drivers:
The pgx interface is faster. Many PostgreSQL specific features such as `LISTEN` / `NOTIFY` and `COPY` are not available
through the `database/sql` interface.
1. PostgreSQL specific types - Types such as arrays can be parsed much quicker because pgx uses the binary format.
2. Automatic statement preparation and caching - pgx will prepare and cache statements by default. This can provide a
significant free improvement to code that does not explicitly use prepared statements. Under certain workloads, it can
perform nearly 3x the number of queries per second.
3. Batched queries - Multiple queries can be batched together to minimize network round trips.
The pgx interface is recommended when:
## Comparison with Alternatives
1. The application only targets PostgreSQL.
2. No other libraries that require `database/sql` are in use.
* [pq](http://godoc.org/github.com/lib/pq)
* [go-pg](https://github.com/go-pg/pg)
For prepared queries with small sets of simple data types, all drivers will have similar performance. However, if prepared statements aren't being explicitly used, pgx can have a significant performance advantage due to automatic statement preparation.
pgx also can perform better when using PostgreSQL-specific data types or query batching. See
[go_db_bench](https://github.com/jackc/go_db_bench) for some database driver benchmarks.
### Compatibility with `database/sql`
pq is exclusively used with `database/sql`. go-pg does not use `database/sql` at all. pgx supports `database/sql` as well as
its own interface.
### Level of access, ORM
go-pg is a PostgreSQL client and ORM. It includes many features that traditionally sit above the database driver, such as ORM, struct mapping, soft deletes, schema migrations, and sharding support.
pgx is "closer to the metal" and such abstractions are beyond the scope of the pgx project, which first and foremost, aims to be a performant driver and toolkit.
It is also possible to use the `database/sql` interface and convert a connection to the lower-level pgx interface as needed.
## Testing
pgx tests naturally require a PostgreSQL database. It will connect to the database specified in the `PGX_TEST_DATABASE` environment
variable. The `PGX_TEST_DATABASE` environment variable can either be a URL or DSN. In addition, the standard `PG*` environment
variables will be respected. Consider using [direnv](https://github.com/direnv/direnv) to simplify environment variable
handling.
See [CONTRIBUTING.md](./CONTRIBUTING.md) for setup instructions.
### Example Test Environment
## Architecture
Connect to your PostgreSQL server and run:
```
create database pgx_test;
```
Connect to the newly-created database and run:
```
create domain uint64 as numeric(20,0);
```
Now, you can run the tests:
```
PGX_TEST_DATABASE="host=/var/run/postgresql database=pgx_test" go test ./...
```
In addition, there are tests specific for PgBouncer that will be executed if `PGX_TEST_PGBOUNCER_CONN_STRING` is set.
See the presentation at Golang Estonia, [PGX Top to Bottom](https://www.youtube.com/watch?v=sXMSWhcHCf8) for a description of pgx architecture.
## Supported Go and PostgreSQL Versions
pgx supports the same versions of Go and PostgreSQL that are supported by their respective teams. For [Go](https://golang.org/doc/devel/release.html#policy) that is the two most recent major releases and for [PostgreSQL](https://www.postgresql.org/support/versioning/) the major releases in the last 5 years. This means pgx supports Go 1.13 and higher and PostgreSQL 9.5 and higher.
pgx supports the same versions of Go and PostgreSQL that are supported by their respective teams. For [Go](https://golang.org/doc/devel/release.html#policy) that is the two most recent major releases and for [PostgreSQL](https://www.postgresql.org/support/versioning/) the major releases in the last 5 years. This means pgx supports Go 1.23 and higher and PostgreSQL 13 and higher. pgx also is tested against the latest version of [CockroachDB](https://www.cockroachlabs.com/product/).
## Version Policy
pgx follows semantic versioning for the documented public API on stable releases. `v4` is the latest stable major version.
pgx follows semantic versioning for the documented public API on stable releases. `v5` is the latest stable major version.
## PGX Family Libraries
pgx is the head of a family of PostgreSQL libraries. Many of these can be used independently. Many can also be accessed
from pgx for lower-level control.
### [github.com/jackc/pgconn](https://github.com/jackc/pgconn)
`pgconn` is a lower-level PostgreSQL database driver that operates at nearly the same level as the C library `libpq`.
### [github.com/jackc/pgx/v4/pgxpool](https://github.com/jackc/pgx/tree/master/pgxpool)
`pgxpool` is a connection pool for pgx. pgx is entirely decoupled from its default pool implementation. This means that pgx can be used with a different pool, or without any pool at all.
### [github.com/jackc/pgx/v4/stdlib](https://github.com/jackc/pgx/tree/master/stdlib)
This is a `database/sql` compatibility layer for pgx. pgx can be used as a normal `database/sql` driver, but at any time, the native interface can be acquired for more performance or PostgreSQL specific functionality.
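A minimal sketch of opening a `database/sql` handle through the pgx driver (reading the connection string from an environment variable is only an example):

```go
import (
	"database/sql"
	"os"

	_ "github.com/jackc/pgx/v5/stdlib" // registers the "pgx" driver name
)

func openDB() (*sql.DB, error) {
	return sql.Open("pgx", os.Getenv("DATABASE_URL"))
}
```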
### [github.com/jackc/pgtype](https://github.com/jackc/pgtype)
Over 70 PostgreSQL types are supported including `uuid`, `hstore`, `json`, `bytea`, `numeric`, `interval`, `inet`, and arrays. These types support `database/sql` interfaces and are usable outside of pgx. They are fully tested in pgx and pq. They also support a higher performance interface when used with the pgx driver.
### [github.com/jackc/pgproto3](https://github.com/jackc/pgproto3)
pgproto3 provides standalone encoding and decoding of the PostgreSQL v3 wire protocol. This is useful for implementing very low level PostgreSQL tooling.
### [github.com/jackc/pglogrepl](https://github.com/jackc/pglogrepl)
pglogrepl provides functionality to act as a client for PostgreSQL logical replication.
@ -195,8 +116,76 @@ tern is a stand-alone SQL migration system.
pgerrcode contains constants for the PostgreSQL error codes.
## Adapters for 3rd Party Types
* [github.com/jackc/pgx-gofrs-uuid](https://github.com/jackc/pgx-gofrs-uuid)
* [github.com/jackc/pgx-shopspring-decimal](https://github.com/jackc/pgx-shopspring-decimal)
* [github.com/twpayne/pgx-geos](https://github.com/twpayne/pgx-geos) ([PostGIS](https://postgis.net/) and [GEOS](https://libgeos.org/) via [go-geos](https://github.com/twpayne/go-geos))
* [github.com/vgarvardt/pgx-google-uuid](https://github.com/vgarvardt/pgx-google-uuid)
## Adapters for 3rd Party Tracers
* [github.com/jackhopner/pgx-xray-tracer](https://github.com/jackhopner/pgx-xray-tracer)
* [github.com/exaring/otelpgx](https://github.com/exaring/otelpgx)
## Adapters for 3rd Party Loggers
These adapters can be used with the tracelog package.
* [github.com/jackc/pgx-go-kit-log](https://github.com/jackc/pgx-go-kit-log)
* [github.com/jackc/pgx-log15](https://github.com/jackc/pgx-log15)
* [github.com/jackc/pgx-logrus](https://github.com/jackc/pgx-logrus)
* [github.com/jackc/pgx-zap](https://github.com/jackc/pgx-zap)
* [github.com/jackc/pgx-zerolog](https://github.com/jackc/pgx-zerolog)
* [github.com/mcosta74/pgx-slog](https://github.com/mcosta74/pgx-slog)
* [github.com/kataras/pgx-golog](https://github.com/kataras/pgx-golog)
## 3rd Party Libraries with PGX Support
### [github.com/pashagolub/pgxmock](https://github.com/pashagolub/pgxmock)
pgxmock is a mock library implementing pgx interfaces.
pgxmock has one and only purpose - to simulate pgx behavior in tests, without needing a real database connection.
### [github.com/georgysavva/scany](https://github.com/georgysavva/scany)
Library for scanning data from a database into Go structs and more.
### [github.com/vingarcia/ksql](https://github.com/vingarcia/ksql)
A carefully designed SQL client for making using SQL easier,
more productive, and less error-prone on Golang.
### [github.com/otan/gopgkrb5](https://github.com/otan/gopgkrb5)
Adds GSSAPI / Kerberos authentication support.
### [github.com/wcamarao/pmx](https://github.com/wcamarao/pmx)
Explicit data mapping and scanning library for Go structs and slices.
### [github.com/stephenafamo/scan](https://github.com/stephenafamo/scan)
Type safe and flexible package for scanning database data into Go types.
Supports structs, maps, slices, and custom mapping functions.
### [github.com/z0ne-dev/mgx](https://github.com/z0ne-dev/mgx)
Code first migration library for native pgx (no database/sql abstraction).
### [github.com/amirsalarsafaei/sqlc-pgx-monitoring](https://github.com/amirsalarsafaei/sqlc-pgx-monitoring)
A database monitoring/metrics library for pgx and sqlc. Trace, log and monitor your sqlc query performance using OpenTelemetry.
### [https://github.com/nikolayk812/pgx-outbox](https://github.com/nikolayk812/pgx-outbox)
Simple Golang implementation for transactional outbox pattern for PostgreSQL using jackc/pgx driver.
### [https://github.com/Arlandaren/pgxWrappy](https://github.com/Arlandaren/pgxWrappy)
Simplifies working with the pgx library, providing convenient scanning of nested structures.
### [https://github.com/KoNekoD/pgx-colon-query-rewriter](https://github.com/KoNekoD/pgx-colon-query-rewriter)
Implementation of the pgx query rewriter to use ':' instead of '@' in named query parameters.

18
Rakefile Normal file

@ -0,0 +1,18 @@
require "erb"
rule '.go' => '.go.erb' do |task|
erb = ERB.new(File.read(task.source))
File.write(task.name, "// Code generated from #{task.source}. DO NOT EDIT.\n\n" + erb.result(binding))
sh "goimports", "-w", task.name
end
generated_code_files = [
"pgtype/int.go",
"pgtype/int_test.go",
"pgtype/integration_benchmark_test.go",
"pgtype/zeronull/int.go",
"pgtype/zeronull/int_test.go"
]
desc "Generate code"
task generate: generated_code_files

444
batch.go

@ -2,64 +2,136 @@ package pgx
import (
"context"
"errors"
"fmt"
"github.com/jackc/pgconn"
errors "golang.org/x/xerrors"
"github.com/jackc/pgx/v5/pgconn"
)
type batchItem struct {
query string
arguments []interface{}
// QueuedQuery is a query that has been queued for execution via a Batch.
type QueuedQuery struct {
SQL string
Arguments []any
Fn batchItemFunc
sd *pgconn.StatementDescription
}
type batchItemFunc func(br BatchResults) error
// Query sets fn to be called when the response to qq is received.
func (qq *QueuedQuery) Query(fn func(rows Rows) error) {
qq.Fn = func(br BatchResults) error {
rows, _ := br.Query()
defer rows.Close()
err := fn(rows)
if err != nil {
return err
}
rows.Close()
return rows.Err()
}
}
// QueryRow sets fn to be called when the response to qq is received.
func (qq *QueuedQuery) QueryRow(fn func(row Row) error) {
qq.Fn = func(br BatchResults) error {
row := br.QueryRow()
return fn(row)
}
}
// Exec sets fn to be called when the response to qq is received.
//
// Note: for simple batch insert uses where it is not required to handle
// each potential error individually, it's sufficient to not set any callbacks,
// and just handle the return value of BatchResults.Close.
func (qq *QueuedQuery) Exec(fn func(ct pgconn.CommandTag) error) {
qq.Fn = func(br BatchResults) error {
ct, err := br.Exec()
if err != nil {
return err
}
return fn(ct)
}
}
// Batch queries are a way of bundling multiple queries together to avoid
// unnecessary network round trips.
// unnecessary network round trips. A Batch must only be sent once.
type Batch struct {
items []*batchItem
QueuedQueries []*QueuedQuery
}
// Queue queues a query to batch b. query can be an SQL query or the name of a prepared statement.
func (b *Batch) Queue(query string, arguments ...interface{}) {
b.items = append(b.items, &batchItem{
query: query,
arguments: arguments,
})
// Queue queues a query to batch b. query can be an SQL query or the name of a prepared statement. The only pgx option
// argument that is supported is QueryRewriter. Queries are executed using the connection's DefaultQueryExecMode.
//
// While query can contain multiple statements if the connection's DefaultQueryExecMode is QueryModeSimple, this should
// be avoided. QueuedQuery.Fn must not be set as it will only be called for the first query. That is, QueuedQuery.Query,
// QueuedQuery.QueryRow, and QueuedQuery.Exec must not be called. In addition, any error messages or tracing that
// include the current query may reference the wrong query.
func (b *Batch) Queue(query string, arguments ...any) *QueuedQuery {
qq := &QueuedQuery{
SQL: query,
Arguments: arguments,
}
b.QueuedQueries = append(b.QueuedQueries, qq)
return qq
}
// Len returns the number of queries that have been queued so far.
func (b *Batch) Len() int {
return len(b.items)
return len(b.QueuedQueries)
}
type BatchResults interface {
// Exec reads the results from the next query in the batch as if the query has been sent with Conn.Exec.
// Exec reads the results from the next query in the batch as if the query has been sent with Conn.Exec. Prefer
// calling Exec on the QueuedQuery, or just calling Close.
Exec() (pgconn.CommandTag, error)
// Query reads the results from the next query in the batch as if the query has been sent with Conn.Query.
// Query reads the results from the next query in the batch as if the query has been sent with Conn.Query. Prefer
// calling Query on the QueuedQuery.
Query() (Rows, error)
// QueryRow reads the results from the next query in the batch as if the query has been sent with Conn.QueryRow.
// Prefer calling QueryRow on the QueuedQuery.
QueryRow() Row
// Close closes the batch operation. This must be called before the underlying connection can be used again. Any error
// that occurred during a batch operation may have made it impossible to resynchronize the connection with the server.
// In this case the underlying connection will have been closed.
// Close closes the batch operation. All unread results are read and any callback functions registered with
// QueuedQuery.Query, QueuedQuery.QueryRow, or QueuedQuery.Exec will be called. If a callback function returns an
// error or the batch encounters an error subsequent callback functions will not be called.
//
// For simple batch inserts inside a transaction or similar queries, it's sufficient to not set any callbacks,
// and just handle the return value of Close.
//
// Close must be called before the underlying connection can be used again. Any error that occurred during a batch
// operation may have made it impossible to resynchronize the connection with the server. In this case the underlying
// connection will have been closed.
//
// Close is safe to call multiple times. If it returns an error subsequent calls will return the same error. Callback
// functions will not be rerun.
Close() error
}
type batchResults struct {
ctx context.Context
conn *Conn
mrr *pgconn.MultiResultReader
err error
b *Batch
ix int
ctx context.Context
conn *Conn
mrr *pgconn.MultiResultReader
err error
b *Batch
qqIdx int
closed bool
endTraced bool
}
// Exec reads the results from the next query in the batch as if the query has been sent with Exec.
func (br *batchResults) Exec() (pgconn.CommandTag, error) {
if br.err != nil {
return nil, br.err
return pgconn.CommandTag{}, br.err
}
if br.closed {
return pgconn.CommandTag{}, fmt.Errorf("batch already closed")
}
query, arguments, _ := br.nextQueryAndArgs()
@ -67,37 +139,34 @@ func (br *batchResults) Exec() (pgconn.CommandTag, error) {
if !br.mrr.NextResult() {
err := br.mrr.Close()
if err == nil {
err = errors.New("no result")
err = errors.New("no more results in batch")
}
if br.conn.shouldLog(LogLevelError) {
br.conn.log(br.ctx, LogLevelError, "BatchResult.Exec", map[string]interface{} {
"sql": query,
"args": logQueryArgs(arguments),
"err": err,
if br.conn.batchTracer != nil {
br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
SQL: query,
Args: arguments,
Err: err,
})
}
return nil, err
return pgconn.CommandTag{}, err
}
commandTag, err := br.mrr.ResultReader().Close()
if err != nil {
if br.conn.shouldLog(LogLevelError) {
br.conn.log(br.ctx, LogLevelError, "BatchResult.Exec", map[string]interface{}{
"sql": query,
"args": logQueryArgs(arguments),
"err": err,
})
}
} else if br.conn.shouldLog(LogLevelInfo) {
br.conn.log(br.ctx, LogLevelInfo, "BatchResult.Exec", map[string]interface{} {
"sql": query,
"args": logQueryArgs(arguments),
"commandTag": commandTag,
br.err = err
br.mrr.Close()
}
if br.conn.batchTracer != nil {
br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
SQL: query,
Args: arguments,
CommandTag: commandTag,
Err: br.err,
})
}
return commandTag, err
return commandTag, br.err
}
// Query reads the results from the next query in the batch as if the query has been sent with Query.
@ -107,26 +176,30 @@ func (br *batchResults) Query() (Rows, error) {
query = "batch query"
}
rows := br.conn.getRows(br.ctx, query, arguments)
if br.err != nil {
rows.err = br.err
rows.closed = true
return rows, br.err
return &baseRows{err: br.err, closed: true}, br.err
}
if br.closed {
alreadyClosedErr := fmt.Errorf("batch already closed")
return &baseRows{err: alreadyClosedErr, closed: true}, alreadyClosedErr
}
rows := br.conn.getRows(br.ctx, query, arguments)
rows.batchTracer = br.conn.batchTracer
if !br.mrr.NextResult() {
rows.err = br.mrr.Close()
if rows.err == nil {
rows.err = errors.New("no result")
rows.err = errors.New("no more results in batch")
}
rows.closed = true
if br.conn.shouldLog(LogLevelError) {
br.conn.log(br.ctx, LogLevelError, "BatchResult.Query", map[string]interface{} {
"sql": query,
"args": logQueryArgs(arguments),
"err": rows.err,
if br.conn.batchTracer != nil {
br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
SQL: query,
Args: arguments,
Err: rows.err,
})
}
@ -140,42 +213,257 @@ func (br *batchResults) Query() (Rows, error) {
// QueryRow reads the results from the next query in the batch as if the query has been sent with QueryRow.
func (br *batchResults) QueryRow() Row {
rows, _ := br.Query()
return (*connRow)(rows.(*connRows))
return (*connRow)(rows.(*baseRows))
}
// Close closes the batch operation. Any error that occurred during a batch operation may have made it impossible to
// resynchronize the connection with the server. In this case the underlying connection will have been closed.
func (br *batchResults) Close() error {
defer func() {
if !br.endTraced {
if br.conn != nil && br.conn.batchTracer != nil {
br.conn.batchTracer.TraceBatchEnd(br.ctx, br.conn, TraceBatchEndData{Err: br.err})
}
br.endTraced = true
}
invalidateCachesOnBatchResultsError(br.conn, br.b, br.err)
}()
if br.err != nil {
return br.err
}
// log any queries that haven't yet been logged by Exec or Query
for {
query, args, ok := br.nextQueryAndArgs()
if !ok {
break
}
if br.closed {
return nil
}
if br.conn.shouldLog(LogLevelInfo) {
br.conn.log(br.ctx, LogLevelInfo, "BatchResult.Close", map[string]interface{} {
"sql": query,
"args": logQueryArgs(args),
})
// Read and run fn for all remaining items
for br.err == nil && !br.closed && br.b != nil && br.qqIdx < len(br.b.QueuedQueries) {
if br.b.QueuedQueries[br.qqIdx].Fn != nil {
err := br.b.QueuedQueries[br.qqIdx].Fn(br)
if err != nil {
br.err = err
}
} else {
br.Exec()
}
}
return br.mrr.Close()
br.closed = true
err := br.mrr.Close()
if br.err == nil {
br.err = err
}
return br.err
}
func (br *batchResults) nextQueryAndArgs() (query string, args []interface{}, ok bool) {
if br.b != nil && br.ix < len(br.b.items) {
bi := br.b.items[br.ix]
query = bi.query
args = bi.arguments
func (br *batchResults) earlyError() error {
return br.err
}
func (br *batchResults) nextQueryAndArgs() (query string, args []any, ok bool) {
if br.b != nil && br.qqIdx < len(br.b.QueuedQueries) {
bi := br.b.QueuedQueries[br.qqIdx]
query = bi.SQL
args = bi.Arguments
ok = true
br.ix++
br.qqIdx++
}
return
}
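For orientation, here is a minimal usage sketch of the BatchResults flow implemented above. It is not part of the diff; ctx, conn, the queued queries, and the imports of context and github.com/jackc/pgx/v5 are placeholders/assumptions.
// Sketch: drain a batch explicitly with per-query results.
func runBatchSketch(ctx context.Context, conn *pgx.Conn) error {
	batch := &pgx.Batch{}
	batch.Queue("select 1")
	batch.Queue("select 2")

	br := conn.SendBatch(ctx, batch)
	var a, b int
	err := br.QueryRow().Scan(&a) // result of the first queued query
	if err == nil {
		err = br.QueryRow().Scan(&b) // result of the second queued query
	}
	// Close drains any unread results, runs queued Fn callbacks, and reports the first error.
	if closeErr := br.Close(); err == nil {
		err = closeErr
	}
	return err
}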
type pipelineBatchResults struct {
ctx context.Context
conn *Conn
pipeline *pgconn.Pipeline
lastRows *baseRows
err error
b *Batch
qqIdx int
closed bool
endTraced bool
}
// Exec reads the results from the next query in the batch as if the query has been sent with Exec.
func (br *pipelineBatchResults) Exec() (pgconn.CommandTag, error) {
if br.err != nil {
return pgconn.CommandTag{}, br.err
}
if br.closed {
return pgconn.CommandTag{}, fmt.Errorf("batch already closed")
}
if br.lastRows != nil && br.lastRows.err != nil {
br.err = br.lastRows.err
return pgconn.CommandTag{}, br.err
}
query, arguments, err := br.nextQueryAndArgs()
if err != nil {
return pgconn.CommandTag{}, err
}
results, err := br.pipeline.GetResults()
if err != nil {
br.err = err
return pgconn.CommandTag{}, br.err
}
var commandTag pgconn.CommandTag
switch results := results.(type) {
case *pgconn.ResultReader:
commandTag, br.err = results.Close()
default:
return pgconn.CommandTag{}, fmt.Errorf("unexpected pipeline result: %T", results)
}
if br.conn.batchTracer != nil {
br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
SQL: query,
Args: arguments,
CommandTag: commandTag,
Err: br.err,
})
}
return commandTag, br.err
}
// Query reads the results from the next query in the batch as if the query has been sent with Query.
func (br *pipelineBatchResults) Query() (Rows, error) {
if br.err != nil {
return &baseRows{err: br.err, closed: true}, br.err
}
if br.closed {
alreadyClosedErr := fmt.Errorf("batch already closed")
return &baseRows{err: alreadyClosedErr, closed: true}, alreadyClosedErr
}
if br.lastRows != nil && br.lastRows.err != nil {
br.err = br.lastRows.err
return &baseRows{err: br.err, closed: true}, br.err
}
query, arguments, err := br.nextQueryAndArgs()
if err != nil {
return &baseRows{err: err, closed: true}, err
}
rows := br.conn.getRows(br.ctx, query, arguments)
rows.batchTracer = br.conn.batchTracer
br.lastRows = rows
results, err := br.pipeline.GetResults()
if err != nil {
br.err = err
rows.err = err
rows.closed = true
if br.conn.batchTracer != nil {
br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
SQL: query,
Args: arguments,
Err: err,
})
}
} else {
switch results := results.(type) {
case *pgconn.ResultReader:
rows.resultReader = results
default:
err = fmt.Errorf("unexpected pipeline result: %T", results)
br.err = err
rows.err = err
rows.closed = true
}
}
return rows, rows.err
}
// QueryRow reads the results from the next query in the batch as if the query has been sent with QueryRow.
func (br *pipelineBatchResults) QueryRow() Row {
rows, _ := br.Query()
return (*connRow)(rows.(*baseRows))
}
// Close closes the batch operation. Any error that occurred during a batch operation may have made it impossible to
// resynchronize the connection with the server. In this case the underlying connection will have been closed.
func (br *pipelineBatchResults) Close() error {
defer func() {
if !br.endTraced {
if br.conn.batchTracer != nil {
br.conn.batchTracer.TraceBatchEnd(br.ctx, br.conn, TraceBatchEndData{Err: br.err})
}
br.endTraced = true
}
invalidateCachesOnBatchResultsError(br.conn, br.b, br.err)
}()
if br.err == nil && br.lastRows != nil && br.lastRows.err != nil {
br.err = br.lastRows.err
return br.err
}
if br.closed {
return br.err
}
// Read and run fn for all remaining items
for br.err == nil && !br.closed && br.b != nil && br.qqIdx < len(br.b.QueuedQueries) {
if br.b.QueuedQueries[br.qqIdx].Fn != nil {
err := br.b.QueuedQueries[br.qqIdx].Fn(br)
if err != nil {
br.err = err
}
} else {
br.Exec()
}
}
br.closed = true
err := br.pipeline.Close()
if br.err == nil {
br.err = err
}
return br.err
}
func (br *pipelineBatchResults) earlyError() error {
return br.err
}
func (br *pipelineBatchResults) nextQueryAndArgs() (query string, args []any, err error) {
if br.b == nil {
return "", nil, errors.New("no reference to batch")
}
if br.qqIdx >= len(br.b.QueuedQueries) {
return "", nil, errors.New("no more results in batch")
}
bi := br.b.QueuedQueries[br.qqIdx]
br.qqIdx++
return bi.SQL, bi.Arguments, nil
}
// invalidates statement and description caches on batch results error
func invalidateCachesOnBatchResultsError(conn *Conn, b *Batch, err error) {
if err != nil && conn != nil && b != nil {
if sc := conn.statementCache; sc != nil {
for _, bi := range b.QueuedQueries {
sc.Invalidate(bi.SQL)
}
}
if sc := conn.descriptionCache; sc != nil {
for _, bi := range b.QueuedQueries {
sc.Invalidate(bi.SQL)
}
}
}
}

File diff suppressed because it is too large

@ -12,16 +12,31 @@ import (
"testing"
"time"
"github.com/jackc/pgconn"
"github.com/jackc/pgconn/stmtcache"
"github.com/jackc/pgtype"
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgtype"
"github.com/stretchr/testify/require"
)
func BenchmarkConnectClose(b *testing.B) {
for i := 0; i < b.N; i++ {
conn, err := pgx.Connect(context.Background(), os.Getenv("PGX_TEST_DATABASE"))
if err != nil {
b.Fatal(err)
}
err = conn.Close(context.Background())
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkMinimalUnpreparedSelectWithoutStatementCache(b *testing.B) {
config := mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE"))
config.BuildStatementCache = nil
config.DefaultQueryExecMode = pgx.QueryExecModeDescribeExec
config.StatementCacheCapacity = 0
config.DescriptionCacheCapacity = 0
conn := mustConnect(b, config)
defer closeConn(b, conn)
@ -43,9 +58,9 @@ func BenchmarkMinimalUnpreparedSelectWithoutStatementCache(b *testing.B) {
func BenchmarkMinimalUnpreparedSelectWithStatementCacheModeDescribe(b *testing.B) {
config := mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE"))
config.BuildStatementCache = func(conn *pgconn.PgConn) stmtcache.Cache {
return stmtcache.New(conn, stmtcache.ModeDescribe, 32)
}
config.DefaultQueryExecMode = pgx.QueryExecModeCacheDescribe
config.StatementCacheCapacity = 0
config.DescriptionCacheCapacity = 32
conn := mustConnect(b, config)
defer closeConn(b, conn)
@ -67,9 +82,9 @@ func BenchmarkMinimalUnpreparedSelectWithStatementCacheModeDescribe(b *testing.B
func BenchmarkMinimalUnpreparedSelectWithStatementCacheModePrepare(b *testing.B) {
config := mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE"))
config.BuildStatementCache = func(conn *pgconn.PgConn) stmtcache.Cache {
return stmtcache.New(conn, stmtcache.ModePrepare, 32)
}
config.DefaultQueryExecMode = pgx.QueryExecModeCacheStatement
config.StatementCacheCapacity = 32
config.DescriptionCacheCapacity = 0
conn := mustConnect(b, config)
defer closeConn(b, conn)
@ -136,7 +151,7 @@ func BenchmarkMinimalPgConnPreparedSelect(b *testing.B) {
for rr.NextRow() {
for i := range rr.Values() {
if bytes.Compare(rr.Values()[0], encodedBytes) != 0 {
if !bytes.Equal(rr.Values()[0], encodedBytes) {
b.Fatalf("unexpected values: %s %s", rr.Values()[i], encodedBytes)
}
}
@ -268,123 +283,6 @@ func BenchmarkPointerPointerWithPresentValues(b *testing.B) {
}
}
func BenchmarkSelectWithoutLogging(b *testing.B) {
conn := mustConnect(b, mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE")))
defer closeConn(b, conn)
benchmarkSelectWithLog(b, conn)
}
type discardLogger struct{}
func (dl discardLogger) Log(ctx context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) {
}
func BenchmarkSelectWithLoggingTraceDiscard(b *testing.B) {
var logger discardLogger
config := mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE"))
config.Logger = logger
config.LogLevel = pgx.LogLevelTrace
conn := mustConnect(b, config)
defer closeConn(b, conn)
benchmarkSelectWithLog(b, conn)
}
func BenchmarkSelectWithLoggingDebugWithDiscard(b *testing.B) {
var logger discardLogger
config := mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE"))
config.Logger = logger
config.LogLevel = pgx.LogLevelDebug
conn := mustConnect(b, config)
defer closeConn(b, conn)
benchmarkSelectWithLog(b, conn)
}
func BenchmarkSelectWithLoggingInfoWithDiscard(b *testing.B) {
var logger discardLogger
config := mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE"))
config.Logger = logger
config.LogLevel = pgx.LogLevelInfo
conn := mustConnect(b, config)
defer closeConn(b, conn)
benchmarkSelectWithLog(b, conn)
}
func BenchmarkSelectWithLoggingErrorWithDiscard(b *testing.B) {
var logger discardLogger
config := mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE"))
config.Logger = logger
config.LogLevel = pgx.LogLevelError
conn := mustConnect(b, config)
defer closeConn(b, conn)
benchmarkSelectWithLog(b, conn)
}
func benchmarkSelectWithLog(b *testing.B, conn *pgx.Conn) {
_, err := conn.Prepare(context.Background(), "test", "select 1::int4, 'johnsmith', 'johnsmith@example.com', 'John Smith', 'male', '1970-01-01'::date, '2015-01-01 00:00:00'::timestamptz")
if err != nil {
b.Fatal(err)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
var record struct {
id int32
userName string
email string
name string
sex string
birthDate time.Time
lastLoginTime time.Time
}
err = conn.QueryRow(context.Background(), "test").Scan(
&record.id,
&record.userName,
&record.email,
&record.name,
&record.sex,
&record.birthDate,
&record.lastLoginTime,
)
if err != nil {
b.Fatal(err)
}
// These checks both ensure that the correct data was returned
// and provide a benchmark of accessing the returned values.
if record.id != 1 {
b.Fatalf("bad value for id: %v", record.id)
}
if record.userName != "johnsmith" {
b.Fatalf("bad value for userName: %v", record.userName)
}
if record.email != "johnsmith@example.com" {
b.Fatalf("bad value for email: %v", record.email)
}
if record.name != "John Smith" {
b.Fatalf("bad value for name: %v", record.name)
}
if record.sex != "male" {
b.Fatalf("bad value for sex: %v", record.sex)
}
if record.birthDate != time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC) {
b.Fatalf("bad value for birthDate: %v", record.birthDate)
}
if record.lastLoginTime != time.Date(2015, 1, 1, 0, 0, 0, 0, time.Local) {
b.Fatalf("bad value for lastLoginTime: %v", record.lastLoginTime)
}
}
}
const benchmarkWriteTableCreateSQL = `drop table if exists t;
create table t(
@ -437,15 +335,16 @@ const benchmarkWriteTableInsertSQL = `insert into t(
type benchmarkWriteTableCopyFromSrc struct {
count int
idx int
row []interface{}
row []any
}
func (s *benchmarkWriteTableCopyFromSrc) Next() bool {
next := s.idx < s.count
s.idx++
return s.idx < s.count
return next
}
func (s *benchmarkWriteTableCopyFromSrc) Values() ([]interface{}, error) {
func (s *benchmarkWriteTableCopyFromSrc) Values() ([]any, error) {
return s.row, nil
}
@ -456,15 +355,15 @@ func (s *benchmarkWriteTableCopyFromSrc) Err() error {
func newBenchmarkWriteTableCopyFromSrc(count int) pgx.CopyFromSource {
return &benchmarkWriteTableCopyFromSrc{
count: count,
row: []interface{}{
row: []any{
"varchar_1",
"varchar_2",
&pgtype.Text{Status: pgtype.Null},
&pgtype.Text{},
time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local),
&pgtype.Date{Status: pgtype.Null},
&pgtype.Date{},
1,
2,
&pgtype.Int4{Status: pgtype.Null},
&pgtype.Int4{},
time.Date(2001, 1, 1, 0, 0, 0, 0, time.Local),
time.Date(2002, 1, 1, 0, 0, 0, 0, time.Local),
true,
@ -508,9 +407,37 @@ func benchmarkWriteNRowsViaInsert(b *testing.B, n int) {
}
}
type queryArgs []interface{}
func benchmarkWriteNRowsViaBatchInsert(b *testing.B, n int) {
conn := mustConnect(b, mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE")))
defer closeConn(b, conn)
func (qa *queryArgs) Append(v interface{}) string {
mustExec(b, conn, benchmarkWriteTableCreateSQL)
_, err := conn.Prepare(context.Background(), "insert_t", benchmarkWriteTableInsertSQL)
if err != nil {
b.Fatal(err)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
src := newBenchmarkWriteTableCopyFromSrc(n)
batch := &pgx.Batch{}
for src.Next() {
values, _ := src.Values()
batch.Queue("insert_t", values...)
}
err = conn.SendBatch(context.Background(), batch).Close()
if err != nil {
b.Fatal(err)
}
}
}
type queryArgs []any
func (qa *queryArgs) Append(v any) string {
*qa = append(*qa, v)
return "$" + strconv.Itoa(len(*qa))
}
@ -585,11 +512,10 @@ func multiInsert(conn *pgx.Conn, tableName string, columnNames []string, rowSrc
}
if err := tx.Commit(context.Background()); err != nil {
return 0, nil
return 0, err
}
return rowCount, nil
}
func benchmarkWriteNRowsViaMultiInsert(b *testing.B, n int) {
@ -608,7 +534,8 @@ func benchmarkWriteNRowsViaMultiInsert(b *testing.B, n int) {
src := newBenchmarkWriteTableCopyFromSrc(n)
_, err := multiInsert(conn, "t",
[]string{"varchar_1",
[]string{
"varchar_1",
"varchar_2",
"varchar_null_1",
"date_1",
@ -620,7 +547,8 @@ func benchmarkWriteNRowsViaMultiInsert(b *testing.B, n int) {
"tstz_2",
"bool_1",
"bool_2",
"bool_3"},
"bool_3",
},
src)
if err != nil {
b.Fatal(err)
@ -641,7 +569,8 @@ func benchmarkWriteNRowsViaCopy(b *testing.B, n int) {
_, err := conn.CopyFrom(context.Background(),
pgx.Identifier{"t"},
[]string{"varchar_1",
[]string{
"varchar_1",
"varchar_2",
"varchar_null_1",
"date_1",
@ -653,7 +582,8 @@ func benchmarkWriteNRowsViaCopy(b *testing.B, n int) {
"tstz_2",
"bool_1",
"bool_2",
"bool_3"},
"bool_3",
},
src)
if err != nil {
b.Fatal(err)
@ -661,6 +591,22 @@ func benchmarkWriteNRowsViaCopy(b *testing.B, n int) {
}
}
func BenchmarkWrite2RowsViaInsert(b *testing.B) {
benchmarkWriteNRowsViaInsert(b, 2)
}
func BenchmarkWrite2RowsViaMultiInsert(b *testing.B) {
benchmarkWriteNRowsViaMultiInsert(b, 2)
}
func BenchmarkWrite2RowsViaBatchInsert(b *testing.B) {
benchmarkWriteNRowsViaBatchInsert(b, 2)
}
func BenchmarkWrite2RowsViaCopy(b *testing.B) {
benchmarkWriteNRowsViaCopy(b, 2)
}
func BenchmarkWrite5RowsViaInsert(b *testing.B) {
benchmarkWriteNRowsViaInsert(b, 5)
}
@ -669,6 +615,10 @@ func BenchmarkWrite5RowsViaMultiInsert(b *testing.B) {
benchmarkWriteNRowsViaMultiInsert(b, 5)
}
func BenchmarkWrite5RowsViaBatchInsert(b *testing.B) {
benchmarkWriteNRowsViaBatchInsert(b, 5)
}
func BenchmarkWrite5RowsViaCopy(b *testing.B) {
benchmarkWriteNRowsViaCopy(b, 5)
}
@ -681,6 +631,10 @@ func BenchmarkWrite10RowsViaMultiInsert(b *testing.B) {
benchmarkWriteNRowsViaMultiInsert(b, 10)
}
func BenchmarkWrite10RowsViaBatchInsert(b *testing.B) {
benchmarkWriteNRowsViaBatchInsert(b, 10)
}
func BenchmarkWrite10RowsViaCopy(b *testing.B) {
benchmarkWriteNRowsViaCopy(b, 10)
}
@ -693,6 +647,10 @@ func BenchmarkWrite100RowsViaMultiInsert(b *testing.B) {
benchmarkWriteNRowsViaMultiInsert(b, 100)
}
func BenchmarkWrite100RowsViaBatchInsert(b *testing.B) {
benchmarkWriteNRowsViaBatchInsert(b, 100)
}
func BenchmarkWrite100RowsViaCopy(b *testing.B) {
benchmarkWriteNRowsViaCopy(b, 100)
}
@ -705,6 +663,10 @@ func BenchmarkWrite1000RowsViaMultiInsert(b *testing.B) {
benchmarkWriteNRowsViaMultiInsert(b, 1000)
}
func BenchmarkWrite1000RowsViaBatchInsert(b *testing.B) {
benchmarkWriteNRowsViaBatchInsert(b, 1000)
}
func BenchmarkWrite1000RowsViaCopy(b *testing.B) {
benchmarkWriteNRowsViaCopy(b, 1000)
}
@ -717,13 +679,19 @@ func BenchmarkWrite10000RowsViaMultiInsert(b *testing.B) {
benchmarkWriteNRowsViaMultiInsert(b, 10000)
}
func BenchmarkWrite10000RowsViaBatchInsert(b *testing.B) {
benchmarkWriteNRowsViaBatchInsert(b, 10000)
}
func BenchmarkWrite10000RowsViaCopy(b *testing.B) {
benchmarkWriteNRowsViaCopy(b, 10000)
}
func BenchmarkMultipleQueriesNonBatchNoStatementCache(b *testing.B) {
config := mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE"))
config.BuildStatementCache = nil
config.DefaultQueryExecMode = pgx.QueryExecModeDescribeExec
config.StatementCacheCapacity = 0
config.DescriptionCacheCapacity = 0
conn := mustConnect(b, config)
defer closeConn(b, conn)
@ -733,9 +701,9 @@ func BenchmarkMultipleQueriesNonBatchNoStatementCache(b *testing.B) {
func BenchmarkMultipleQueriesNonBatchPrepareStatementCache(b *testing.B) {
config := mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE"))
config.BuildStatementCache = func(conn *pgconn.PgConn) stmtcache.Cache {
return stmtcache.New(conn, stmtcache.ModePrepare, 32)
}
config.DefaultQueryExecMode = pgx.QueryExecModeCacheStatement
config.StatementCacheCapacity = 32
config.DescriptionCacheCapacity = 0
conn := mustConnect(b, config)
defer closeConn(b, conn)
@ -745,9 +713,9 @@ func BenchmarkMultipleQueriesNonBatchPrepareStatementCache(b *testing.B) {
func BenchmarkMultipleQueriesNonBatchDescribeStatementCache(b *testing.B) {
config := mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE"))
config.BuildStatementCache = func(conn *pgconn.PgConn) stmtcache.Cache {
return stmtcache.New(conn, stmtcache.ModeDescribe, 32)
}
config.DefaultQueryExecMode = pgx.QueryExecModeCacheDescribe
config.StatementCacheCapacity = 0
config.DescriptionCacheCapacity = 32
conn := mustConnect(b, config)
defer closeConn(b, conn)
@ -783,7 +751,9 @@ func benchmarkMultipleQueriesNonBatch(b *testing.B, conn *pgx.Conn, queryCount i
func BenchmarkMultipleQueriesBatchNoStatementCache(b *testing.B) {
config := mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE"))
config.BuildStatementCache = nil
config.DefaultQueryExecMode = pgx.QueryExecModeDescribeExec
config.StatementCacheCapacity = 0
config.DescriptionCacheCapacity = 0
conn := mustConnect(b, config)
defer closeConn(b, conn)
@ -793,9 +763,9 @@ func BenchmarkMultipleQueriesBatchNoStatementCache(b *testing.B) {
func BenchmarkMultipleQueriesBatchPrepareStatementCache(b *testing.B) {
config := mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE"))
config.BuildStatementCache = func(conn *pgconn.PgConn) stmtcache.Cache {
return stmtcache.New(conn, stmtcache.ModePrepare, 32)
}
config.DefaultQueryExecMode = pgx.QueryExecModeCacheStatement
config.StatementCacheCapacity = 32
config.DescriptionCacheCapacity = 0
conn := mustConnect(b, config)
defer closeConn(b, conn)
@ -805,9 +775,9 @@ func BenchmarkMultipleQueriesBatchPrepareStatementCache(b *testing.B) {
func BenchmarkMultipleQueriesBatchDescribeStatementCache(b *testing.B) {
config := mustParseConfig(b, os.Getenv("PGX_TEST_DATABASE"))
config.BuildStatementCache = func(conn *pgconn.PgConn) stmtcache.Cache {
return stmtcache.New(conn, stmtcache.ModeDescribe, 32)
}
config.DefaultQueryExecMode = pgx.QueryExecModeCacheDescribe
config.StatementCacheCapacity = 0
config.DescriptionCacheCapacity = 32
conn := mustConnect(b, config)
defer closeConn(b, conn)
@ -918,8 +888,7 @@ func BenchmarkSelectManyRegisteredEnum(b *testing.B) {
err = conn.QueryRow(context.Background(), "select oid from pg_type where typname=$1;", "color").Scan(&oid)
require.NoError(b, err)
et := pgtype.NewEnumType("color", []string{"blue", "green", "orange"})
conn.ConnInfo().RegisterDataType(pgtype.DataType{Value: et, Name: "color", OID: oid})
conn.TypeMap().RegisterType(&pgtype.Type{Name: "color", OID: oid, Codec: &pgtype.EnumCodec{}})
b.ResetTimer()
var x, y, z string
@ -982,6 +951,7 @@ type BenchRowSimple struct {
BirthDate time.Time
Weight int32
Height int32
Tags []string
UpdateTime time.Time
}
@ -995,13 +965,13 @@ func BenchmarkSelectRowsScanSimple(b *testing.B) {
b.Run(fmt.Sprintf("%d rows", rowCount), func(b *testing.B) {
br := &BenchRowSimple{}
for i := 0; i < b.N; i++ {
rows, err := conn.Query(context.Background(), "select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n", rowCount)
rows, err := conn.Query(context.Background(), "select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '{foo,bar,baz}'::text[], '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n", rowCount)
if err != nil {
b.Fatal(err)
}
for rows.Next() {
rows.Scan(&br.ID, &br.FirstName, &br.LastName, &br.Sex, &br.BirthDate, &br.Weight, &br.Height, &br.UpdateTime)
rows.Scan(&br.ID, &br.FirstName, &br.LastName, &br.Sex, &br.BirthDate, &br.Weight, &br.Height, &br.Tags, &br.UpdateTime)
}
if rows.Err() != nil {
@ -1020,6 +990,7 @@ type BenchRowStringBytes struct {
BirthDate time.Time
Weight int32
Height int32
Tags []string
UpdateTime time.Time
}
@ -1033,13 +1004,13 @@ func BenchmarkSelectRowsScanStringBytes(b *testing.B) {
b.Run(fmt.Sprintf("%d rows", rowCount), func(b *testing.B) {
br := &BenchRowStringBytes{}
for i := 0; i < b.N; i++ {
rows, err := conn.Query(context.Background(), "select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n", rowCount)
rows, err := conn.Query(context.Background(), "select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '{foo,bar,baz}'::text[], '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n", rowCount)
if err != nil {
b.Fatal(err)
}
for rows.Next() {
rows.Scan(&br.ID, &br.FirstName, &br.LastName, &br.Sex, &br.BirthDate, &br.Weight, &br.Height, &br.UpdateTime)
rows.Scan(&br.ID, &br.FirstName, &br.LastName, &br.Sex, &br.BirthDate, &br.Weight, &br.Height, &br.Tags, &br.UpdateTime)
}
if rows.Err() != nil {
@ -1058,6 +1029,7 @@ type BenchRowDecoder struct {
BirthDate pgtype.Date
Weight pgtype.Int4
Height pgtype.Int4
Tags pgtype.FlatArray[string]
UpdateTime pgtype.Timestamptz
}
@ -1078,12 +1050,11 @@ func BenchmarkSelectRowsScanDecoder(b *testing.B) {
}
for _, format := range formats {
b.Run(format.name, func(b *testing.B) {
br := &BenchRowDecoder{}
for i := 0; i < b.N; i++ {
rows, err := conn.Query(
context.Background(),
"select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n",
"select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '{foo,bar,baz}'::text[], '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n",
pgx.QueryResultFormats{format.code},
rowCount,
)
@ -1092,7 +1063,7 @@ func BenchmarkSelectRowsScanDecoder(b *testing.B) {
}
for rows.Next() {
rows.Scan(&br.ID, &br.FirstName, &br.LastName, &br.Sex, &br.BirthDate, &br.Weight, &br.Height, &br.UpdateTime)
rows.Scan(&br.ID, &br.FirstName, &br.LastName, &br.Sex, &br.BirthDate, &br.Weight, &br.Height, &br.Tags, &br.UpdateTime)
}
if rows.Err() != nil {
@ -1105,73 +1076,6 @@ func BenchmarkSelectRowsScanDecoder(b *testing.B) {
}
}
func BenchmarkSelectRowsExplicitDecoding(b *testing.B) {
conn := mustConnectString(b, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(b, conn)
rowCounts := getSelectRowsCounts(b)
for _, rowCount := range rowCounts {
b.Run(fmt.Sprintf("%d rows", rowCount), func(b *testing.B) {
br := &BenchRowDecoder{}
for i := 0; i < b.N; i++ {
rows, err := conn.Query(context.Background(), "select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n", rowCount)
if err != nil {
b.Fatal(err)
}
for rows.Next() {
rawValues := rows.RawValues()
err = br.ID.DecodeBinary(conn.ConnInfo(), rawValues[0])
if err != nil {
b.Fatal(err)
}
err = br.FirstName.DecodeText(conn.ConnInfo(), rawValues[1])
if err != nil {
b.Fatal(err)
}
err = br.LastName.DecodeText(conn.ConnInfo(), rawValues[2])
if err != nil {
b.Fatal(err)
}
err = br.Sex.DecodeText(conn.ConnInfo(), rawValues[3])
if err != nil {
b.Fatal(err)
}
err = br.BirthDate.DecodeBinary(conn.ConnInfo(), rawValues[4])
if err != nil {
b.Fatal(err)
}
err = br.Weight.DecodeBinary(conn.ConnInfo(), rawValues[5])
if err != nil {
b.Fatal(err)
}
err = br.Height.DecodeBinary(conn.ConnInfo(), rawValues[6])
if err != nil {
b.Fatal(err)
}
err = br.UpdateTime.DecodeBinary(conn.ConnInfo(), rawValues[7])
if err != nil {
b.Fatal(err)
}
}
if rows.Err() != nil {
b.Fatal(rows.Err())
}
}
})
}
}
func BenchmarkSelectRowsPgConnExecText(b *testing.B) {
conn := mustConnectString(b, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(b, conn)
@ -1181,7 +1085,7 @@ func BenchmarkSelectRowsPgConnExecText(b *testing.B) {
for _, rowCount := range rowCounts {
b.Run(fmt.Sprintf("%d rows", rowCount), func(b *testing.B) {
for i := 0; i < b.N; i++ {
mrr := conn.PgConn().Exec(context.Background(), fmt.Sprintf("select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + %d) n", rowCount))
mrr := conn.PgConn().Exec(context.Background(), fmt.Sprintf("select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '{foo,bar,baz}'::text[], '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + %d) n", rowCount))
for mrr.NextResult() {
rr := mrr.ResultReader()
for rr.NextRow() {
@ -1218,11 +1122,11 @@ func BenchmarkSelectRowsPgConnExecParams(b *testing.B) {
for i := 0; i < b.N; i++ {
rr := conn.PgConn().ExecParams(
context.Background(),
"select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n",
"select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '{foo,bar,baz}'::text[], '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n",
[][]byte{[]byte(strconv.FormatInt(rowCount, 10))},
nil,
nil,
[]int16{format.code, pgx.TextFormatCode, pgx.TextFormatCode, pgx.TextFormatCode, format.code, format.code, format.code, format.code},
[]int16{format.code, pgx.TextFormatCode, pgx.TextFormatCode, pgx.TextFormatCode, format.code, format.code, format.code, format.code, format.code},
)
for rr.NextRow() {
rr.Values()
@ -1239,13 +1143,107 @@ func BenchmarkSelectRowsPgConnExecParams(b *testing.B) {
}
}
func BenchmarkSelectRowsSimpleCollectRowsRowToStructByPos(b *testing.B) {
conn := mustConnectString(b, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(b, conn)
rowCounts := getSelectRowsCounts(b)
for _, rowCount := range rowCounts {
b.Run(fmt.Sprintf("%d rows", rowCount), func(b *testing.B) {
for i := 0; i < b.N; i++ {
rows, _ := conn.Query(context.Background(), "select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '{foo,bar,baz}'::text[], '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n", rowCount)
benchRows, err := pgx.CollectRows(rows, pgx.RowToStructByPos[BenchRowSimple])
if err != nil {
b.Fatal(err)
}
if len(benchRows) != int(rowCount) {
b.Fatalf("Expected %d rows, got %d", rowCount, len(benchRows))
}
}
})
}
}
func BenchmarkSelectRowsSimpleAppendRowsRowToStructByPos(b *testing.B) {
conn := mustConnectString(b, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(b, conn)
rowCounts := getSelectRowsCounts(b)
for _, rowCount := range rowCounts {
b.Run(fmt.Sprintf("%d rows", rowCount), func(b *testing.B) {
benchRows := make([]BenchRowSimple, 0, rowCount)
for i := 0; i < b.N; i++ {
benchRows = benchRows[:0]
rows, _ := conn.Query(context.Background(), "select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '{foo,bar,baz}'::text[], '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n", rowCount)
var err error
benchRows, err = pgx.AppendRows(benchRows, rows, pgx.RowToStructByPos[BenchRowSimple])
if err != nil {
b.Fatal(err)
}
if len(benchRows) != int(rowCount) {
b.Fatalf("Expected %d rows, got %d", rowCount, len(benchRows))
}
}
})
}
}
func BenchmarkSelectRowsSimpleCollectRowsRowToStructByName(b *testing.B) {
conn := mustConnectString(b, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(b, conn)
rowCounts := getSelectRowsCounts(b)
for _, rowCount := range rowCounts {
b.Run(fmt.Sprintf("%d rows", rowCount), func(b *testing.B) {
for i := 0; i < b.N; i++ {
rows, _ := conn.Query(context.Background(), "select n as id, 'Adam' as first_name, 'Smith ' || n as last_name, 'male' as sex, '1952-06-16'::date as birth_date, 258 as weight, 72 as height, '{foo,bar,baz}'::text[] as tags, '2001-01-28 01:02:03-05'::timestamptz as update_time from generate_series(100001, 100000 + $1) n", rowCount)
benchRows, err := pgx.CollectRows(rows, pgx.RowToStructByName[BenchRowSimple])
if err != nil {
b.Fatal(err)
}
if len(benchRows) != int(rowCount) {
b.Fatalf("Expected %d rows, got %d", rowCount, len(benchRows))
}
}
})
}
}
func BenchmarkSelectRowsSimpleAppendRowsRowToStructByName(b *testing.B) {
conn := mustConnectString(b, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(b, conn)
rowCounts := getSelectRowsCounts(b)
for _, rowCount := range rowCounts {
b.Run(fmt.Sprintf("%d rows", rowCount), func(b *testing.B) {
benchRows := make([]BenchRowSimple, 0, rowCount)
for i := 0; i < b.N; i++ {
benchRows = benchRows[:0]
rows, _ := conn.Query(context.Background(), "select n as id, 'Adam' as first_name, 'Smith ' || n as last_name, 'male' as sex, '1952-06-16'::date as birth_date, 258 as weight, 72 as height, '{foo,bar,baz}'::text[] as tags, '2001-01-28 01:02:03-05'::timestamptz as update_time from generate_series(100001, 100000 + $1) n", rowCount)
var err error
benchRows, err = pgx.AppendRows(benchRows, rows, pgx.RowToStructByName[BenchRowSimple])
if err != nil {
b.Fatal(err)
}
if len(benchRows) != int(rowCount) {
b.Fatalf("Expected %d rows, got %d", rowCount, len(benchRows))
}
}
})
}
}
func BenchmarkSelectRowsPgConnExecPrepared(b *testing.B) {
conn := mustConnectString(b, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(b, conn)
rowCounts := getSelectRowsCounts(b)
_, err := conn.PgConn().Prepare(context.Background(), "ps1", "select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n", nil)
_, err := conn.PgConn().Prepare(context.Background(), "ps1", "select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '{foo,bar,baz}'::text[], '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n", nil)
if err != nil {
b.Fatal(err)
}
@ -1267,7 +1265,7 @@ func BenchmarkSelectRowsPgConnExecPrepared(b *testing.B) {
"ps1",
[][]byte{[]byte(strconv.FormatInt(rowCount, 10))},
nil,
[]int16{format.code, pgx.TextFormatCode, pgx.TextFormatCode, pgx.TextFormatCode, format.code, format.code, format.code, format.code},
[]int16{format.code, pgx.TextFormatCode, pgx.TextFormatCode, pgx.TextFormatCode, format.code, format.code, format.code, format.code, format.code},
)
for rr.NextRow() {
rr.Values()
@ -1346,7 +1344,7 @@ func BenchmarkSelectRowsRawPrepared(b *testing.B) {
conn := mustConnectString(b, os.Getenv("PGX_TEST_DATABASE")).PgConn()
defer conn.Close(context.Background())
_, err := conn.Prepare(context.Background(), "ps1", "select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n", nil)
_, err := conn.Prepare(context.Background(), "ps1", "select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '{foo,bar,baz}'::text[], '2001-01-28 01:02:03-05'::timestamptz from generate_series(100001, 100000 + $1) n", nil)
if err != nil {
b.Fatal(err)
}
@ -1369,7 +1367,7 @@ func BenchmarkSelectRowsRawPrepared(b *testing.B) {
"ps1",
[][]byte{[]byte(strconv.FormatInt(rowCount, 10))},
nil,
[]int16{format.code, pgx.TextFormatCode, pgx.TextFormatCode, pgx.TextFormatCode, format.code, format.code, format.code, format.code},
[]int16{format.code, pgx.TextFormatCode, pgx.TextFormatCode, pgx.TextFormatCode, format.code, format.code, format.code, format.code, format.code},
)
_, err := rr.Close()
require.NoError(b, err)

ci/setup_test.bash (new executable file, 61 lines)

@ -0,0 +1,61 @@
#!/usr/bin/env bash
set -eux
if [[ "${PGVERSION-}" =~ ^[0-9.]+$ ]]
then
sudo apt-get remove -y --purge postgresql libpq-dev libpq5 postgresql-client-common postgresql-common
sudo rm -rf /var/lib/postgresql
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
sudo sh -c "echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PGVERSION >> /etc/apt/sources.list.d/postgresql.list"
sudo apt-get update -qq
sudo apt-get -y -o Dpkg::Options::=--force-confdef -o Dpkg::Options::="--force-confnew" install postgresql-$PGVERSION postgresql-server-dev-$PGVERSION postgresql-contrib-$PGVERSION
sudo cp testsetup/pg_hba.conf /etc/postgresql/$PGVERSION/main/pg_hba.conf
sudo sh -c "echo \"listen_addresses = '127.0.0.1'\" >> /etc/postgresql/$PGVERSION/main/postgresql.conf"
sudo sh -c "cat testsetup/postgresql_ssl.conf >> /etc/postgresql/$PGVERSION/main/postgresql.conf"
cd testsetup
# Generate CA, server, and encrypted client certificates.
go run generate_certs.go
# Copy certificates to server directory and set permissions.
sudo cp ca.pem /var/lib/postgresql/$PGVERSION/main/root.crt
sudo chown postgres:postgres /var/lib/postgresql/$PGVERSION/main/root.crt
sudo cp localhost.key /var/lib/postgresql/$PGVERSION/main/server.key
sudo chown postgres:postgres /var/lib/postgresql/$PGVERSION/main/server.key
sudo chmod 600 /var/lib/postgresql/$PGVERSION/main/server.key
sudo cp localhost.crt /var/lib/postgresql/$PGVERSION/main/server.crt
sudo chown postgres:postgres /var/lib/postgresql/$PGVERSION/main/server.crt
cp ca.pem /tmp
cp pgx_sslcert.key /tmp
cp pgx_sslcert.crt /tmp
cd ..
sudo /etc/init.d/postgresql restart
createdb -U postgres pgx_test
psql -U postgres -f testsetup/postgresql_setup.sql pgx_test
fi
if [[ "${PGVERSION-}" =~ ^cockroach ]]
then
wget -qO- https://binaries.cockroachdb.com/cockroach-v24.3.3.linux-amd64.tgz | tar xvz
sudo mv cockroach-v24.3.3.linux-amd64/cockroach /usr/local/bin/
cockroach start-single-node --insecure --background --listen-addr=localhost
cockroach sql --insecure -e 'create database pgx_test'
fi
if [ "${CRATEVERSION-}" != "" ]
then
docker run \
-p "6543:5432" \
-d \
crate:"$CRATEVERSION" \
crate \
-Cnetwork.host=0.0.0.0 \
-Ctransport.host=localhost \
-Clicense.enterprise=false
fi

conn.go (1409 lines changed; file diff suppressed because it is too large)

conn_internal_test.go (new file, 55 lines)

@ -0,0 +1,55 @@
package pgx
import (
"context"
"fmt"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func mustParseConfig(t testing.TB, connString string) *ConnConfig {
config, err := ParseConfig(connString)
require.Nil(t, err)
return config
}
func mustConnect(t testing.TB, config *ConnConfig) *Conn {
conn, err := ConnectConfig(context.Background(), config)
if err != nil {
t.Fatalf("Unable to establish connection: %v", err)
}
return conn
}
// Ensures the connection limits the size of its cached objects.
// This test examines the internals of *Conn so must be in the same package.
func TestStmtCacheSizeLimit(t *testing.T) {
const cacheLimit = 16
connConfig := mustParseConfig(t, os.Getenv("PGX_TEST_DATABASE"))
connConfig.StatementCacheCapacity = cacheLimit
conn := mustConnect(t, connConfig)
defer func() {
err := conn.Close(context.Background())
if err != nil {
t.Fatal(err)
}
}()
// run a set of unique queries that should overflow the cache
ctx := context.Background()
for i := 0; i < cacheLimit*2; i++ {
uniqueString := fmt.Sprintf("unique %d", i)
uniqueSQL := fmt.Sprintf("select '%s'", uniqueString)
var output string
err := conn.QueryRow(ctx, uniqueSQL).Scan(&output)
require.NoError(t, err)
require.Equal(t, uniqueString, output)
}
// preparedStatements contains cacheLimit+1 because deallocation happens before the query
assert.Len(t, conn.preparedStatements, cacheLimit+1)
assert.Equal(t, cacheLimit, conn.statementCache.Len())
}
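For context, a hedged sketch of how the cache capacities exercised by this test are typically configured. connString, the capacity values, and the imports of context and github.com/jackc/pgx/v5 are placeholders, not part of the diff.
// Sketch: configure both statement and description cache sizes on a connection.
func connectWithCachesSketch(ctx context.Context, connString string) (*pgx.Conn, error) {
	config, err := pgx.ParseConfig(connString)
	if err != nil {
		return nil, err
	}
	config.StatementCacheCapacity = 64   // cache used by QueryExecModeCacheStatement
	config.DescriptionCacheCapacity = 64 // cache used by QueryExecModeCacheDescribe
	return pgx.ConnectConfig(ctx, config)
}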

File diff suppressed because it is too large

@ -6,19 +6,18 @@ import (
"fmt"
"io"
"github.com/jackc/pgconn"
"github.com/jackc/pgio"
errors "golang.org/x/xerrors"
"github.com/jackc/pgx/v5/internal/pgio"
"github.com/jackc/pgx/v5/pgconn"
)
// CopyFromRows returns a CopyFromSource interface over the provided rows slice
// making it usable by *Conn.CopyFrom.
func CopyFromRows(rows [][]interface{}) CopyFromSource {
func CopyFromRows(rows [][]any) CopyFromSource {
return &copyFromRows{rows: rows, idx: -1}
}
type copyFromRows struct {
rows [][]interface{}
rows [][]any
idx int
}
@ -27,7 +26,7 @@ func (ctr *copyFromRows) Next() bool {
return ctr.idx < len(ctr.rows)
}
func (ctr *copyFromRows) Values() ([]interface{}, error) {
func (ctr *copyFromRows) Values() ([]any, error) {
return ctr.rows[ctr.idx], nil
}
@ -35,6 +34,63 @@ func (ctr *copyFromRows) Err() error {
return nil
}
// CopyFromSlice returns a CopyFromSource interface over a dynamic func
// making it usable by *Conn.CopyFrom.
func CopyFromSlice(length int, next func(int) ([]any, error)) CopyFromSource {
return &copyFromSlice{next: next, idx: -1, len: length}
}
type copyFromSlice struct {
next func(int) ([]any, error)
idx int
len int
err error
}
func (cts *copyFromSlice) Next() bool {
cts.idx++
return cts.idx < cts.len
}
func (cts *copyFromSlice) Values() ([]any, error) {
values, err := cts.next(cts.idx)
if err != nil {
cts.err = err
}
return values, err
}
func (cts *copyFromSlice) Err() error {
return cts.err
}
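A hedged usage sketch of CopyFromSlice follows; conn, ctx, the user struct, and the table and column names are placeholders rather than anything defined in this diff.
// Sketch: bulk-copy a slice of structs via CopyFromSlice.
type user struct {
	Name string
	Age  int
}

func copyUsersSketch(ctx context.Context, conn *pgx.Conn, users []user) (int64, error) {
	return conn.CopyFrom(
		ctx,
		pgx.Identifier{"users"},
		[]string{"name", "age"},
		pgx.CopyFromSlice(len(users), func(i int) ([]any, error) {
			return []any{users[i].Name, users[i].Age}, nil
		}),
	)
}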
// CopyFromFunc returns a CopyFromSource interface that relies on nxtf for values.
// nxtf returns rows until it either signals an 'end of data' by returning row=nil and err=nil,
// or it returns an error. If nxtf returns an error, the copy is aborted.
func CopyFromFunc(nxtf func() (row []any, err error)) CopyFromSource {
return &copyFromFunc{next: nxtf}
}
type copyFromFunc struct {
next func() ([]any, error)
valueRow []any
err error
}
func (g *copyFromFunc) Next() bool {
g.valueRow, g.err = g.next()
// only return true if valueRow exists and no error
return g.valueRow != nil && g.err == nil
}
func (g *copyFromFunc) Values() ([]any, error) {
return g.valueRow, g.err
}
func (g *copyFromFunc) Err() error {
return g.err
}
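A hedged usage sketch of CopyFromFunc follows; the rowsCh channel, table, and column names are placeholders. Per the doc comment above, returning a nil row and a nil error signals end of data.
// Sketch: stream rows into CopyFrom from a channel via CopyFromFunc.
func copyFromChannelSketch(ctx context.Context, conn *pgx.Conn, rowsCh <-chan []any) (int64, error) {
	src := pgx.CopyFromFunc(func() ([]any, error) {
		row, ok := <-rowsCh
		if !ok {
			return nil, nil // nil row and nil error signal end of data
		}
		return row, nil
	})
	return conn.CopyFrom(ctx, pgx.Identifier{"events"}, []string{"payload"}, src)
}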
// CopyFromSource is the interface used by *Conn.CopyFrom as the source for copy data.
type CopyFromSource interface {
// Next returns true if there is another row and makes the next row data
@ -43,7 +99,7 @@ type CopyFromSource interface {
Next() bool
// Values returns the values for the current row.
Values() ([]interface{}, error)
Values() ([]any, error)
// Err returns any error that has been encountered by the CopyFromSource. If
// this is not nil *Conn.CopyFrom will abort the copy.
@ -56,9 +112,17 @@ type copyFrom struct {
columnNames []string
rowSrc CopyFromSource
readerErrChan chan error
mode QueryExecMode
}
func (ct *copyFrom) run(ctx context.Context) (int64, error) {
if ct.conn.copyFromTracer != nil {
ctx = ct.conn.copyFromTracer.TraceCopyFromStart(ctx, ct.conn, TraceCopyFromStartData{
TableName: ct.tableName,
ColumnNames: ct.columnNames,
})
}
quotedTableName := ct.tableName.Sanitize()
cbuf := &bytes.Buffer{}
for i, cn := range ct.columnNames {
@ -69,9 +133,29 @@ func (ct *copyFrom) run(ctx context.Context) (int64, error) {
}
quotedColumnNames := cbuf.String()
sd, err := ct.conn.Prepare(ctx, "", fmt.Sprintf("select %s from %s", quotedColumnNames, quotedTableName))
if err != nil {
return 0, err
var sd *pgconn.StatementDescription
switch ct.mode {
case QueryExecModeExec, QueryExecModeSimpleProtocol:
// These modes don't support the binary format. Before the inclusion of the
// QueryExecModes, Conn.Prepare was called on every COPY operation to get
// the OIDs. These prepared statements were not cached.
//
// Since that's the same behavior provided by QueryExecModeDescribeExec,
// we'll default to that mode.
ct.mode = QueryExecModeDescribeExec
fallthrough
case QueryExecModeCacheStatement, QueryExecModeCacheDescribe, QueryExecModeDescribeExec:
var err error
sd, err = ct.conn.getStatementDescription(
ctx,
ct.mode,
fmt.Sprintf("select %s from %s", quotedColumnNames, quotedTableName),
)
if err != nil {
return 0, fmt.Errorf("statement description failed: %w", err)
}
default:
return 0, fmt.Errorf("unknown QueryExecMode: %v", ct.mode)
}
r, w := io.Pipe()
@ -120,29 +204,49 @@ func (ct *copyFrom) run(ctx context.Context) (int64, error) {
r.Close()
<-doneChan
if ct.conn.copyFromTracer != nil {
ct.conn.copyFromTracer.TraceCopyFromEnd(ctx, ct.conn, TraceCopyFromEndData{
CommandTag: commandTag,
Err: err,
})
}
return commandTag.RowsAffected(), err
}
func (ct *copyFrom) buildCopyBuf(buf []byte, sd *pgconn.StatementDescription) (bool, []byte, error) {
const sendBufSize = 65536 - 5 // The packet has a 5-byte header
lastBufLen := 0
largestRowLen := 0
for ct.rowSrc.Next() {
lastBufLen = len(buf)
values, err := ct.rowSrc.Values()
if err != nil {
return false, nil, err
}
if len(values) != len(ct.columnNames) {
return false, nil, errors.Errorf("expected %d values, got %d values", len(ct.columnNames), len(values))
return false, nil, fmt.Errorf("expected %d values, got %d values", len(ct.columnNames), len(values))
}
buf = pgio.AppendInt16(buf, int16(len(ct.columnNames)))
for i, val := range values {
buf, err = encodePreparedStatementArgument(ct.conn.connInfo, buf, sd.Fields[i].DataTypeOID, val)
buf, err = encodeCopyValue(ct.conn.typeMap, buf, sd.Fields[i].DataTypeOID, val)
if err != nil {
return false, nil, err
}
}
if len(buf) > 65536 {
rowLen := len(buf) - lastBufLen
if rowLen > largestRowLen {
largestRowLen = rowLen
}
// Try not to overflow the size of the buffer that PgConn.CopyFrom will be reading into. If that happens then the
// nature of io.Pipe means that the next Read will be short. This can lead to pathological send sizes such as
// 65531, 13, 65531, 13, 65531, 13.
if len(buf) > sendBufSize-largestRowLen {
return true, buf, nil
}
}
@ -150,12 +254,14 @@ func (ct *copyFrom) buildCopyBuf(buf []byte, sd *pgconn.StatementDescription) (b
return false, buf, nil
}
// CopyFrom uses the PostgreSQL copy protocol to perform bulk data insertion.
// It returns the number of rows copied and an error.
// CopyFrom uses the PostgreSQL copy protocol to perform bulk data insertion. It returns the number of rows copied and
// an error.
//
// CopyFrom requires all values use the binary format. Almost all types
// implemented by pgx use the binary format by default. Types implementing
// Encoder can only be used if they encode to the binary format.
// CopyFrom requires all values use the binary format. A pgtype.Type that supports the binary format must be registered
// for the type of each column. Almost all types implemented by pgx support the binary format.
//
// Even though enum types appear to be strings they still must be registered to use with CopyFrom. This can be done with
// Conn.LoadType and pgtype.Map.RegisterType.
func (c *Conn) CopyFrom(ctx context.Context, tableName Identifier, columnNames []string, rowSrc CopyFromSource) (int64, error) {
ct := &copyFrom{
conn: c,
@ -163,6 +269,7 @@ func (c *Conn) CopyFrom(ctx context.Context, tableName Identifier, columnNames [
columnNames: columnNames,
rowSrc: rowSrc,
readerErrChan: make(chan error),
mode: c.config.DefaultQueryExecMode,
}
return ct.run(ctx)
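A hedged sketch of the enum registration required by the CopyFrom doc comment above; the mood type, table, column names, and row values are placeholders, not anything defined in this diff.
// Sketch: register an enum type so CopyFrom can encode it in binary format.
func copyWithEnumSketch(ctx context.Context, conn *pgx.Conn) (int64, error) {
	typ, err := conn.LoadType(ctx, "mood")
	if err != nil {
		return 0, err
	}
	conn.TypeMap().RegisterType(typ)

	rows := [][]any{{"alice", "happy"}}
	return conn.CopyFrom(ctx, pgx.Identifier{"people"}, []string{"name", "current_mood"}, pgx.CopyFromRows(rows))
}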


@ -2,20 +2,148 @@ package pgx_test
import (
"context"
"fmt"
"os"
"reflect"
"testing"
"time"
"github.com/jackc/pgconn"
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgxtest"
"github.com/stretchr/testify/require"
errors "golang.org/x/xerrors"
)
func TestConnCopyWithAllQueryExecModes(t *testing.T) {
for _, mode := range pgxtest.AllQueryExecModes {
t.Run(mode.String(), func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
defer cancel()
cfg := mustParseConfig(t, os.Getenv("PGX_TEST_DATABASE"))
cfg.DefaultQueryExecMode = mode
conn := mustConnect(t, cfg)
defer closeConn(t, conn)
mustExec(t, conn, `create temporary table foo(
a int2,
b int4,
c int8,
d text,
e timestamptz
)`)
tzedTime := time.Date(2010, 2, 3, 4, 5, 6, 0, time.Local)
inputRows := [][]any{
{int16(0), int32(1), int64(2), "abc", tzedTime},
{nil, nil, nil, nil, nil},
}
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a", "b", "c", "d", "e"}, pgx.CopyFromRows(inputRows))
if err != nil {
t.Errorf("Unexpected error for CopyFrom: %v", err)
}
if int(copyCount) != len(inputRows) {
t.Errorf("Expected CopyFrom to return %d copied rows, but got %d", len(inputRows), copyCount)
}
rows, err := conn.Query(ctx, "select * from foo")
if err != nil {
t.Errorf("Unexpected error for Query: %v", err)
}
var outputRows [][]any
for rows.Next() {
row, err := rows.Values()
if err != nil {
t.Errorf("Unexpected error for rows.Values(): %v", err)
}
outputRows = append(outputRows, row)
}
if rows.Err() != nil {
t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
}
if !reflect.DeepEqual(inputRows, outputRows) {
t.Errorf("Input rows and output rows do not equal: %v -> %v", inputRows, outputRows)
}
ensureConnValid(t, conn)
})
}
}
func TestConnCopyWithKnownOIDQueryExecModes(t *testing.T) {
for _, mode := range pgxtest.KnownOIDQueryExecModes {
t.Run(mode.String(), func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
defer cancel()
cfg := mustParseConfig(t, os.Getenv("PGX_TEST_DATABASE"))
cfg.DefaultQueryExecMode = mode
conn := mustConnect(t, cfg)
defer closeConn(t, conn)
mustExec(t, conn, `create temporary table foo(
a int2,
b int4,
c int8,
d varchar,
e text,
f date,
g timestamptz
)`)
tzedTime := time.Date(2010, 2, 3, 4, 5, 6, 0, time.Local)
inputRows := [][]any{
{int16(0), int32(1), int64(2), "abc", "efg", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), tzedTime},
{nil, nil, nil, nil, nil, nil, nil},
}
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a", "b", "c", "d", "e", "f", "g"}, pgx.CopyFromRows(inputRows))
if err != nil {
t.Errorf("Unexpected error for CopyFrom: %v", err)
}
if int(copyCount) != len(inputRows) {
t.Errorf("Expected CopyFrom to return %d copied rows, but got %d", len(inputRows), copyCount)
}
rows, err := conn.Query(ctx, "select * from foo")
if err != nil {
t.Errorf("Unexpected error for Query: %v", err)
}
var outputRows [][]any
for rows.Next() {
row, err := rows.Values()
if err != nil {
t.Errorf("Unexpected error for rows.Values(): %v", err)
}
outputRows = append(outputRows, row)
}
if rows.Err() != nil {
t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
}
if !reflect.DeepEqual(inputRows, outputRows) {
t.Errorf("Input rows and output rows do not equal: %v -> %v", inputRows, outputRows)
}
ensureConnValid(t, conn)
})
}
}
func TestConnCopyFromSmall(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
defer cancel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
@ -31,12 +159,12 @@ func TestConnCopyFromSmall(t *testing.T) {
tzedTime := time.Date(2010, 2, 3, 4, 5, 6, 0, time.Local)
inputRows := [][]interface{}{
inputRows := [][]any{
{int16(0), int32(1), int64(2), "abc", "efg", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), tzedTime},
{nil, nil, nil, nil, nil, nil, nil},
}
copyCount, err := conn.CopyFrom(context.Background(), pgx.Identifier{"foo"}, []string{"a", "b", "c", "d", "e", "f", "g"}, pgx.CopyFromRows(inputRows))
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a", "b", "c", "d", "e", "f", "g"}, pgx.CopyFromRows(inputRows))
if err != nil {
t.Errorf("Unexpected error for CopyFrom: %v", err)
}
@ -44,12 +172,74 @@ func TestConnCopyFromSmall(t *testing.T) {
t.Errorf("Expected CopyFrom to return %d copied rows, but got %d", len(inputRows), copyCount)
}
rows, err := conn.Query(context.Background(), "select * from foo")
rows, err := conn.Query(ctx, "select * from foo")
if err != nil {
t.Errorf("Unexpected error for Query: %v", err)
}
var outputRows [][]interface{}
var outputRows [][]any
for rows.Next() {
row, err := rows.Values()
if err != nil {
t.Errorf("Unexpected error for rows.Values(): %v", err)
}
outputRows = append(outputRows, row)
}
if rows.Err() != nil {
t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
}
if !reflect.DeepEqual(inputRows, outputRows) {
t.Errorf("Input rows and output rows do not equal: %v -> %v", inputRows, outputRows)
}
ensureConnValid(t, conn)
}
func TestConnCopyFromSliceSmall(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
defer cancel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
mustExec(t, conn, `create temporary table foo(
a int2,
b int4,
c int8,
d varchar,
e text,
f date,
g timestamptz
)`)
tzedTime := time.Date(2010, 2, 3, 4, 5, 6, 0, time.Local)
inputRows := [][]any{
{int16(0), int32(1), int64(2), "abc", "efg", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), tzedTime},
{nil, nil, nil, nil, nil, nil, nil},
}
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a", "b", "c", "d", "e", "f", "g"},
pgx.CopyFromSlice(len(inputRows), func(i int) ([]any, error) {
return inputRows[i], nil
}))
if err != nil {
t.Errorf("Unexpected error for CopyFrom: %v", err)
}
if int(copyCount) != len(inputRows) {
t.Errorf("Expected CopyFrom to return %d copied rows, but got %d", len(inputRows), copyCount)
}
rows, err := conn.Query(ctx, "select * from foo")
if err != nil {
t.Errorf("Unexpected error for Query: %v", err)
}
var outputRows [][]any
for rows.Next() {
row, err := rows.Values()
if err != nil {
@ -72,6 +262,9 @@ func TestConnCopyFromSmall(t *testing.T) {
func TestConnCopyFromLarge(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
defer cancel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
@ -88,13 +281,13 @@ func TestConnCopyFromLarge(t *testing.T) {
tzedTime := time.Date(2010, 2, 3, 4, 5, 6, 0, time.Local)
inputRows := [][]interface{}{}
inputRows := [][]any{}
for i := 0; i < 10000; i++ {
inputRows = append(inputRows, []interface{}{int16(0), int32(1), int64(2), "abc", "efg", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), tzedTime, []byte{111, 111, 111, 111}})
inputRows = append(inputRows, []any{int16(0), int32(1), int64(2), "abc", "efg", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), tzedTime, []byte{111, 111, 111, 111}})
}
copyCount, err := conn.CopyFrom(context.Background(), pgx.Identifier{"foo"}, []string{"a", "b", "c", "d", "e", "f", "g", "h"}, pgx.CopyFromRows(inputRows))
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a", "b", "c", "d", "e", "f", "g", "h"}, pgx.CopyFromRows(inputRows))
if err != nil {
t.Errorf("Unexpected error for CopyFrom: %v", err)
}
@ -102,12 +295,12 @@ func TestConnCopyFromLarge(t *testing.T) {
t.Errorf("Expected CopyFrom to return %d copied rows, but got %d", len(inputRows), copyCount)
}
rows, err := conn.Query(context.Background(), "select * from foo")
rows, err := conn.Query(ctx, "select * from foo")
if err != nil {
t.Errorf("Unexpected error for Query: %v", err)
}
var outputRows [][]interface{}
var outputRows [][]any
for rows.Next() {
row, err := rows.Values()
if err != nil {
@ -130,10 +323,12 @@ func TestConnCopyFromLarge(t *testing.T) {
func TestConnCopyFromEnum(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
defer cancel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
ctx := context.Background()
tx, err := conn.Begin(ctx)
require.NoError(t, err)
defer tx.Rollback(ctx)
@ -150,7 +345,15 @@ func TestConnCopyFromEnum(t *testing.T) {
_, err = tx.Exec(ctx, `create type fruit as enum ('apple', 'orange', 'grape')`)
require.NoError(t, err)
_, err = tx.Exec(ctx, `create table foo(
// Obviously using conn while a tx is in use and registering a type after the connection has been established are
// really bad practices, but for the sake of convenience we do it in the test here.
for _, name := range []string{"fruit", "color"} {
typ, err := conn.LoadType(ctx, name)
require.NoError(t, err)
conn.TypeMap().RegisterType(typ)
}
_, err = tx.Exec(ctx, `create temporary table foo(
a text,
b color,
c fruit,
@ -160,19 +363,19 @@ func TestConnCopyFromEnum(t *testing.T) {
)`)
require.NoError(t, err)
inputRows := [][]interface{}{
inputRows := [][]any{
{"abc", "blue", "grape", "orange", "orange", "def"},
{nil, nil, nil, nil, nil, nil},
}
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a", "b", "c", "d", "e", "f"}, pgx.CopyFromRows(inputRows))
copyCount, err := tx.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a", "b", "c", "d", "e", "f"}, pgx.CopyFromRows(inputRows))
require.NoError(t, err)
require.EqualValues(t, len(inputRows), copyCount)
rows, err := conn.Query(ctx, "select * from foo")
rows, err := tx.Query(ctx, "select * from foo")
require.NoError(t, err)
var outputRows [][]interface{}
var outputRows [][]any
for rows.Next() {
row, err := rows.Values()
require.NoError(t, err)
@ -185,17 +388,23 @@ func TestConnCopyFromEnum(t *testing.T) {
t.Errorf("Input rows and output rows do not equal: %v -> %v", inputRows, outputRows)
}
err = tx.Rollback(ctx)
require.NoError(t, err)
ensureConnValid(t, conn)
}
func TestConnCopyFromJSON(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
defer cancel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
for _, typeName := range []string{"json", "jsonb"} {
if _, ok := conn.ConnInfo().DataTypeForName(typeName); !ok {
if _, ok := conn.TypeMap().TypeForName(typeName); !ok {
return // No JSON/JSONB type -- must be running against old PostgreSQL
}
}
@ -205,12 +414,12 @@ func TestConnCopyFromJSON(t *testing.T) {
b jsonb
)`)
inputRows := [][]interface{}{
{map[string]interface{}{"foo": "bar"}, map[string]interface{}{"bar": "quz"}},
inputRows := [][]any{
{map[string]any{"foo": "bar"}, map[string]any{"bar": "quz"}},
{nil, nil},
}
copyCount, err := conn.CopyFrom(context.Background(), pgx.Identifier{"foo"}, []string{"a", "b"}, pgx.CopyFromRows(inputRows))
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a", "b"}, pgx.CopyFromRows(inputRows))
if err != nil {
t.Errorf("Unexpected error for CopyFrom: %v", err)
}
@ -218,12 +427,12 @@ func TestConnCopyFromJSON(t *testing.T) {
t.Errorf("Expected CopyFrom to return %d copied rows, but got %d", len(inputRows), copyCount)
}
rows, err := conn.Query(context.Background(), "select * from foo")
rows, err := conn.Query(ctx, "select * from foo")
if err != nil {
t.Errorf("Unexpected error for Query: %v", err)
}
var outputRows [][]interface{}
var outputRows [][]any
for rows.Next() {
row, err := rows.Values()
if err != nil {
@ -253,12 +462,12 @@ func (cfs *clientFailSource) Next() bool {
return cfs.count < 100
}
func (cfs *clientFailSource) Values() ([]interface{}, error) {
func (cfs *clientFailSource) Values() ([]any, error) {
if cfs.count == 3 {
cfs.err = errors.Errorf("client error")
cfs.err = fmt.Errorf("client error")
return nil, cfs.err
}
return []interface{}{make([]byte, 100000)}, nil
return []any{make([]byte, 100000)}, nil
}
func (cfs *clientFailSource) Err() error {
@ -268,6 +477,9 @@ func (cfs *clientFailSource) Err() error {
func TestConnCopyFromFailServerSideMidway(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
defer cancel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
@ -276,13 +488,13 @@ func TestConnCopyFromFailServerSideMidway(t *testing.T) {
b varchar not null
)`)
inputRows := [][]interface{}{
inputRows := [][]any{
{int32(1), "abc"},
{int32(2), nil}, // this row should trigger a failure
{int32(3), "def"},
}
copyCount, err := conn.CopyFrom(context.Background(), pgx.Identifier{"foo"}, []string{"a", "b"}, pgx.CopyFromRows(inputRows))
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a", "b"}, pgx.CopyFromRows(inputRows))
if err == nil {
t.Errorf("Expected CopyFrom return error, but it did not")
}
@ -293,12 +505,12 @@ func TestConnCopyFromFailServerSideMidway(t *testing.T) {
t.Errorf("Expected CopyFrom to return 0 copied rows, but got %d", copyCount)
}
rows, err := conn.Query(context.Background(), "select * from foo")
rows, err := conn.Query(ctx, "select * from foo")
if err != nil {
t.Errorf("Unexpected error for Query: %v", err)
}
var outputRows [][]interface{}
var outputRows [][]any
for rows.Next() {
row, err := rows.Values()
if err != nil {
@ -330,11 +542,11 @@ func (fs *failSource) Next() bool {
return fs.count < 100
}
func (fs *failSource) Values() ([]interface{}, error) {
func (fs *failSource) Values() ([]any, error) {
if fs.count == 3 {
return []interface{}{nil}, nil
return []any{nil}, nil
}
return []interface{}{make([]byte, 100000)}, nil
return []any{make([]byte, 100000)}, nil
}
func (fs *failSource) Err() error {
@ -344,16 +556,21 @@ func (fs *failSource) Err() error {
func TestConnCopyFromFailServerSideMidwayAbortsWithoutWaiting(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
defer cancel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
pgxtest.SkipCockroachDB(t, conn, "Server copy error does not fail fast")
mustExec(t, conn, `create temporary table foo(
a bytea not null
)`)
startTime := time.Now()
copyCount, err := conn.CopyFrom(context.Background(), pgx.Identifier{"foo"}, []string{"a"}, &failSource{})
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a"}, &failSource{})
if err == nil {
t.Errorf("Expected CopyFrom return error, but it did not")
}
@ -370,12 +587,12 @@ func TestConnCopyFromFailServerSideMidwayAbortsWithoutWaiting(t *testing.T) {
t.Errorf("Failing CopyFrom shouldn't have taken so long: %v", copyTime)
}
rows, err := conn.Query(context.Background(), "select * from foo")
rows, err := conn.Query(ctx, "select * from foo")
if err != nil {
t.Errorf("Unexpected error for Query: %v", err)
}
var outputRows [][]interface{}
var outputRows [][]any
for rows.Next() {
row, err := rows.Values()
if err != nil {
@ -405,11 +622,11 @@ func (fs *slowFailRaceSource) Next() bool {
return fs.count < 1000
}
func (fs *slowFailRaceSource) Values() ([]interface{}, error) {
func (fs *slowFailRaceSource) Values() ([]any, error) {
if fs.count == 500 {
return []interface{}{nil, nil}, nil
return []any{nil, nil}, nil
}
return []interface{}{1, make([]byte, 1000)}, nil
return []any{1, make([]byte, 1000)}, nil
}
func (fs *slowFailRaceSource) Err() error {
@ -419,6 +636,9 @@ func (fs *slowFailRaceSource) Err() error {
func TestConnCopyFromSlowFailRace(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
defer cancel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
@ -427,7 +647,7 @@ func TestConnCopyFromSlowFailRace(t *testing.T) {
b bytea not null
)`)
copyCount, err := conn.CopyFrom(context.Background(), pgx.Identifier{"foo"}, []string{"a", "b"}, &slowFailRaceSource{})
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a", "b"}, &slowFailRaceSource{})
if err == nil {
t.Errorf("Expected CopyFrom return error, but it did not")
}
@ -444,6 +664,9 @@ func TestConnCopyFromSlowFailRace(t *testing.T) {
func TestConnCopyFromCopyFromSourceErrorMidway(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
defer cancel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
@ -451,7 +674,7 @@ func TestConnCopyFromCopyFromSourceErrorMidway(t *testing.T) {
a bytea not null
)`)
copyCount, err := conn.CopyFrom(context.Background(), pgx.Identifier{"foo"}, []string{"a"}, &clientFailSource{})
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a"}, &clientFailSource{})
if err == nil {
t.Errorf("Expected CopyFrom return error, but it did not")
}
@ -459,12 +682,12 @@ func TestConnCopyFromCopyFromSourceErrorMidway(t *testing.T) {
t.Errorf("Expected CopyFrom to return 0 copied rows, but got %d", copyCount)
}
rows, err := conn.Query(context.Background(), "select * from foo")
rows, err := conn.Query(ctx, "select * from foo")
if err != nil {
t.Errorf("Unexpected error for Query: %v", err)
}
var outputRows [][]interface{}
var outputRows [][]any
for rows.Next() {
row, err := rows.Values()
if err != nil {
@ -493,17 +716,20 @@ func (cfs *clientFinalErrSource) Next() bool {
return cfs.count < 5
}
func (cfs *clientFinalErrSource) Values() ([]interface{}, error) {
return []interface{}{make([]byte, 100000)}, nil
func (cfs *clientFinalErrSource) Values() ([]any, error) {
return []any{make([]byte, 100000)}, nil
}
func (cfs *clientFinalErrSource) Err() error {
return errors.Errorf("final error")
return fmt.Errorf("final error")
}
func TestConnCopyFromCopyFromSourceErrorEnd(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
defer cancel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
@ -511,7 +737,7 @@ func TestConnCopyFromCopyFromSourceErrorEnd(t *testing.T) {
a bytea not null
)`)
copyCount, err := conn.CopyFrom(context.Background(), pgx.Identifier{"foo"}, []string{"a"}, &clientFinalErrSource{})
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a"}, &clientFinalErrSource{})
if err == nil {
t.Errorf("Expected CopyFrom return error, but it did not")
}
@ -519,12 +745,12 @@ func TestConnCopyFromCopyFromSourceErrorEnd(t *testing.T) {
t.Errorf("Expected CopyFrom to return 0 copied rows, but got %d", copyCount)
}
rows, err := conn.Query(context.Background(), "select * from foo")
rows, err := conn.Query(ctx, "select * from foo")
if err != nil {
t.Errorf("Unexpected error for Query: %v", err)
}
var outputRows [][]interface{}
var outputRows [][]any
for rows.Next() {
row, err := rows.Values()
if err != nil {
@ -543,3 +769,125 @@ func TestConnCopyFromCopyFromSourceErrorEnd(t *testing.T) {
ensureConnValid(t, conn)
}
func TestConnCopyFromAutomaticStringConversion(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
defer cancel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
mustExec(t, conn, `create temporary table foo(
a int8
)`)
inputRows := [][]interface{}{
{"42"},
{"7"},
{8},
}
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a"}, pgx.CopyFromRows(inputRows))
require.NoError(t, err)
require.EqualValues(t, len(inputRows), copyCount)
rows, _ := conn.Query(ctx, "select * from foo")
nums, err := pgx.CollectRows(rows, pgx.RowTo[int64])
require.NoError(t, err)
require.Equal(t, []int64{42, 7, 8}, nums)
ensureConnValid(t, conn)
}
// https://github.com/jackc/pgx/discussions/1891
func TestConnCopyFromAutomaticStringConversionArray(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
defer cancel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
mustExec(t, conn, `create temporary table foo(
a numeric[]
)`)
inputRows := [][]interface{}{
{[]string{"42"}},
{[]string{"7"}},
{[]string{"8", "9"}},
{[][]string{{"10", "11"}, {"12", "13"}}},
}
copyCount, err := conn.CopyFrom(ctx, pgx.Identifier{"foo"}, []string{"a"}, pgx.CopyFromRows(inputRows))
require.NoError(t, err)
require.EqualValues(t, len(inputRows), copyCount)
// Test reads as int64 and flattened array for simplicity.
rows, _ := conn.Query(ctx, "select * from foo")
nums, err := pgx.CollectRows(rows, pgx.RowTo[[]int64])
require.NoError(t, err)
require.Equal(t, [][]int64{{42}, {7}, {8, 9}, {10, 11, 12, 13}}, nums)
ensureConnValid(t, conn)
}
func TestCopyFromFunc(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
mustExec(t, conn, `create temporary table foo(
a int
)`)
dataCh := make(chan int, 1)
const channelItems = 10
go func() {
for i := 0; i < channelItems; i++ {
dataCh <- i
}
close(dataCh)
}()
copyCount, err := conn.CopyFrom(context.Background(), pgx.Identifier{"foo"}, []string{"a"},
pgx.CopyFromFunc(func() ([]any, error) {
v, ok := <-dataCh
if !ok {
return nil, nil
}
return []any{v}, nil
}))
require.ErrorIs(t, err, nil)
require.EqualValues(t, channelItems, copyCount)
rows, err := conn.Query(context.Background(), "select * from foo order by a")
require.NoError(t, err)
nums, err := pgx.CollectRows(rows, pgx.RowTo[int64])
require.NoError(t, err)
require.Equal(t, []int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, nums)
// simulate a failure
copyCount, err = conn.CopyFrom(context.Background(), pgx.Identifier{"foo"}, []string{"a"},
pgx.CopyFromFunc(func() func() ([]any, error) {
x := 9
return func() ([]any, error) {
x++
if x > 100 {
return nil, fmt.Errorf("simulated error")
}
return []any{x}, nil
}
}()))
require.NotErrorIs(t, err, nil)
require.EqualValues(t, 0, copyCount) // no change, due to error
ensureConnValid(t, conn)
}

derived_types.go

@ -0,0 +1,256 @@
package pgx
import (
"context"
"fmt"
"regexp"
"strconv"
"strings"
"github.com/jackc/pgx/v5/pgtype"
)
/*
buildLoadDerivedTypesSQL generates the correct query for retrieving type information.
pgVersion: the major version of the PostgreSQL server
typeNames: the names of the types to load. Must be non-nil; a nil slice produces a clause that matches no types.
*/
func buildLoadDerivedTypesSQL(pgVersion int64, typeNames []string) string {
supportsMultirange := (pgVersion >= 14)
var typeNamesClause string
if typeNames == nil {
// This should not occur; this will not return any types
typeNamesClause = "= ''"
} else {
typeNamesClause = "= ANY($1)"
}
parts := make([]string, 0, 10)
// Each of the type names provided might be found in pg_class or pg_type.
// Additionally, it may or may not include a schema portion.
parts = append(parts, `
WITH RECURSIVE
-- find the OIDs in pg_class which match one of the provided type names
selected_classes(oid,reltype) AS (
-- this query uses the namespace search path, so will match type names without a schema prefix
SELECT pg_class.oid, pg_class.reltype
FROM pg_catalog.pg_class
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = pg_class.relnamespace
WHERE pg_catalog.pg_table_is_visible(pg_class.oid)
AND relname `, typeNamesClause, `
UNION ALL
-- this query will only match type names which include the schema prefix
SELECT pg_class.oid, pg_class.reltype
FROM pg_class
INNER JOIN pg_namespace ON (pg_class.relnamespace = pg_namespace.oid)
WHERE nspname || '.' || relname `, typeNamesClause, `
),
selected_types(oid) AS (
-- collect the OIDs from pg_types which correspond to the selected classes
SELECT reltype AS oid
FROM selected_classes
UNION ALL
-- as well as any other type names which match our criteria
SELECT pg_type.oid
FROM pg_type
LEFT OUTER JOIN pg_namespace ON (pg_type.typnamespace = pg_namespace.oid)
WHERE typname `, typeNamesClause, `
OR nspname || '.' || typname `, typeNamesClause, `
),
-- this builds a parent/child mapping of objects, allowing us to know
-- all the child (ie: dependent) types that a parent (type) requires
-- As can be seen, there are 3 ways this can occur (the last of which
-- is due to being a composite class, where the composite fields are children)
pc(parent, child) AS (
SELECT parent.oid, parent.typelem
FROM pg_type parent
WHERE parent.typtype = 'b' AND parent.typelem != 0
UNION ALL
SELECT parent.oid, parent.typbasetype
FROM pg_type parent
WHERE parent.typtypmod = -1 AND parent.typbasetype != 0
UNION ALL
SELECT pg_type.oid, atttypid
FROM pg_attribute
INNER JOIN pg_class ON (pg_class.oid = pg_attribute.attrelid)
INNER JOIN pg_type ON (pg_type.oid = pg_class.reltype)
WHERE NOT attisdropped
AND attnum > 0
),
-- Now construct a recursive query which includes a 'depth' element.
-- This is used to ensure that the "youngest" children are registered before
-- their parents.
relationships(parent, child, depth) AS (
SELECT DISTINCT 0::OID, selected_types.oid, 0
FROM selected_types
UNION ALL
SELECT pg_type.oid AS parent, pg_attribute.atttypid AS child, 1
FROM selected_classes c
inner join pg_type ON (c.reltype = pg_type.oid)
inner join pg_attribute on (c.oid = pg_attribute.attrelid)
UNION ALL
SELECT pc.parent, pc.child, relationships.depth + 1
FROM pc
INNER JOIN relationships ON (pc.parent = relationships.child)
),
-- composite fields need to be encapsulated as a couple of arrays to provide the required information for registration
composite AS (
SELECT pg_type.oid, ARRAY_AGG(attname ORDER BY attnum) AS attnames, ARRAY_AGG(atttypid ORDER BY ATTNUM) AS atttypids
FROM pg_attribute
INNER JOIN pg_class ON (pg_class.oid = pg_attribute.attrelid)
INNER JOIN pg_type ON (pg_type.oid = pg_class.reltype)
WHERE NOT attisdropped
AND attnum > 0
GROUP BY pg_type.oid
)
-- Bring together this information, showing all the information which might possibly be required
-- to complete the registration, applying filters to only show the items which relate to the selected
-- types/classes.
SELECT typname,
pg_namespace.nspname,
typtype,
typbasetype,
typelem,
pg_type.oid,`)
if supportsMultirange {
parts = append(parts, `
COALESCE(multirange.rngtypid, 0) AS rngtypid,`)
} else {
parts = append(parts, `
0 AS rngtypid,`)
}
parts = append(parts, `
COALESCE(pg_range.rngsubtype, 0) AS rngsubtype,
attnames, atttypids
FROM relationships
INNER JOIN pg_type ON (pg_type.oid = relationships.child)
LEFT OUTER JOIN pg_range ON (pg_type.oid = pg_range.rngtypid)`)
if supportsMultirange {
parts = append(parts, `
LEFT OUTER JOIN pg_range multirange ON (pg_type.oid = multirange.rngmultitypid)`)
}
parts = append(parts, `
LEFT OUTER JOIN composite USING (oid)
LEFT OUTER JOIN pg_namespace ON (pg_type.typnamespace = pg_namespace.oid)
WHERE NOT (typtype = 'b' AND typelem = 0)`)
parts = append(parts, `
GROUP BY typname, pg_namespace.nspname, typtype, typbasetype, typelem, pg_type.oid, pg_range.rngsubtype,`)
if supportsMultirange {
parts = append(parts, `
multirange.rngtypid,`)
}
parts = append(parts, `
attnames, atttypids
ORDER BY MAX(depth) desc, typname;`)
return strings.Join(parts, "")
}
type derivedTypeInfo struct {
Oid, Typbasetype, Typelem, Rngsubtype, Rngtypid uint32
TypeName, Typtype, NspName string
Attnames []string
Atttypids []uint32
}
// LoadTypes performs a single (complex) query, returning all the required
// information to register the named types, as well as any other types directly
// or indirectly required to complete the registration.
// The result of this call can be passed into RegisterTypes to complete the process.
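//
// A minimal usage sketch (illustrative only, not part of this file; it assumes an
// open *Conn named conn and that the named types already exist in the database):
//
//	types, err := conn.LoadTypes(ctx, []string{"my_enum", "my_composite"})
//	if err != nil {
//		return err
//	}
//	conn.TypeMap().RegisterTypes(types)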
func (c *Conn) LoadTypes(ctx context.Context, typeNames []string) ([]*pgtype.Type, error) {
m := c.TypeMap()
if len(typeNames) == 0 {
return nil, fmt.Errorf("No type names were supplied.")
}
// Disregard server version errors. This will result in the SQL
// not using recent structures such as multirange.
serverVersion, _ := serverVersion(c)
sql := buildLoadDerivedTypesSQL(serverVersion, typeNames)
rows, err := c.Query(ctx, sql, QueryExecModeSimpleProtocol, typeNames)
if err != nil {
return nil, fmt.Errorf("While generating load types query: %w", err)
}
defer rows.Close()
result := make([]*pgtype.Type, 0, 100)
for rows.Next() {
ti := derivedTypeInfo{}
err = rows.Scan(&ti.TypeName, &ti.NspName, &ti.Typtype, &ti.Typbasetype, &ti.Typelem, &ti.Oid, &ti.Rngtypid, &ti.Rngsubtype, &ti.Attnames, &ti.Atttypids)
if err != nil {
return nil, fmt.Errorf("While scanning type information: %w", err)
}
var type_ *pgtype.Type
switch ti.Typtype {
case "b": // array
dt, ok := m.TypeForOID(ti.Typelem)
if !ok {
return nil, fmt.Errorf("Array element OID %v not registered while loading pgtype %q", ti.Typelem, ti.TypeName)
}
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.ArrayCodec{ElementType: dt}}
case "c": // composite
var fields []pgtype.CompositeCodecField
for i, fieldName := range ti.Attnames {
dt, ok := m.TypeForOID(ti.Atttypids[i])
if !ok {
return nil, fmt.Errorf("Unknown field for composite type %q: field %q (OID %v) is not already registered.", ti.TypeName, fieldName, ti.Atttypids[i])
}
fields = append(fields, pgtype.CompositeCodecField{Name: fieldName, Type: dt})
}
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.CompositeCodec{Fields: fields}}
case "d": // domain
dt, ok := m.TypeForOID(ti.Typbasetype)
if !ok {
return nil, fmt.Errorf("Domain base type OID %v was not already registered, needed for %q", ti.Typbasetype, ti.TypeName)
}
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: dt.Codec}
case "e": // enum
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.EnumCodec{}}
case "r": // range
dt, ok := m.TypeForOID(ti.Rngsubtype)
if !ok {
return nil, fmt.Errorf("Range element OID %v was not already registered, needed for %q", ti.Rngsubtype, ti.TypeName)
}
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.RangeCodec{ElementType: dt}}
case "m": // multirange
dt, ok := m.TypeForOID(ti.Rngtypid)
if !ok {
return nil, fmt.Errorf("Multirange element OID %v was not already registered, needed for %q", ti.Rngtypid, ti.TypeName)
}
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.MultirangeCodec{ElementType: dt}}
default:
return nil, fmt.Errorf("Unknown typtype %q was found while registering %q", ti.Typtype, ti.TypeName)
}
// type_ cannot be nil here; every branch of the switch either assigns it or returns an error
m.RegisterType(type_)
if ti.NspName != "" {
nspType := &pgtype.Type{Name: ti.NspName + "." + type_.Name, OID: type_.OID, Codec: type_.Codec}
m.RegisterType(nspType)
result = append(result, nspType)
}
result = append(result, type_)
}
return result, nil
}
// serverVersion returns the postgresql server version.
func serverVersion(c *Conn) (int64, error) {
serverVersionStr := c.PgConn().ParameterStatus("server_version")
serverVersionStr = regexp.MustCompile(`^[0-9]+`).FindString(serverVersionStr)
// if the version cannot be identified (e.g. the server is not PostgreSQL), report an error
if serverVersionStr == "" {
return 0, fmt.Errorf("Cannot identify server version in %q", serverVersionStr)
}
version, err := strconv.ParseInt(serverVersionStr, 10, 64)
if err != nil {
return 0, fmt.Errorf("postgres version parsing failed: %w", err)
}
return version, nil
}

derived_types_test.go

@ -0,0 +1,40 @@
package pgx_test
import (
"context"
"testing"
"github.com/jackc/pgx/v5"
"github.com/stretchr/testify/require"
)
func TestCompositeCodecTranscodeWithLoadTypes(t *testing.T) {
skipCockroachDB(t, "Server does not support composite types (see https://github.com/cockroachdb/cockroach/issues/27792)")
defaultConnTestRunner.RunTest(context.Background(), t, func(ctx context.Context, t testing.TB, conn *pgx.Conn) {
_, err := conn.Exec(ctx, `
drop type if exists dtype_test;
drop domain if exists anotheruint64;
create domain anotheruint64 as numeric(20,0);
create type dtype_test as (
a text,
b int4,
c anotheruint64,
d anotheruint64[]
);`)
require.NoError(t, err)
defer conn.Exec(ctx, "drop type dtype_test")
defer conn.Exec(ctx, "drop domain anotheruint64")
types, err := conn.LoadTypes(ctx, []string{"dtype_test"})
require.NoError(t, err)
require.Len(t, types, 6)
require.Equal(t, types[0].Name, "public.anotheruint64")
require.Equal(t, types[1].Name, "anotheruint64")
require.Equal(t, types[2].Name, "public._anotheruint64")
require.Equal(t, types[3].Name, "_anotheruint64")
require.Equal(t, types[4].Name, "public.dtype_test")
require.Equal(t, types[5].Name, "dtype_test")
})
}

doc.go

@ -1,68 +1,54 @@
// Package pgx is a PostgreSQL database driver.
/*
pgx provides lower level access to PostgreSQL than the standard database/sql. It remains as similar to the database/sql
interface as possible while providing better speed and access to PostgreSQL specific features. Import
github.com/jackc/pgx/v4/stdlib to use pgx as a database/sql compatible driver.
pgx provides a native PostgreSQL driver and can act as a database/sql driver. The native PostgreSQL interface is similar
to the database/sql interface while providing better speed and access to PostgreSQL specific features. Use
github.com/jackc/pgx/v5/stdlib to use pgx as a database/sql compatible driver. See that package's documentation for
details.
Establishing a Connection
The primary way of establishing a connection is with `pgx.Connect`.
The primary way of establishing a connection is with [pgx.Connect]:
conn, err := pgx.Connect(context.Background(), os.Getenv("DATABASE_URL"))
The database connection string can be in URL or DSN format. Both PostgreSQL settings and pgx settings can be specified
here. In addition, a config struct can be created by `ParseConfig` and modified before establishing the connection with
`ConnectConfig`.
config, err := pgx.ParseConfig(os.Getenv("DATABASE_URL"))
if err != nil {
// ...
}
config.Logger = log15adapter.NewLogger(log.New("module", "pgx"))
conn, err := pgx.ConnectConfig(context.Background(), config)
The database connection string can be in URL or key/value format. Both PostgreSQL settings and pgx settings can be
specified here. In addition, a config struct can be created by [ParseConfig] and modified before establishing the
connection with [ConnectConfig] to configure settings such as tracing that cannot be configured with a connection
string.
Connection Pool
`*pgx.Conn` represents a single connection to the database and is not concurrency safe. Use sub-package pgxpool for a
concurrency safe connection pool.
[*pgx.Conn] represents a single connection to the database and is not concurrency safe. Use package
github.com/jackc/pgx/v5/pgxpool for a concurrency safe connection pool.
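For example, a minimal sketch:

	pool, err := pgxpool.New(context.Background(), os.Getenv("DATABASE_URL"))
	if err != nil {
		return err
	}
	defer pool.Close()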
Query Interface
pgx implements Query and Scan in the familiar database/sql style.
pgx implements Query in the familiar database/sql style. However, pgx provides generic functions such as CollectRows and
ForEachRow that are a simpler and safer way of processing rows than manually calling defer rows.Close(), rows.Next(),
rows.Scan, and rows.Err().
var sum int32
CollectRows can be used collect all returned rows into a slice.
// Send the query to the server. The returned rows MUST be closed
// before conn can be used again.
rows, err := conn.Query(context.Background(), "select generate_series(1,$1)", 10)
rows, _ := conn.Query(context.Background(), "select generate_series(1,$1)", 5)
numbers, err := pgx.CollectRows(rows, pgx.RowTo[int32])
if err != nil {
return err
return err
}
// numbers => [1 2 3 4 5]
// rows.Close is called by rows.Next when all rows are read
// or an error occurs in Next or Scan. So it may optionally be
// omitted if nothing in the rows.Next loop can panic. It is
// safe to close rows multiple times.
defer rows.Close()
ForEachRow can be used to execute a callback function for every row. This is often easier than iterating over rows
directly.
// Iterate through the result set
for rows.Next() {
var n int32
err = rows.Scan(&n)
if err != nil {
return err
}
sum += n
var sum, n int32
rows, _ := conn.Query(context.Background(), "select generate_series(1,$1)", 10)
_, err := pgx.ForEachRow(rows, []any{&n}, func() error {
sum += n
return nil
})
if err != nil {
return err
}
// Any errors encountered by rows.Next or rows.Scan will be returned here
if rows.Err() != nil {
return rows.Err()
}
// No errors found - do something with sum
pgx also implements QueryRow in the same style as database/sql.
var name string
@ -82,131 +68,11 @@ Use Exec to execute a query that does not return a result set.
return errors.New("No row found to delete")
}
Base Type Mapping
PostgreSQL Data Types
pgx maps between all common base types directly between Go and PostgreSQL. In particular:
Go PostgreSQL
-----------------------
string varchar
text
// Integers are automatically be converted to any other integer type if
// it can be done without overflow or underflow.
int8
int16 smallint
int32 int
int64 bigint
int
uint8
uint16
uint32
uint64
uint
// Floats are strict and do not automatically convert like integers.
float32 float4
float64 float8
time.Time date
timestamp
timestamptz
[]byte bytea
Null Mapping
pgx can map nulls in two ways. The first is package pgtype provides types that have a data field and a status field.
They work in a similar fashion to database/sql. The second is to use a pointer to a pointer.
var foo pgtype.Varchar
var bar *string
err := conn.QueryRow("select foo, bar from widgets where id=$1", 42).Scan(&foo, &bar)
if err != nil {
return err
}
Array Mapping
pgx maps between int16, int32, int64, float32, float64, and string Go slices and the equivalent PostgreSQL array type.
Go slices of native types do not support nulls, so if a PostgreSQL array that contains a null is read into a native Go
slice an error will occur. The pgtype package includes many more array types for PostgreSQL types that do not directly
map to native Go types.
JSON and JSONB Mapping
pgx includes built-in support to marshal and unmarshal between Go types and the PostgreSQL JSON and JSONB.
Inet and CIDR Mapping
pgx encodes from net.IPNet to and from inet and cidr PostgreSQL types. In addition, as a convenience pgx will encode
from a net.IP; it will assume a /32 netmask for IPv4 and a /128 for IPv6.
Custom Type Support
pgx includes support for the common data types like integers, floats, strings, dates, and times that have direct
mappings between Go and SQL. In addition, pgx uses the github.com/jackc/pgtype library to support more types. See
documention for that library for instructions on how to implement custom types.
See example_custom_type_test.go for an example of a custom type for the PostgreSQL point type.
pgx also includes support for custom types implementing the database/sql.Scanner and database/sql/driver.Valuer
interfaces.
If pgx does cannot natively encode a type and that type is a renamed type (e.g. type MyTime time.Time) pgx will attempt
to encode the underlying type. While this is usually desired behavior it can produce surprising behavior if one the
underlying type and the renamed type each implement database/sql interfaces and the other implements pgx interfaces. It
is recommended that this situation be avoided by implementing pgx interfaces on the renamed type.
Composite types and row values
Row values and composite types are represented as pgtype.Record (https://pkg.go.dev/github.com/jackc/pgtype?tab=doc#Record).
It is possible to get values of your custom type by implementing DecodeBinary interface. Decoding into
pgtype.Record first can simplify process by avoiding dealing with raw protocol directly.
For example:
type MyType struct {
a int // NULL will cause decoding error
b *string // there can be NULL in this position in SQL
}
func (t *MyType) DecodeBinary(ci *pgtype.ConnInfo, src []byte) error {
r := pgtype.Record{
Fields: []pgtype.Value{&pgtype.Int4{}, &pgtype.Text{}},
}
if err := r.DecodeBinary(ci, src); err != nil {
return err
}
if r.Status != pgtype.Present {
return errors.New("BUG: decoding should not be called on NULL value")
}
a := r.Fields[0].(*pgtype.Int4)
b := r.Fields[1].(*pgtype.Text)
// type compatibility is checked by AssignTo
// only lossless assignments will succeed
if err := a.AssignTo(&t.a); err != nil {
return err
}
// AssignTo also deals with null value handling
if err := b.AssignTo(&t.b); err != nil {
return err
}
return nil
}
result := MyType{}
err := conn.QueryRow(context.Background(), "select row(1, 'foo'::text)", pgx.QueryResultFormats{pgx.BinaryFormatCode}).Scan(&r)
Raw Bytes Mapping
[]byte passed as arguments to Query, QueryRow, and Exec are passed unmodified to PostgreSQL.
pgx uses the pgtype package to convert Go values to and from PostgreSQL values. It supports many PostgreSQL types
directly and is customizable and extendable. User defined data types such as enums, domains, and composite types may
require type registration. See that package's documentation for details.
Transactions
@ -233,7 +99,19 @@ Transactions are started by calling Begin.
The Tx returned from Begin also implements the Begin method. This can be used to implement pseudo nested transactions.
These are internally implemented with savepoints.
Use BeginTx to control the transaction mode.
Use BeginTx to control the transaction mode. BeginTx also can be used to ensure a new transaction is created instead of
a pseudo nested transaction.
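For example, a brief sketch (commit and rollback handling abbreviated):

	tx, err := conn.BeginTx(context.Background(), pgx.TxOptions{IsoLevel: pgx.Serializable})
	if err != nil {
		return err
	}
	defer tx.Rollback(context.Background())
	// ... work with tx ...
	return tx.Commit(context.Background())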
BeginFunc and BeginTxFunc are functions that begin a transaction, execute a function, and commit or rollback the
transaction depending on the return value of the function. These can be simpler and less error prone to use.
err = pgx.BeginFunc(context.Background(), conn, func(tx pgx.Tx) error {
_, err := tx.Exec(context.Background(), "insert into foo(id) values (1)")
return err
})
if err != nil {
return err
}
Prepared Statements
@ -245,10 +123,10 @@ for information on how to customize or disable the statement cache.
Copy Protocol
Use CopyFrom to efficiently insert multiple rows at a time using the PostgreSQL copy protocol. CopyFrom accepts a
CopyFromSource interface. If the data is already in a [][]interface{} use CopyFromRows to wrap it in a CopyFromSource
interface. Or implement CopyFromSource to avoid buffering the entire data set in memory.
CopyFromSource interface. If the data is already in a [][]any use CopyFromRows to wrap it in a CopyFromSource interface.
Or implement CopyFromSource to avoid buffering the entire data set in memory.
rows := [][]interface{}{
rows := [][]any{
{"John", "Smith", int32(36)},
{"Jane", "Doe", int32(29)},
}
@ -260,37 +138,57 @@ interface. Or implement CopyFromSource to avoid buffering the entire data set in
pgx.CopyFromRows(rows),
)
When you already have a typed array, using CopyFromSlice can be more convenient.
rows := []User{
{"John", "Smith", 36},
{"Jane", "Doe", 29},
}
copyCount, err := conn.CopyFrom(
context.Background(),
pgx.Identifier{"people"},
[]string{"first_name", "last_name", "age"},
pgx.CopyFromSlice(len(rows), func(i int) ([]any, error) {
return []any{rows[i].FirstName, rows[i].LastName, rows[i].Age}, nil
}),
)
CopyFrom can be faster than an insert with as few as 5 rows.
Listen and Notify
pgx can listen to the PostgreSQL notification system with the `Conn.WaitForNotification` method. It blocks until a
context is received or the context is canceled.
notification is received or the context is canceled.
_, err := conn.Exec(context.Background(), "listen channelname")
if err != nil {
return nil
return err
}
if notification, err := conn.WaitForNotification(context.Background()); err != nil {
// do something with notification
notification, err := conn.WaitForNotification(context.Background())
if err != nil {
return err
}
// do something with notification
Logging
Tracing and Logging
pgx defines a simple logger interface. Connections optionally accept a logger that satisfies this interface. Set
LogLevel to control logging verbosity. Adapters for github.com/inconshreveable/log15, github.com/sirupsen/logrus,
go.uber.org/zap, github.com/rs/zerolog, and the testing log are provided in the log directory.
pgx supports tracing by setting ConnConfig.Tracer. To combine several tracers you can use the multitracer.Tracer.
In addition, the tracelog package provides the TraceLog type which lets a traditional logger act as a Tracer.
For debug tracing of the actual PostgreSQL wire protocol messages see github.com/jackc/pgx/v5/pgproto3.
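As a rough sketch (assuming the tracelog package; the logger function shown here is illustrative only):

	config, err := pgx.ParseConfig(os.Getenv("DATABASE_URL"))
	if err != nil {
		return err
	}
	config.Tracer = &tracelog.TraceLog{
		Logger: tracelog.LoggerFunc(func(ctx context.Context, level tracelog.LogLevel, msg string, data map[string]any) {
			log.Printf("%s %s %v", level, msg, data)
		}),
		LogLevel: tracelog.LogLevelDebug,
	}
	conn, err := pgx.ConnectConfig(context.Background(), config)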
Lower Level PostgreSQL Functionality
pgx is implemented on top of github.com/jackc/pgconn a lower level PostgreSQL driver. The Conn.PgConn() method can be
used to access this lower layer.
github.com/jackc/pgx/v5/pgconn contains a lower level PostgreSQL driver roughly at the level of libpq. pgx.Conn is
implemented on top of pgconn. The Conn.PgConn() method can be used to access this lower layer.
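For example, a minimal sketch that uses the lower level API directly:

	results, err := conn.PgConn().Exec(context.Background(), "select 42").ReadAll()
	if err != nil {
		return err
	}
	fmt.Println(string(results[0].Rows[0][0])) // "42"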
PgBouncer
pgx is compatible with PgBouncer in two modes. One is when the connection has a statement cache in "describe" mode. The
other is when the connection is using the simple protocol. This can be set with the PreferSimpleProtocol config option.
By default pgx automatically uses prepared statements. Prepared statements are incompatible with PgBouncer. This can be
disabled by setting a different QueryExecMode in ConnConfig.DefaultQueryExecMode.
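For example, a minimal sketch (the exact mode to pick depends on the PgBouncer pooling mode):

	config, err := pgx.ParseConfig(os.Getenv("DATABASE_URL"))
	if err != nil {
		return err
	}
	config.DefaultQueryExecMode = pgx.QueryExecModeSimpleProtocol
	conn, err := pgx.ConnectConfig(context.Background(), config)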
*/
package pgx


@ -1,107 +0,0 @@
package pgx_test
import (
"context"
"fmt"
"os"
"regexp"
"strconv"
"github.com/jackc/pgtype"
"github.com/jackc/pgx/v4"
errors "golang.org/x/xerrors"
)
var pointRegexp *regexp.Regexp = regexp.MustCompile(`^\((.*),(.*)\)$`)
// Point represents a point that may be null.
type Point struct {
X, Y float64 // Coordinates of point
Status pgtype.Status
}
func (dst *Point) Set(src interface{}) error {
return errors.Errorf("cannot convert %v to Point", src)
}
func (dst *Point) Get() interface{} {
switch dst.Status {
case pgtype.Present:
return dst
case pgtype.Null:
return nil
default:
return dst.Status
}
}
func (src *Point) AssignTo(dst interface{}) error {
return errors.Errorf("cannot assign %v to %T", src, dst)
}
func (dst *Point) DecodeText(ci *pgtype.ConnInfo, src []byte) error {
if src == nil {
*dst = Point{Status: pgtype.Null}
return nil
}
s := string(src)
match := pointRegexp.FindStringSubmatch(s)
if match == nil {
return errors.Errorf("Received invalid point: %v", s)
}
x, err := strconv.ParseFloat(match[1], 64)
if err != nil {
return errors.Errorf("Received invalid point: %v", s)
}
y, err := strconv.ParseFloat(match[2], 64)
if err != nil {
return errors.Errorf("Received invalid point: %v", s)
}
*dst = Point{X: x, Y: y, Status: pgtype.Present}
return nil
}
func (src *Point) String() string {
if src.Status == pgtype.Null {
return "null point"
}
return fmt.Sprintf("%.1f, %.1f", src.X, src.Y)
}
func Example_CustomType() {
conn, err := pgx.Connect(context.Background(), os.Getenv("PGX_TEST_DATABASE"))
if err != nil {
fmt.Printf("Unable to establish connection: %v", err)
return
}
// Override registered handler for point
conn.ConnInfo().RegisterDataType(pgtype.DataType{
Value: &Point{},
Name: "point",
OID: 600,
})
p := &Point{}
err = conn.QueryRow(context.Background(), "select null::point").Scan(p)
if err != nil {
fmt.Println(err)
return
}
fmt.Println(p)
err = conn.QueryRow(context.Background(), "select point(1.5,2.5)").Scan(p)
if err != nil {
fmt.Println(err)
return
}
fmt.Println(p)
// Output:
// null point
// 1.5, 2.5
}


@ -6,14 +6,14 @@ import (
"fmt"
"os"
"github.com/jackc/pgx/v4/pgxpool"
"github.com/jackc/pgx/v5/pgxpool"
)
var pool *pgxpool.Pool
func main() {
var err error
pool, err = pgxpool.Connect(context.Background(), os.Getenv("DATABASE_URL"))
pool, err = pgxpool.New(context.Background(), os.Getenv("DATABASE_URL"))
if err != nil {
fmt.Fprintln(os.Stderr, "Unable to connect to database:", err)
os.Exit(1)


@ -6,7 +6,7 @@ import (
"os"
"strconv"
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v5"
)
var conn *pgx.Conn


@ -2,14 +2,13 @@ package main
import (
"context"
"io/ioutil"
"io"
"log"
"net/http"
"os"
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v4/log/log15adapter"
"github.com/jackc/pgx/v4/pgxpool"
log "gopkg.in/inconshreveable/log15.v2"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgxpool"
)
var db *pgxpool.Pool
@ -30,7 +29,7 @@ func getUrlHandler(w http.ResponseWriter, req *http.Request) {
func putUrlHandler(w http.ResponseWriter, req *http.Request) {
id := req.URL.Path
var url string
if body, err := ioutil.ReadAll(req.Body); err == nil {
if body, err := io.ReadAll(req.Body); err == nil {
url = string(body)
} else {
http.Error(w, "Internal server error", http.StatusInternalServerError)
@ -71,28 +70,21 @@ func urlHandler(w http.ResponseWriter, req *http.Request) {
}
func main() {
logger := log15adapter.NewLogger(log.New("module", "pgx"))
poolConfig, err := pgxpool.ParseConfig(os.Getenv("DATABASE_URL"))
if err != nil {
log.Crit("Unable to parse DATABASE_URL", "error", err)
os.Exit(1)
log.Fatalln("Unable to parse DATABASE_URL:", err)
}
poolConfig.ConnConfig.Logger = logger
db, err = pgxpool.ConnectConfig(context.Background(), poolConfig)
db, err = pgxpool.NewWithConfig(context.Background(), poolConfig)
if err != nil {
log.Crit("Unable to create connection pool", "error", err)
os.Exit(1)
log.Fatalln("Unable to create connection pool:", err)
}
http.HandleFunc("/", urlHandler)
log.Info("Starting URL shortener on localhost:8080")
log.Println("Starting URL shortener on localhost:8080")
err = http.ListenAndServe("localhost:8080", nil)
if err != nil {
log.Crit("Unable to start web server", "error", err)
os.Exit(1)
log.Fatalln("Unable to start web server:", err)
}
}


@ -1,168 +1,146 @@
package pgx
import (
"database/sql/driver"
"fmt"
"reflect"
"github.com/jackc/pgtype"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgtype"
)
type extendedQueryBuilder struct {
paramValues [][]byte
// ExtendedQueryBuilder is used to choose the parameter formats, to format the parameters and to choose the result
// formats for an extended query.
type ExtendedQueryBuilder struct {
ParamValues [][]byte
paramValueBytes []byte
paramFormats []int16
resultFormats []int16
resetCount int
ParamFormats []int16
ResultFormats []int16
}
func (eqb *extendedQueryBuilder) AppendParam(ci *pgtype.ConnInfo, oid uint32, arg interface{}) error {
f := chooseParameterFormatCode(ci, oid, arg)
eqb.paramFormats = append(eqb.paramFormats, f)
// Build sets ParamValues, ParamFormats, and ResultFormats for use with *PgConn.ExecParams or *PgConn.ExecPrepared. If
// sd is nil then QueryExecModeExec behavior will be used.
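//
// A rough usage sketch (an assumed caller, not part of this change; sd comes from a
// prior Prepare and conn is an open *Conn):
//
//	var eqb pgx.ExtendedQueryBuilder
//	if err := eqb.Build(conn.TypeMap(), sd, args); err != nil {
//		return err
//	}
//	rr := conn.PgConn().ExecPrepared(ctx, sd.Name, eqb.ParamValues, eqb.ParamFormats, eqb.ResultFormats)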
func (eqb *ExtendedQueryBuilder) Build(m *pgtype.Map, sd *pgconn.StatementDescription, args []any) error {
eqb.reset()
v, err := eqb.encodeExtendedParamValue(ci, oid, f, arg)
if err != nil {
return err
if sd == nil {
for i := range args {
err := eqb.appendParam(m, 0, pgtype.TextFormatCode, args[i])
if err != nil {
err = fmt.Errorf("failed to encode args[%d]: %w", i, err)
return err
}
}
return nil
}
if len(sd.ParamOIDs) != len(args) {
return fmt.Errorf("mismatched param and argument count")
}
for i := range args {
err := eqb.appendParam(m, sd.ParamOIDs[i], -1, args[i])
if err != nil {
err = fmt.Errorf("failed to encode args[%d]: %w", i, err)
return err
}
}
for i := range sd.Fields {
eqb.appendResultFormat(m.FormatCodeForOID(sd.Fields[i].DataTypeOID))
}
eqb.paramValues = append(eqb.paramValues, v)
return nil
}
func (eqb *extendedQueryBuilder) AppendResultFormat(f int16) {
eqb.resultFormats = append(eqb.resultFormats, f)
// appendParam appends a parameter to the query. format may be -1 to automatically choose the format. If arg is nil it
// must be an untyped nil.
func (eqb *ExtendedQueryBuilder) appendParam(m *pgtype.Map, oid uint32, format int16, arg any) error {
if format == -1 {
preferredFormat := eqb.chooseParameterFormatCode(m, oid, arg)
preferredErr := eqb.appendParam(m, oid, preferredFormat, arg)
if preferredErr == nil {
return nil
}
var otherFormat int16
if preferredFormat == TextFormatCode {
otherFormat = BinaryFormatCode
} else {
otherFormat = TextFormatCode
}
otherErr := eqb.appendParam(m, oid, otherFormat, arg)
if otherErr == nil {
return nil
}
return preferredErr // return the error from the preferred format
}
v, err := eqb.encodeExtendedParamValue(m, oid, format, arg)
if err != nil {
return err
}
eqb.ParamFormats = append(eqb.ParamFormats, format)
eqb.ParamValues = append(eqb.ParamValues, v)
return nil
}
func (eqb *extendedQueryBuilder) Reset() {
eqb.paramValues = eqb.paramValues[0:0]
// appendResultFormat appends a result format to the query.
func (eqb *ExtendedQueryBuilder) appendResultFormat(format int16) {
eqb.ResultFormats = append(eqb.ResultFormats, format)
}
// reset readies eqb to build another query.
func (eqb *ExtendedQueryBuilder) reset() {
eqb.ParamValues = eqb.ParamValues[0:0]
eqb.paramValueBytes = eqb.paramValueBytes[0:0]
eqb.paramFormats = eqb.paramFormats[0:0]
eqb.resultFormats = eqb.resultFormats[0:0]
eqb.ParamFormats = eqb.ParamFormats[0:0]
eqb.ResultFormats = eqb.ResultFormats[0:0]
eqb.resetCount++
// Every so often shrink our reserved memory if it is abnormally high
if eqb.resetCount%128 == 0 {
if cap(eqb.paramValues) > 64 {
eqb.paramValues = make([][]byte, 0, cap(eqb.paramValues)/2)
}
if cap(eqb.paramValueBytes) > 256 {
eqb.paramValueBytes = make([]byte, 0, cap(eqb.paramValueBytes)/2)
}
if cap(eqb.paramFormats) > 64 {
eqb.paramFormats = make([]int16, 0, cap(eqb.paramFormats)/2)
}
if cap(eqb.resultFormats) > 64 {
eqb.resultFormats = make([]int16, 0, cap(eqb.resultFormats)/2)
}
if cap(eqb.ParamValues) > 64 {
eqb.ParamValues = make([][]byte, 0, 64)
}
if cap(eqb.paramValueBytes) > 256 {
eqb.paramValueBytes = make([]byte, 0, 256)
}
if cap(eqb.ParamFormats) > 64 {
eqb.ParamFormats = make([]int16, 0, 64)
}
if cap(eqb.ResultFormats) > 64 {
eqb.ResultFormats = make([]int16, 0, 64)
}
}
func (eqb *extendedQueryBuilder) encodeExtendedParamValue(ci *pgtype.ConnInfo, oid uint32, formatCode int16, arg interface{}) ([]byte, error) {
if arg == nil {
return nil, nil
}
refVal := reflect.ValueOf(arg)
argIsPtr := refVal.Kind() == reflect.Ptr
if argIsPtr && refVal.IsNil() {
return nil, nil
}
func (eqb *ExtendedQueryBuilder) encodeExtendedParamValue(m *pgtype.Map, oid uint32, formatCode int16, arg any) ([]byte, error) {
if eqb.paramValueBytes == nil {
eqb.paramValueBytes = make([]byte, 0, 128)
}
var err error
var buf []byte
pos := len(eqb.paramValueBytes)
if arg, ok := arg.(string); ok {
return []byte(arg), nil
buf, err := m.Encode(oid, formatCode, arg, eqb.paramValueBytes)
if err != nil {
return nil, err
}
if formatCode == TextFormatCode {
if arg, ok := arg.(pgtype.TextEncoder); ok {
buf, err = arg.EncodeText(ci, eqb.paramValueBytes)
if err != nil {
return nil, err
}
if buf == nil {
return nil, nil
}
eqb.paramValueBytes = buf
return eqb.paramValueBytes[pos:], nil
}
} else if formatCode == BinaryFormatCode {
if arg, ok := arg.(pgtype.BinaryEncoder); ok {
buf, err = arg.EncodeBinary(ci, eqb.paramValueBytes)
if err != nil {
return nil, err
}
if buf == nil {
return nil, nil
}
eqb.paramValueBytes = buf
return eqb.paramValueBytes[pos:], nil
}
if buf == nil {
return nil, nil
}
if argIsPtr {
// We have already checked that arg is not pointing to nil,
// so it is safe to dereference here.
arg = refVal.Elem().Interface()
return eqb.encodeExtendedParamValue(ci, oid, formatCode, arg)
}
if dt, ok := ci.DataTypeForOID(oid); ok {
value := dt.Value
err := value.Set(arg)
if err != nil {
{
if arg, ok := arg.(driver.Valuer); ok {
v, err := callValuerValue(arg)
if err != nil {
return nil, err
}
return eqb.encodeExtendedParamValue(ci, oid, formatCode, v)
}
}
return nil, err
}
return eqb.encodeExtendedParamValue(ci, oid, formatCode, value)
}
// There is no data type registered for the destination OID, but maybe there is data type registered for the arg
// type. If so use it's text encoder (if available).
if dt, ok := ci.DataTypeForValue(arg); ok {
value := dt.Value
if textEncoder, ok := value.(pgtype.TextEncoder); ok {
err := value.Set(arg)
if err != nil {
return nil, err
}
buf, err = textEncoder.EncodeText(ci, eqb.paramValueBytes)
if err != nil {
return nil, err
}
if buf == nil {
return nil, nil
}
eqb.paramValueBytes = buf
return eqb.paramValueBytes[pos:], nil
}
}
if strippedArg, ok := stripNamedType(&refVal); ok {
return eqb.encodeExtendedParamValue(ci, oid, formatCode, strippedArg)
}
return nil, SerializationError(fmt.Sprintf("Cannot encode %T into oid %v - %T must implement Encoder or be converted to a string", arg, oid, arg))
eqb.paramValueBytes = buf
return eqb.paramValueBytes[pos:], nil
}
// chooseParameterFormatCode determines the correct format code for an
// argument to a prepared statement. It defaults to TextFormatCode if no
// determination can be made.
func (eqb *ExtendedQueryBuilder) chooseParameterFormatCode(m *pgtype.Map, oid uint32, arg any) int16 {
switch arg.(type) {
case string, *string:
return TextFormatCode
}
return m.FormatCodeForOID(oid)
}

go.mod

@ -1,21 +1,21 @@
module github.com/jackc/pgx/v4
module github.com/jackc/pgx/v5
go 1.12
go 1.23.0
require (
github.com/cockroachdb/apd v1.1.0
github.com/gofrs/uuid v3.2.0+incompatible
github.com/jackc/pgconn v1.7.2
github.com/jackc/pgio v1.0.0
github.com/jackc/pgproto3/v2 v2.0.6
github.com/jackc/pgtype v1.6.1
github.com/jackc/puddle v1.1.2
github.com/mattn/go-colorable v0.1.6 // indirect
github.com/rs/zerolog v1.15.0
github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc
github.com/sirupsen/logrus v1.4.2
github.com/stretchr/testify v1.5.1
go.uber.org/zap v1.10.0
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec
github.com/jackc/pgpassfile v1.0.0
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761
github.com/jackc/puddle/v2 v2.2.2
github.com/stretchr/testify v1.8.1
golang.org/x/crypto v0.37.0
golang.org/x/sync v0.13.0
golang.org/x/text v0.24.0
)
require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/kr/pretty v0.3.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

go.sum

@ -1,274 +1,45 @@
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE=
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0=
github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
github.com/jackc/chunkreader/v2 v2.0.0 h1:DUwgMQuuPnS0rhMXenUtZpqZqrR/30NWY+qQvTpSvEs=
github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3 h1:ZFYpB74Kq8xE9gmfxCmXD6QxZ27ja+j3HwGFc+YurhQ=
github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb h1:d6GP9szHvXVopAOAnZ7WhRnF3Xdxrylmm/9jnfmW4Ag=
github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk=
github.com/jackc/pgconn v1.5.0 h1:oFSOilzIZkyg787M1fEmyMfOUUvwj0daqYMfaWwNL4o=
github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853 h1:LRlrfJW9S99uiOCY8F/qLvX1yEY1TVAaCBHFb79yHBQ=
github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
github.com/jackc/pgconn v1.6.0 h1:8FiBxMxS/Z0eQ9BeE1HhL6pzPL1R5x+ZuQ+T86WgZ4I=
github.com/jackc/pgconn v1.6.0/go.mod h1:yeseQo4xhQbgyJs2c87RAXOH2i624N0Fh1KSPJya7qo=
github.com/jackc/pgconn v1.6.1 h1:lwofaXKPbIx6qEaK8mNm7uZuOwxHw+PnAFGDsDFpkRI=
github.com/jackc/pgconn v1.6.1/go.mod h1:g8mKMqmSUO6AzAvha7vy07g1rbGOlc7iF0nU0ei83hc=
github.com/jackc/pgconn v1.6.2 h1:ifRs/oHByR6NfEXfusvjoTqX/KcSvDYNFASoK/wXKfs=
github.com/jackc/pgconn v1.6.2/go.mod h1:w2pne1C2tZgP+TvjqLpOigGzNqjBgQW9dUw/4Chex78=
github.com/jackc/pgconn v1.6.3 h1:4Ks3RKvSvKPolXZsnLQTDAsokDhgID14Cv4ehECmzlY=
github.com/jackc/pgconn v1.6.3/go.mod h1:w2pne1C2tZgP+TvjqLpOigGzNqjBgQW9dUw/4Chex78=
github.com/jackc/pgconn v1.6.4 h1:S7T6cx5o2OqmxdHaXLH1ZeD1SbI8jBznyYE9Ec0RCQ8=
github.com/jackc/pgconn v1.6.4/go.mod h1:w2pne1C2tZgP+TvjqLpOigGzNqjBgQW9dUw/4Chex78=
github.com/jackc/pgconn v1.6.5-0.20200821030021-3eb5432c4738 h1:t/IRFEw2da5v6DroUIYPbEIDWxGGyvVLItoO7gOUBZM=
github.com/jackc/pgconn v1.6.5-0.20200821030021-3eb5432c4738/go.mod h1:gm9GeeZiC+Ja7JV4fB/MNDeaOqsCrzFiZlLVhAompxk=
github.com/jackc/pgconn v1.6.5-0.20200821030840-fdfc783345f6 h1:7f1RmJO6KZI4cskLrNHBd5CCLmLpIQ0BD75hsorvdz8=
github.com/jackc/pgconn v1.6.5-0.20200821030840-fdfc783345f6/go.mod h1:gm9GeeZiC+Ja7JV4fB/MNDeaOqsCrzFiZlLVhAompxk=
github.com/jackc/pgconn v1.6.5-0.20200905181414-0d4f029683fc h1:9ThyBXKdyBFN2Y1NSCPGCA0kdWCNpd9u4SKWwtr6GfU=
github.com/jackc/pgconn v1.6.5-0.20200905181414-0d4f029683fc/go.mod h1:gm9GeeZiC+Ja7JV4fB/MNDeaOqsCrzFiZlLVhAompxk=
github.com/jackc/pgconn v1.7.0 h1:pwjzcYyfmz/HQOQlENvG1OcDqauTGaqlVahq934F0/U=
github.com/jackc/pgconn v1.7.0/go.mod h1:sF/lPpNEMEOp+IYhyQGdAvrG20gWf6A1tKlr0v7JMeA=
github.com/jackc/pgconn v1.7.1 h1:Ii3hORkg9yTX+8etl2LtfFnL+YzmnR6VSLeTflQBkaQ=
github.com/jackc/pgconn v1.7.1/go.mod h1:sF/lPpNEMEOp+IYhyQGdAvrG20gWf6A1tKlr0v7JMeA=
github.com/jackc/pgconn v1.7.2 h1:195tt17jkjy+FrFlY0pgyrul5kRLb7BGXY3JTrNxeXU=
github.com/jackc/pgconn v1.7.2/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2 h1:JVX6jT/XfzNqIjye4717ITLaNwV9mWbJx0dLCpcRzdA=
github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A=
github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db h1:UpaKn/gYxzH6/zWyRQH1S260zvKqwJJ4h8+Kf09ooh0=
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711 h1:vZp4bYotXUkFx7JUSm7U8KV/7Q0AOdrQxxBBj0ZmZsg=
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29 h1:f2HwOeI1NIJyNFVVeh1gUISyt57iw/fmI/IXJfH3ATE=
github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
github.com/jackc/pgproto3/v2 v2.0.1 h1:Rdjp4NFjwHnEslx2b66FfCI2S0LhO4itac3hXz6WX9M=
github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
github.com/jackc/pgproto3/v2 v2.0.2 h1:q1Hsy66zh4vuNsajBUF2PNqfAMMfxU5mk594lPE9vjY=
github.com/jackc/pgproto3/v2 v2.0.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
github.com/jackc/pgproto3/v2 v2.0.4 h1:RHkX5ZUD9bl/kn0f9dYUWs1N7Nwvo1wwUYvKiR26Zco=
github.com/jackc/pgproto3/v2 v2.0.4/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
github.com/jackc/pgproto3/v2 v2.0.5 h1:NUbEWPmCQZbMmYlTjVoNPhc0CfnYyz2bfUAh6A5ZVJM=
github.com/jackc/pgproto3/v2 v2.0.5/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
github.com/jackc/pgproto3/v2 v2.0.6 h1:b1105ZGEMFe7aCvrT1Cca3VoVb4ZFMaFJLJcg/3zD+8=
github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8 h1:Q3tB+ExeflWUW7AFcAhXqk40s9mnNYLk1nOkKNZ5GnU=
github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg=
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0 h1:mX93v750WifMD1htCt7vqeolcnpaG1gz8URVGjSzcUM=
github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90 h1:aN5Vlwa2Q3QvxHDtZNi1x+GYkQyketBadMjtiug7AbM=
github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59 h1:xOamcCJ9MFJTxR5bvw3ZXmiP8evQMohdt2VJ57C0W8Q=
github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0=
github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a h1:XUNeoL8E15IgWouQ8gfA6EPHOfTqVetdxBhAKMYKNGo=
github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po=
github.com/jackc/pgtype v1.3.1-0.20200513130519-238967ec4e4c h1:dkoQjaMKaLf/zTPpbgbZjnU1qN4bpDY+uLBSyiKh/ns=
github.com/jackc/pgtype v1.3.1-0.20200513130519-238967ec4e4c/go.mod h1:f3c+S645fwV5ZqwPvLWZmmnAfPkmaTeLnXs0byan+aA=
github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c h1:nXT9KGu1TBy57S0XcCGkTUgqOrvj3jY7Yb+kw5Q2HVc=
github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ=
github.com/jackc/pgtype v1.4.0 h1:pHQfb4jh9iKqHyxPthq1fr+0HwSNIl3btYPbw2m2lbM=
github.com/jackc/pgtype v1.4.0/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
github.com/jackc/pgtype v1.4.1 h1:8PRKqCS9Nt2FQbNegoEAIlY6r/DTP2aaXyh5bAEn89g=
github.com/jackc/pgtype v1.4.1/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
github.com/jackc/pgtype v1.4.2 h1:t+6LWm5eWPLX1H5Se702JSBcirq6uWa4jiG4wV1rAWY=
github.com/jackc/pgtype v1.4.2/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
github.com/jackc/pgtype v1.4.3-0.20200905161353-e7d2b057a716 h1:DrP52jA32liWkjCF/g3rYC1QjnRh6kvyXaZSevAtlqE=
github.com/jackc/pgtype v1.4.3-0.20200905161353-e7d2b057a716/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
github.com/jackc/pgtype v1.5.0 h1:jzBqRk2HFG2CV4AIwgCI2PwTgm6UUoCAK2ofHHRirtc=
github.com/jackc/pgtype v1.5.0/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
github.com/jackc/pgtype v1.6.0 h1:jNYK10ttF4cwp3pDk98juNUsU7T9eSlzN11bWR7McrI=
github.com/jackc/pgtype v1.6.0/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
github.com/jackc/pgtype v1.6.1 h1:CAtFD7TS95KrxRAh3bidgLwva48WYxk8YkbHZsSWfbI=
github.com/jackc/pgtype v1.6.1/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA=
github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o=
github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg=
github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b h1:cIcUpcEP55F/QuZWEtXyqHoWk+IV4TBiLjtBkeq/Q1c=
github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9 h1:KLBBPU++1T3DHtm1B1QaIHy80Vhu0wNMErIFCNgAL8Y=
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.1.1 h1:PJAw7H/9hoWC4Kf3J8iNmL1SwA6E8vfsLqBiL+F6CtI=
github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.1.2-0.20200821025810-91d0159cc97a h1:ec2LCBkfN1pOq0PhLRH/QitjSXr9s2dnh0gOFyohxHM=
github.com/jackc/puddle v1.1.2-0.20200821025810-91d0159cc97a/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.1.2 h1:mpQEXihFnWGDy6X98EOTh81JYuxn7txby8ilJ3iIPGM=
github.com/jackc/puddle v1.1.2/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.1.0 h1:/5u4a+KGJptBRqGzPvYQL9p0d/tPR4S31+Tnzj9lEO4=
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU=
github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.7 h1:UvyT9uN+3r7yLEYSlJsbQGdsaB/a0DlgWP3pql6iwOc=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.13.0 h1:hSNcYHyxDWycfePW7pUI8swuFkcSMPKh3E63Pokg1Hk=
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
github.com/rs/zerolog v1.15.0 h1:uPRuwkWF4J6fGsJ2R0Gn2jB1EQiav9k3S6CSdygQJXY=
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc h1:jUIKcSPO9MoMJBbEoyE/RJoE8vz7Mb8AjvifMMwSyvY=
github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a h1:Igim7XhdOpBnWPuYJ70XcNpq8q3BCACtVgNfoJxOV7g=
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59 h1:3zb4D3T4G8jdExgVU/95+vQXfpEPiMdCaZgmGVxjNHM=
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e h1:nFYrTHrdrAOpShe27kaFHjsqYSEQ0KWqdWLu3xuZJts=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373 h1:PPwnA7z1Pjf7XYaBP9GL1VAMZmcIWyFz7QCMSIIa3Bg=
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522 h1:bhOzK9QyoD0ogCnFro1m2mz41+Ib0oOhfJnBp5MR4K4=
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec h1:RlWgLqCMMIYYEVcAR5MDsuHlVkaIPDAF+5Dehzg8L5A=
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@ -1,61 +0,0 @@
package pgx
import (
"database/sql/driver"
"reflect"
)
// This file contains code copied from the Go standard library due to the
// required function not being public.
// Copyright (c) 2009 The Go Authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// From database/sql/convert.go
var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
// callValuerValue returns vr.Value(), with one exception:
// If vr.Value is an auto-generated method on a pointer type and the
// pointer is nil, it would panic at runtime in the panicwrap
// method. Treat it like nil instead.
// Issue 8415.
//
// This is so people can implement driver.Value on value types and
// still use nil pointers to those types to mean nil/NULL, just like
// string/*string.
//
// This function is mirrored in the database/sql/driver package.
func callValuerValue(vr driver.Valuer) (v driver.Value, err error) {
if rv := reflect.ValueOf(vr); rv.Kind() == reflect.Ptr &&
rv.IsNil() &&
rv.Type().Elem().Implements(valuerReflectType) {
return nil, nil
}
return vr.Value()
}
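
The nil-pointer case described in the comment above is easier to see with a small, hedged sketch (the wrapped type below is hypothetical and not part of pgx): a value type implements driver.Valuer and a nil pointer to it is passed where a Valuer is expected.

package main

import (
	"database/sql/driver"
	"fmt"
	"reflect"
)

type wrapped struct{} // hypothetical value type implementing driver.Valuer

func (wrapped) Value() (driver.Value, error) { return "ok", nil }

func main() {
	var vr driver.Valuer = (*wrapped)(nil)
	// Calling vr.Value() directly would panic inside the auto-generated *wrapped
	// wrapper method. The guard below mirrors callValuerValue (the real helper also
	// checks that the element type implements driver.Valuer): a nil pointer is
	// treated as SQL NULL instead of panicking.
	rv := reflect.ValueOf(vr)
	if rv.Kind() == reflect.Ptr && rv.IsNil() {
		fmt.Println("treated as NULL")
		return
	}
	v, _ := vr.Value()
	fmt.Println(v)
}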

View File

@ -2,52 +2,26 @@ package pgx_test
import (
"context"
"github.com/stretchr/testify/assert"
"os"
"testing"
"github.com/jackc/pgconn"
"github.com/jackc/pgx/v4"
"github.com/stretchr/testify/assert"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgxtest"
"github.com/stretchr/testify/require"
)
func testWithAndWithoutPreferSimpleProtocol(t *testing.T, f func(t *testing.T, conn *pgx.Conn)) {
t.Run("SimpleProto",
func(t *testing.T) {
config, err := pgx.ParseConfig(os.Getenv("PGX_TEST_DATABASE"))
require.NoError(t, err)
var defaultConnTestRunner pgxtest.ConnTestRunner
config.PreferSimpleProtocol = true
conn, err := pgx.ConnectConfig(context.Background(), config)
require.NoError(t, err)
defer func() {
err := conn.Close(context.Background())
require.NoError(t, err)
}()
f(t, conn)
ensureConnValid(t, conn)
},
)
t.Run("DefaultProto",
func(t *testing.T) {
config, err := pgx.ParseConfig(os.Getenv("PGX_TEST_DATABASE"))
require.NoError(t, err)
conn, err := pgx.ConnectConfig(context.Background(), config)
require.NoError(t, err)
defer func() {
err := conn.Close(context.Background())
require.NoError(t, err)
}()
f(t, conn)
ensureConnValid(t, conn)
},
)
func init() {
defaultConnTestRunner = pgxtest.DefaultConnTestRunner()
defaultConnTestRunner.CreateConfig = func(ctx context.Context, t testing.TB) *pgx.ConnConfig {
config, err := pgx.ParseConfig(os.Getenv("PGX_TEST_DATABASE"))
require.NoError(t, err)
return config
}
}
func mustConnectString(t testing.TB, connString string) *pgx.Conn {
@ -79,7 +53,7 @@ func closeConn(t testing.TB, conn *pgx.Conn) {
}
}
func mustExec(t testing.TB, conn *pgx.Conn, sql string, arguments ...interface{}) (commandTag pgconn.CommandTag) {
func mustExec(t testing.TB, conn *pgx.Conn, sql string, arguments ...any) (commandTag pgconn.CommandTag) {
var err error
if commandTag, err = conn.Exec(context.Background(), sql, arguments...); err != nil {
t.Fatalf("Exec unexpectedly failed with %v: %v", sql, err)
@ -88,7 +62,7 @@ func mustExec(t testing.TB, conn *pgx.Conn, sql string, arguments ...interface{}
}
// Do a simple query to ensure the connection is still usable
func ensureConnValid(t *testing.T, conn *pgx.Conn) {
func ensureConnValid(t testing.TB, conn *pgx.Conn) {
var sum, rowCount int32
rows, err := conn.Query(context.Background(), "select generate_series(1,$1)", 10)
@ -105,7 +79,7 @@ func ensureConnValid(t *testing.T, conn *pgx.Conn) {
}
if rows.Err() != nil {
t.Fatalf("conn.Query failed: %v", err)
t.Fatalf("conn.Query failed: %v", rows.Err())
}
if rowCount != 10 {
@ -124,13 +98,11 @@ func assertConfigsEqual(t *testing.T, expected, actual *pgx.ConnConfig, testName
return
}
assert.Equalf(t, expected.Logger, actual.Logger, "%s - Logger", testName)
assert.Equalf(t, expected.LogLevel, actual.LogLevel, "%s - LogLevel", testName)
assert.Equalf(t, expected.Tracer, actual.Tracer, "%s - Tracer", testName)
assert.Equalf(t, expected.ConnString(), actual.ConnString(), "%s - ConnString", testName)
// Can't test function equality, so just test that they are set or not.
assert.Equalf(t, expected.BuildStatementCache == nil, actual.BuildStatementCache == nil, "%s - BuildStatementCache", testName)
assert.Equalf(t, expected.PreferSimpleProtocol, actual.PreferSimpleProtocol, "%s - PreferSimpleProtocol", testName)
assert.Equalf(t, expected.StatementCacheCapacity, actual.StatementCacheCapacity, "%s - StatementCacheCapacity", testName)
assert.Equalf(t, expected.DescriptionCacheCapacity, actual.DescriptionCacheCapacity, "%s - DescriptionCacheCapacity", testName)
assert.Equalf(t, expected.DefaultQueryExecMode, actual.DefaultQueryExecMode, "%s - DefaultQueryExecMode", testName)
assert.Equalf(t, expected.Host, actual.Host, "%s - Host", testName)
assert.Equalf(t, expected.Database, actual.Database, "%s - Database", testName)
assert.Equalf(t, expected.Port, actual.Port, "%s - Port", testName)

View File

@ -0,0 +1,70 @@
// Package iobufpool implements a global segregated-fit pool of buffers for IO.
//
// It uses *[]byte instead of []byte to avoid the sync.Pool allocation with Put. Unfortunately, using a pointer to avoid
// an allocation is purposely not documented. https://github.com/golang/go/issues/16323
package iobufpool
import "sync"
const minPoolExpOf2 = 8
var pools [18]*sync.Pool
func init() {
for i := range pools {
bufLen := 1 << (minPoolExpOf2 + i)
pools[i] = &sync.Pool{
New: func() any {
buf := make([]byte, bufLen)
return &buf
},
}
}
}
// Get gets a []byte of len size with cap <= size*2.
func Get(size int) *[]byte {
i := getPoolIdx(size)
if i >= len(pools) {
buf := make([]byte, size)
return &buf
}
ptrBuf := (pools[i].Get().(*[]byte))
*ptrBuf = (*ptrBuf)[:size]
return ptrBuf
}
func getPoolIdx(size int) int {
size--
size >>= minPoolExpOf2
i := 0
for size > 0 {
size >>= 1
i++
}
return i
}
// Put returns buf to the pool.
func Put(buf *[]byte) {
i := putPoolIdx(cap(*buf))
if i < 0 {
return
}
pools[i].Put(buf)
}
func putPoolIdx(size int) int {
minPoolSize := 1 << minPoolExpOf2
for i := range pools {
if size == minPoolSize<<i {
return i
}
}
return -1
}
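
A hedged usage sketch of the pool above. The sizes are illustrative, and iobufpool is an internal package, so this only compiles from inside the pgx module itself.

package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/internal/iobufpool"
)

func main() {
	buf := iobufpool.Get(300) // len 300, backed by the 512-byte bucket
	copy(*buf, "message payload")
	fmt.Println(len(*buf), cap(*buf)) // 300 512
	iobufpool.Put(buf) // return the buffer so a later Get may reuse the backing array
}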

View File

@ -0,0 +1,36 @@
package iobufpool
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestPoolIdx(t *testing.T) {
tests := []struct {
size int
expected int
}{
{size: 0, expected: 0},
{size: 1, expected: 0},
{size: 255, expected: 0},
{size: 256, expected: 0},
{size: 257, expected: 1},
{size: 511, expected: 1},
{size: 512, expected: 1},
{size: 513, expected: 2},
{size: 1023, expected: 2},
{size: 1024, expected: 2},
{size: 1025, expected: 3},
{size: 2047, expected: 3},
{size: 2048, expected: 3},
{size: 2049, expected: 4},
{size: 8388607, expected: 15},
{size: 8388608, expected: 15},
{size: 8388609, expected: 16},
}
for _, tt := range tests {
idx := getPoolIdx(tt.size)
assert.Equalf(t, tt.expected, idx, "size: %d", tt.size)
}
}

View File

@ -0,0 +1,85 @@
package iobufpool_test
import (
"testing"
"github.com/jackc/pgx/v5/internal/iobufpool"
"github.com/stretchr/testify/assert"
)
func TestGetCap(t *testing.T) {
tests := []struct {
requestedLen int
expectedCap int
}{
{requestedLen: 0, expectedCap: 256},
{requestedLen: 128, expectedCap: 256},
{requestedLen: 255, expectedCap: 256},
{requestedLen: 256, expectedCap: 256},
{requestedLen: 257, expectedCap: 512},
{requestedLen: 511, expectedCap: 512},
{requestedLen: 512, expectedCap: 512},
{requestedLen: 513, expectedCap: 1024},
{requestedLen: 1023, expectedCap: 1024},
{requestedLen: 1024, expectedCap: 1024},
{requestedLen: 33554431, expectedCap: 33554432},
{requestedLen: 33554432, expectedCap: 33554432},
// Above 32 MiB skip the pool and allocate exactly the requested size.
{requestedLen: 33554433, expectedCap: 33554433},
}
for _, tt := range tests {
buf := iobufpool.Get(tt.requestedLen)
assert.Equalf(t, tt.requestedLen, len(*buf), "bad len for requestedLen: %d", tt.requestedLen)

assert.Equalf(t, tt.expectedCap, cap(*buf), "bad cap for requestedLen: %d", tt.requestedLen)
}
}
func TestPutHandlesWrongSizedBuffers(t *testing.T) {
for _, putBufSize := range []int{0, 1, 128, 250, 256, 257, 1023, 1024, 1025, 1 << 28} {
putBuf := make([]byte, putBufSize)
iobufpool.Put(&putBuf)
tests := []struct {
requestedLen int
expectedCap int
}{
{requestedLen: 0, expectedCap: 256},
{requestedLen: 128, expectedCap: 256},
{requestedLen: 255, expectedCap: 256},
{requestedLen: 256, expectedCap: 256},
{requestedLen: 257, expectedCap: 512},
{requestedLen: 511, expectedCap: 512},
{requestedLen: 512, expectedCap: 512},
{requestedLen: 513, expectedCap: 1024},
{requestedLen: 1023, expectedCap: 1024},
{requestedLen: 1024, expectedCap: 1024},
{requestedLen: 33554431, expectedCap: 33554432},
{requestedLen: 33554432, expectedCap: 33554432},
// Above 32 MiB skip the pool and allocate exactly the requested size.
{requestedLen: 33554433, expectedCap: 33554433},
}
for _, tt := range tests {
getBuf := iobufpool.Get(tt.requestedLen)
assert.Equalf(t, tt.requestedLen, len(*getBuf), "len(putBuf): %d, requestedLen: %d", len(putBuf), tt.requestedLen)
assert.Equalf(t, tt.expectedCap, cap(*getBuf), "cap(putBuf): %d, requestedLen: %d", cap(putBuf), tt.requestedLen)
}
}
}
func TestPutGetBufferReuse(t *testing.T) {
// There is no way to guarantee a buffer will be reused. It should be, but a GC between the Put and the Get will cause
// it not to be. So try many times.
for i := 0; i < 100000; i++ {
buf := iobufpool.Get(4)
(*buf)[0] = 1
iobufpool.Put(buf)
buf = iobufpool.Get(4)
if (*buf)[0] == 1 {
return
}
}
t.Error("buffer was never reused")
}

6
internal/pgio/README.md Normal file
View File

@ -0,0 +1,6 @@
# pgio
Package pgio is a low-level toolkit for building messages in the PostgreSQL wire protocol.
pgio provides functions for appending integers to a []byte while doing byte
order conversion.
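
A hedged sketch of the append helpers (defined in internal/pgio/write.go below) in use; pgio is an internal package, so this only compiles from inside the pgx module.

package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/internal/pgio"
)

func main() {
	buf := make([]byte, 0, 16)
	buf = pgio.AppendInt32(buf, 1)       // appends big-endian 00 00 00 01
	buf = pgio.AppendUint16(buf, 0x1234) // appends big-endian 12 34
	fmt.Printf("% x\n", buf)             // 00 00 00 01 12 34
}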

6
internal/pgio/doc.go Normal file
View File

@ -0,0 +1,6 @@
// Package pgio is a low-level toolkit for building messages in the PostgreSQL wire protocol.
/*
pgio provides functions for appending integers to a []byte while doing byte
order conversion.
*/
package pgio

40
internal/pgio/write.go Normal file
View File

@ -0,0 +1,40 @@
package pgio
import "encoding/binary"
func AppendUint16(buf []byte, n uint16) []byte {
wp := len(buf)
buf = append(buf, 0, 0)
binary.BigEndian.PutUint16(buf[wp:], n)
return buf
}
func AppendUint32(buf []byte, n uint32) []byte {
wp := len(buf)
buf = append(buf, 0, 0, 0, 0)
binary.BigEndian.PutUint32(buf[wp:], n)
return buf
}
func AppendUint64(buf []byte, n uint64) []byte {
wp := len(buf)
buf = append(buf, 0, 0, 0, 0, 0, 0, 0, 0)
binary.BigEndian.PutUint64(buf[wp:], n)
return buf
}
func AppendInt16(buf []byte, n int16) []byte {
return AppendUint16(buf, uint16(n))
}
func AppendInt32(buf []byte, n int32) []byte {
return AppendUint32(buf, uint32(n))
}
func AppendInt64(buf []byte, n int64) []byte {
return AppendUint64(buf, uint64(n))
}
func SetInt32(buf []byte, n int32) {
binary.BigEndian.PutUint32(buf, uint32(n))
}

View File

@ -0,0 +1,78 @@
package pgio
import (
"reflect"
"testing"
)
func TestAppendUint16NilBuf(t *testing.T) {
buf := AppendUint16(nil, 1)
if !reflect.DeepEqual(buf, []byte{0, 1}) {
t.Errorf("AppendUint16(nil, 1) => %v, want %v", buf, []byte{0, 1})
}
}
func TestAppendUint16EmptyBuf(t *testing.T) {
buf := []byte{}
buf = AppendUint16(buf, 1)
if !reflect.DeepEqual(buf, []byte{0, 1}) {
t.Errorf("AppendUint16(nil, 1) => %v, want %v", buf, []byte{0, 1})
}
}
func TestAppendUint16BufWithCapacityDoesNotAllocate(t *testing.T) {
buf := make([]byte, 0, 4)
AppendUint16(buf, 1)
buf = buf[0:2]
if !reflect.DeepEqual(buf, []byte{0, 1}) {
t.Errorf("AppendUint16(nil, 1) => %v, want %v", buf, []byte{0, 1})
}
}
func TestAppendUint32NilBuf(t *testing.T) {
buf := AppendUint32(nil, 1)
if !reflect.DeepEqual(buf, []byte{0, 0, 0, 1}) {
t.Errorf("AppendUint32(nil, 1) => %v, want %v", buf, []byte{0, 0, 0, 1})
}
}
func TestAppendUint32EmptyBuf(t *testing.T) {
buf := []byte{}
buf = AppendUint32(buf, 1)
if !reflect.DeepEqual(buf, []byte{0, 0, 0, 1}) {
t.Errorf("AppendUint32(nil, 1) => %v, want %v", buf, []byte{0, 0, 0, 1})
}
}
func TestAppendUint32BufWithCapacityDoesNotAllocate(t *testing.T) {
buf := make([]byte, 0, 4)
AppendUint32(buf, 1)
buf = buf[0:4]
if !reflect.DeepEqual(buf, []byte{0, 0, 0, 1}) {
t.Errorf("AppendUint32(nil, 1) => %v, want %v", buf, []byte{0, 0, 0, 1})
}
}
func TestAppendUint64NilBuf(t *testing.T) {
buf := AppendUint64(nil, 1)
if !reflect.DeepEqual(buf, []byte{0, 0, 0, 0, 0, 0, 0, 1}) {
t.Errorf("AppendUint64(nil, 1) => %v, want %v", buf, []byte{0, 0, 0, 0, 0, 0, 0, 1})
}
}
func TestAppendUint64EmptyBuf(t *testing.T) {
buf := []byte{}
buf = AppendUint64(buf, 1)
if !reflect.DeepEqual(buf, []byte{0, 0, 0, 0, 0, 0, 0, 1}) {
t.Errorf("AppendUint64(nil, 1) => %v, want %v", buf, []byte{0, 0, 0, 0, 0, 0, 0, 1})
}
}
func TestAppendUint64BufWithCapacityDoesNotAllocate(t *testing.T) {
buf := make([]byte, 0, 8)
AppendUint64(buf, 1)
buf = buf[0:8]
if !reflect.DeepEqual(buf, []byte{0, 0, 0, 0, 0, 0, 0, 1}) {
t.Errorf("AppendUint64(nil, 1) => %v, want %v", buf, []byte{0, 0, 0, 0, 0, 0, 0, 1})
}
}

136
internal/pgmock/pgmock.go Normal file
View File

@ -0,0 +1,136 @@
// Package pgmock provides the ability to mock a PostgreSQL server.
package pgmock
import (
"fmt"
"io"
"reflect"
"github.com/jackc/pgx/v5/pgproto3"
)
type Step interface {
Step(*pgproto3.Backend) error
}
type Script struct {
Steps []Step
}
func (s *Script) Run(backend *pgproto3.Backend) error {
for _, step := range s.Steps {
err := step.Step(backend)
if err != nil {
return err
}
}
return nil
}
func (s *Script) Step(backend *pgproto3.Backend) error {
return s.Run(backend)
}
type expectMessageStep struct {
want pgproto3.FrontendMessage
any bool
}
func (e *expectMessageStep) Step(backend *pgproto3.Backend) error {
msg, err := backend.Receive()
if err != nil {
return err
}
if e.any && reflect.TypeOf(msg) == reflect.TypeOf(e.want) {
return nil
}
if !reflect.DeepEqual(msg, e.want) {
return fmt.Errorf("msg => %#v, e.want => %#v", msg, e.want)
}
return nil
}
type expectStartupMessageStep struct {
want *pgproto3.StartupMessage
any bool
}
func (e *expectStartupMessageStep) Step(backend *pgproto3.Backend) error {
msg, err := backend.ReceiveStartupMessage()
if err != nil {
return err
}
if e.any {
return nil
}
if !reflect.DeepEqual(msg, e.want) {
return fmt.Errorf("msg => %#v, e.want => %#v", msg, e.want)
}
return nil
}
func ExpectMessage(want pgproto3.FrontendMessage) Step {
return expectMessage(want, false)
}
func ExpectAnyMessage(want pgproto3.FrontendMessage) Step {
return expectMessage(want, true)
}
func expectMessage(want pgproto3.FrontendMessage, any bool) Step {
if want, ok := want.(*pgproto3.StartupMessage); ok {
return &expectStartupMessageStep{want: want, any: any}
}
return &expectMessageStep{want: want, any: any}
}
type sendMessageStep struct {
msg pgproto3.BackendMessage
}
func (e *sendMessageStep) Step(backend *pgproto3.Backend) error {
backend.Send(e.msg)
return backend.Flush()
}
func SendMessage(msg pgproto3.BackendMessage) Step {
return &sendMessageStep{msg: msg}
}
type waitForCloseMessageStep struct{}
func (e *waitForCloseMessageStep) Step(backend *pgproto3.Backend) error {
for {
msg, err := backend.Receive()
if err == io.EOF {
return nil
} else if err != nil {
return err
}
if _, ok := msg.(*pgproto3.Terminate); ok {
return nil
}
}
}
func WaitForClose() Step {
return &waitForCloseMessageStep{}
}
func AcceptUnauthenticatedConnRequestSteps() []Step {
return []Step{
ExpectAnyMessage(&pgproto3.StartupMessage{ProtocolVersion: pgproto3.ProtocolVersionNumber, Parameters: map[string]string{}}),
SendMessage(&pgproto3.AuthenticationOk{}),
SendMessage(&pgproto3.BackendKeyData{ProcessID: 0, SecretKey: 0}),
SendMessage(&pgproto3.ReadyForQuery{TxStatus: 'I'}),
}
}

View File

@ -0,0 +1,91 @@
package pgmock_test
import (
"context"
"fmt"
"net"
"strings"
"testing"
"time"
"github.com/jackc/pgx/v5/internal/pgmock"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgproto3"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestScript(t *testing.T) {
script := &pgmock.Script{
Steps: pgmock.AcceptUnauthenticatedConnRequestSteps(),
}
script.Steps = append(script.Steps, pgmock.ExpectMessage(&pgproto3.Query{String: "select 42"}))
script.Steps = append(script.Steps, pgmock.SendMessage(&pgproto3.RowDescription{
Fields: []pgproto3.FieldDescription{
{
Name: []byte("?column?"),
TableOID: 0,
TableAttributeNumber: 0,
DataTypeOID: 23,
DataTypeSize: 4,
TypeModifier: -1,
Format: 0,
},
},
}))
script.Steps = append(script.Steps, pgmock.SendMessage(&pgproto3.DataRow{
Values: [][]byte{[]byte("42")},
}))
script.Steps = append(script.Steps, pgmock.SendMessage(&pgproto3.CommandComplete{CommandTag: []byte("SELECT 1")}))
script.Steps = append(script.Steps, pgmock.SendMessage(&pgproto3.ReadyForQuery{TxStatus: 'I'}))
script.Steps = append(script.Steps, pgmock.ExpectMessage(&pgproto3.Terminate{}))
ln, err := net.Listen("tcp", "127.0.0.1:")
require.NoError(t, err)
defer ln.Close()
serverErrChan := make(chan error, 1)
go func() {
defer close(serverErrChan)
conn, err := ln.Accept()
if err != nil {
serverErrChan <- err
return
}
defer conn.Close()
err = conn.SetDeadline(time.Now().Add(time.Second))
if err != nil {
serverErrChan <- err
return
}
err = script.Run(pgproto3.NewBackend(conn, conn))
if err != nil {
serverErrChan <- err
return
}
}()
host, port, _ := strings.Cut(ln.Addr().String(), ":")
connStr := fmt.Sprintf("sslmode=disable host=%s port=%s", host, port)
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
pgConn, err := pgconn.Connect(ctx, connStr)
require.NoError(t, err)
results, err := pgConn.Exec(ctx, "select 42").ReadAll()
assert.NoError(t, err)
assert.Len(t, results, 1)
assert.Nil(t, results[0].Err)
assert.Equal(t, "SELECT 1", results[0].CommandTag.String())
assert.Len(t, results[0].Rows, 1)
assert.Equal(t, "42", string(results[0].Rows[0][0]))
pgConn.Close(ctx)
assert.NoError(t, <-serverErrChan)
}

View File

@ -0,0 +1,60 @@
#!/usr/bin/env bash
current_branch=$(git rev-parse --abbrev-ref HEAD)
if [ "$current_branch" == "HEAD" ]; then
current_branch=$(git rev-parse HEAD)
fi
restore_branch() {
echo "Restoring original branch/commit: $current_branch"
git checkout "$current_branch"
}
trap restore_branch EXIT
# Check if there are uncommitted changes
if ! git diff --quiet || ! git diff --cached --quiet; then
echo "There are uncommitted changes. Please commit or stash them before running this script."
exit 1
fi
# Ensure that at least one commit argument is passed
if [ "$#" -lt 1 ]; then
echo "Usage: $0 <commit1> <commit2> ... <commitN>"
exit 1
fi
commits=("$@")
benchmarks_dir=benchmarks
if ! mkdir -p "${benchmarks_dir}"; then
echo "Unable to create dir for benchmarks data"
exit 1
fi
# Benchmark results
bench_files=()
# Run benchmark for each listed commit
for i in "${!commits[@]}"; do
commit="${commits[i]}"
git checkout "$commit" || {
echo "Failed to checkout $commit"
exit 1
}
# Sanitized commit message
commit_message=$(git log -1 --pretty=format:"%s" | tr -c '[:alnum:]-_' '_')
# Benchmark data will go there
bench_file="${benchmarks_dir}/${i}_${commit_message}.bench"
if ! go test -bench=. -count=10 >"$bench_file"; then
echo "Benchmarking failed for commit $commit"
exit 1
fi
bench_files+=("$bench_file")
done
# Requires benchstat: go install golang.org/x/perf/cmd/benchstat@latest
benchstat "${bench_files[@]}"

View File

@ -3,97 +3,209 @@ package sanitize
import (
"bytes"
"encoding/hex"
"fmt"
"slices"
"strconv"
"strings"
"sync"
"time"
"unicode/utf8"
errors "golang.org/x/xerrors"
)
// Part is either a string or an int. A string is raw SQL. An int is an
// argument placeholder.
type Part interface{}
type Part any
type Query struct {
Parts []Part
}
func (q *Query) Sanitize(args ...interface{}) (string, error) {
// utf8.DecodeRuneInString returns utf8.RuneError both for invalid UTF-8 and for a literal U+FFFD -- the Unicode
// replacement character. A utf8.RuneError decoded with width 3 is the literal replacement character, not an error.
//
// https://github.com/jackc/pgx/issues/1380
const replacementcharacterwidth = 3
const maxBufSize = 16384 // 16 KiB
var bufPool = &pool[*bytes.Buffer]{
new: func() *bytes.Buffer {
return &bytes.Buffer{}
},
reset: func(b *bytes.Buffer) bool {
n := b.Len()
b.Reset()
return n < maxBufSize
},
}
var null = []byte("null")
func (q *Query) Sanitize(args ...any) (string, error) {
argUse := make([]bool, len(args))
buf := &bytes.Buffer{}
buf := bufPool.get()
defer bufPool.put(buf)
for _, part := range q.Parts {
var str string
switch part := part.(type) {
case string:
str = part
buf.WriteString(part)
case int:
argIdx := part - 1
if argIdx >= len(args) {
return "", errors.Errorf("insufficient arguments")
var p []byte
if argIdx < 0 {
return "", fmt.Errorf("first sql argument must be > 0")
}
if argIdx >= len(args) {
return "", fmt.Errorf("insufficient arguments")
}
// Prevent SQL injection via Line Comment Creation
// https://github.com/jackc/pgx/security/advisories/GHSA-m7wr-2xf7-cm9p
buf.WriteByte(' ')
arg := args[argIdx]
switch arg := arg.(type) {
case nil:
str = "null"
p = null
case int64:
str = strconv.FormatInt(arg, 10)
p = strconv.AppendInt(buf.AvailableBuffer(), arg, 10)
case float64:
str = strconv.FormatFloat(arg, 'f', -1, 64)
p = strconv.AppendFloat(buf.AvailableBuffer(), arg, 'f', -1, 64)
case bool:
str = strconv.FormatBool(arg)
p = strconv.AppendBool(buf.AvailableBuffer(), arg)
case []byte:
str = QuoteBytes(arg)
p = QuoteBytes(buf.AvailableBuffer(), arg)
case string:
str = QuoteString(arg)
p = QuoteString(buf.AvailableBuffer(), arg)
case time.Time:
str = arg.Truncate(time.Microsecond).Format("'2006-01-02 15:04:05.999999999Z07:00:00'")
p = arg.Truncate(time.Microsecond).
AppendFormat(buf.AvailableBuffer(), "'2006-01-02 15:04:05.999999999Z07:00:00'")
default:
return "", errors.Errorf("invalid arg type: %T", arg)
return "", fmt.Errorf("invalid arg type: %T", arg)
}
argUse[argIdx] = true
buf.Write(p)
// Prevent SQL injection via Line Comment Creation
// https://github.com/jackc/pgx/security/advisories/GHSA-m7wr-2xf7-cm9p
buf.WriteByte(' ')
default:
return "", errors.Errorf("invalid Part type: %T", part)
return "", fmt.Errorf("invalid Part type: %T", part)
}
buf.WriteString(str)
}
for i, used := range argUse {
if !used {
return "", errors.Errorf("unused argument: %d", i)
return "", fmt.Errorf("unused argument: %d", i)
}
}
return buf.String(), nil
}
func NewQuery(sql string) (*Query, error) {
l := &sqlLexer{
src: sql,
stateFn: rawState,
query := &Query{}
query.init(sql)
return query, nil
}
var sqlLexerPool = &pool[*sqlLexer]{
new: func() *sqlLexer {
return &sqlLexer{}
},
reset: func(sl *sqlLexer) bool {
*sl = sqlLexer{}
return true
},
}
func (q *Query) init(sql string) {
parts := q.Parts[:0]
if parts == nil {
// dirty but fast heuristic to preallocate for ~90% of use cases
n := strings.Count(sql, "$") + strings.Count(sql, "--") + 1
parts = make([]Part, 0, n)
}
l := sqlLexerPool.get()
defer sqlLexerPool.put(l)
l.src = sql
l.stateFn = rawState
l.parts = parts
for l.stateFn != nil {
l.stateFn = l.stateFn(l)
}
query := &Query{Parts: l.parts}
return query, nil
q.Parts = l.parts
}
func QuoteString(str string) string {
return "'" + strings.Replace(str, "'", "''", -1) + "'"
func QuoteString(dst []byte, str string) []byte {
const quote = '\''
// Preallocate space for the worst case scenario
dst = slices.Grow(dst, len(str)*2+2)
// Add opening quote
dst = append(dst, quote)
// Iterate through the string without allocating
for i := 0; i < len(str); i++ {
if str[i] == quote {
dst = append(dst, quote, quote)
} else {
dst = append(dst, str[i])
}
}
// Add closing quote
dst = append(dst, quote)
return dst
}
func QuoteBytes(buf []byte) string {
return `'\x` + hex.EncodeToString(buf) + "'"
func QuoteBytes(dst, buf []byte) []byte {
if len(buf) == 0 {
return append(dst, `'\x'`...)
}
// Calculate required length
requiredLen := 3 + hex.EncodedLen(len(buf)) + 1
// Ensure dst has enough capacity
if cap(dst)-len(dst) < requiredLen {
newDst := make([]byte, len(dst), len(dst)+requiredLen)
copy(newDst, dst)
dst = newDst
}
// Record original length and extend slice
origLen := len(dst)
dst = dst[:origLen+requiredLen]
// Add prefix
dst[origLen] = '\''
dst[origLen+1] = '\\'
dst[origLen+2] = 'x'
// Encode bytes directly into dst
hex.Encode(dst[origLen+3:len(dst)-1], buf)
// Add suffix
dst[len(dst)-1] = '\''
return dst
}
type sqlLexer struct {
src string
start int
pos int
nested int // multiline comment nesting level.
stateFn stateFn
parts []Part
}
@ -125,12 +237,26 @@ func rawState(l *sqlLexer) stateFn {
l.start = l.pos
return placeholderState
}
case utf8.RuneError:
if l.pos-l.start > 0 {
l.parts = append(l.parts, l.src[l.start:l.pos])
l.start = l.pos
case '-':
nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
if nextRune == '-' {
l.pos += width
return oneLineCommentState
}
case '/':
nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
if nextRune == '*' {
l.pos += width
return multilineCommentState
}
case utf8.RuneError:
if width != replacementcharacterwidth {
if l.pos-l.start > 0 {
l.parts = append(l.parts, l.src[l.start:l.pos])
l.start = l.pos
}
return nil
}
return nil
}
}
}
@ -148,11 +274,13 @@ func singleQuoteState(l *sqlLexer) stateFn {
}
l.pos += width
case utf8.RuneError:
if l.pos-l.start > 0 {
l.parts = append(l.parts, l.src[l.start:l.pos])
l.start = l.pos
if width != replacementcharacterwidth {
if l.pos-l.start > 0 {
l.parts = append(l.parts, l.src[l.start:l.pos])
l.start = l.pos
}
return nil
}
return nil
}
}
}
@ -170,11 +298,13 @@ func doubleQuoteState(l *sqlLexer) stateFn {
}
l.pos += width
case utf8.RuneError:
if l.pos-l.start > 0 {
l.parts = append(l.parts, l.src[l.start:l.pos])
l.start = l.pos
if width != replacementcharacterwidth {
if l.pos-l.start > 0 {
l.parts = append(l.parts, l.src[l.start:l.pos])
l.start = l.pos
}
return nil
}
return nil
}
}
}
@ -216,22 +346,115 @@ func escapeStringState(l *sqlLexer) stateFn {
}
l.pos += width
case utf8.RuneError:
if l.pos-l.start > 0 {
l.parts = append(l.parts, l.src[l.start:l.pos])
l.start = l.pos
if width != replacementcharacterwidth {
if l.pos-l.start > 0 {
l.parts = append(l.parts, l.src[l.start:l.pos])
l.start = l.pos
}
return nil
}
return nil
}
}
}
func oneLineCommentState(l *sqlLexer) stateFn {
for {
r, width := utf8.DecodeRuneInString(l.src[l.pos:])
l.pos += width
switch r {
case '\\':
_, width = utf8.DecodeRuneInString(l.src[l.pos:])
l.pos += width
case '\n', '\r':
return rawState
case utf8.RuneError:
if width != replacementcharacterwidth {
if l.pos-l.start > 0 {
l.parts = append(l.parts, l.src[l.start:l.pos])
l.start = l.pos
}
return nil
}
}
}
}
func multilineCommentState(l *sqlLexer) stateFn {
for {
r, width := utf8.DecodeRuneInString(l.src[l.pos:])
l.pos += width
switch r {
case '/':
nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
if nextRune == '*' {
l.pos += width
l.nested++
}
case '*':
nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
if nextRune != '/' {
continue
}
l.pos += width
if l.nested == 0 {
return rawState
}
l.nested--
case utf8.RuneError:
if width != replacementcharacterwidth {
if l.pos-l.start > 0 {
l.parts = append(l.parts, l.src[l.start:l.pos])
l.start = l.pos
}
return nil
}
}
}
}
var queryPool = &pool[*Query]{
new: func() *Query {
return &Query{}
},
reset: func(q *Query) bool {
n := len(q.Parts)
q.Parts = q.Parts[:0]
return n < 64 // drop too large queries
},
}
// SanitizeSQL replaces placeholder values with args. It quotes and escapes args
// as necessary. This function is only safe when standard_conforming_strings is
// on.
func SanitizeSQL(sql string, args ...interface{}) (string, error) {
query, err := NewQuery(sql)
if err != nil {
return "", err
}
func SanitizeSQL(sql string, args ...any) (string, error) {
query := queryPool.get()
query.init(sql)
defer queryPool.put(query)
return query.Sanitize(args...)
}
type pool[E any] struct {
p sync.Pool
new func() E
reset func(E) bool
}
func (pool *pool[E]) get() E {
v, ok := pool.p.Get().(E)
if !ok {
v = pool.new()
}
return v
}
func (p *pool[E]) put(v E) {
if p.reset(v) {
p.p.Put(v)
}
}
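
A hedged usage sketch of SanitizeSQL. The query and arguments are illustrative; sanitize is an internal package, so this only compiles from inside the pgx module, and the result is only safe when standard_conforming_strings is on.

package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/internal/sanitize"
)

func main() {
	sql, err := sanitize.SanitizeSQL("select * from users where id = $1 and name = $2",
		int64(7), "O'Brien")
	if err != nil {
		panic(err)
	}
	// The arguments are inlined, quoted, and escaped; the extra spaces around each
	// value are the line-comment-injection guard described above.
	fmt.Println(sql)
}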

View File

@ -0,0 +1,62 @@
// sanitize_benchmark_test.go
package sanitize_test
import (
"testing"
"time"
"github.com/jackc/pgx/v5/internal/sanitize"
)
var benchmarkSanitizeResult string
const benchmarkQuery = "" +
`SELECT *
FROM "water_containers"
WHERE NOT "id" = $1 -- int64
AND "tags" NOT IN $2 -- nil
AND "volume" > $3 -- float64
AND "transportable" = $4 -- bool
AND position($5 IN "sign") -- bytes
AND "label" LIKE $6 -- string
AND "created_at" > $7; -- time.Time`
var benchmarkArgs = []any{
int64(12345),
nil,
float64(500),
true,
[]byte("8BADF00D"),
"kombucha's han'dy awokowa",
time.Date(2015, 10, 1, 0, 0, 0, 0, time.UTC),
}
func BenchmarkSanitize(b *testing.B) {
query, err := sanitize.NewQuery(benchmarkQuery)
if err != nil {
b.Fatalf("failed to create query: %v", err)
}
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
benchmarkSanitizeResult, err = query.Sanitize(benchmarkArgs...)
if err != nil {
b.Fatalf("failed to sanitize query: %v", err)
}
}
}
var benchmarkNewSQLResult string
func BenchmarkSanitizeSQL(b *testing.B) {
b.ReportAllocs()
var err error
for i := 0; i < b.N; i++ {
benchmarkNewSQLResult, err = sanitize.SanitizeSQL(benchmarkQuery, benchmarkArgs...)
if err != nil {
b.Fatalf("failed to sanitize SQL: %v", err)
}
}
}

View File

@ -0,0 +1,55 @@
package sanitize_test
import (
"strings"
"testing"
"github.com/jackc/pgx/v5/internal/sanitize"
)
func FuzzQuoteString(f *testing.F) {
const prefix = "prefix"
f.Add("new\nline")
f.Add("sample text")
f.Add("sample q'u'o't'e's")
f.Add("select 'quoted $42', $1")
f.Fuzz(func(t *testing.T, input string) {
got := string(sanitize.QuoteString([]byte(prefix), input))
want := oldQuoteString(input)
quoted, ok := strings.CutPrefix(got, prefix)
if !ok {
t.Fatalf("result has no prefix")
}
if want != quoted {
t.Errorf("got %q", got)
t.Fatalf("want %q", want)
}
})
}
func FuzzQuoteBytes(f *testing.F) {
const prefix = "prefix"
f.Add([]byte(nil))
f.Add([]byte("\n"))
f.Add([]byte("sample text"))
f.Add([]byte("sample q'u'o't'e's"))
f.Add([]byte("select 'quoted $42', $1"))
f.Fuzz(func(t *testing.T, input []byte) {
got := string(sanitize.QuoteBytes([]byte(prefix), input))
want := oldQuoteBytes(input)
quoted, ok := strings.CutPrefix(got, prefix)
if !ok {
t.Fatalf("result has no prefix")
}
if want != quoted {
t.Errorf("got %q", got)
t.Fatalf("want %q", want)
}
})
}

View File

@ -1,10 +1,12 @@
package sanitize_test
import (
"encoding/hex"
"strings"
"testing"
"time"
"github.com/jackc/pgx/v4/internal/sanitize"
"github.com/jackc/pgx/v5/internal/sanitize"
)
func TestNewQuery(t *testing.T) {
@ -60,6 +62,44 @@ func TestNewQuery(t *testing.T) {
sql: `select e'escape string\' $42', $1`,
expected: sanitize.Query{Parts: []sanitize.Part{`select e'escape string\' $42', `, 1}},
},
{
sql: `select /* a baby's toy */ 'barbie', $1`,
expected: sanitize.Query{Parts: []sanitize.Part{`select /* a baby's toy */ 'barbie', `, 1}},
},
{
sql: `select /* *_* */ $1`,
expected: sanitize.Query{Parts: []sanitize.Part{`select /* *_* */ `, 1}},
},
{
sql: `select 42 /* /* /* 42 */ */ */, $1`,
expected: sanitize.Query{Parts: []sanitize.Part{`select 42 /* /* /* 42 */ */ */, `, 1}},
},
{
sql: "select -- a baby's toy\n'barbie', $1",
expected: sanitize.Query{Parts: []sanitize.Part{"select -- a baby's toy\n'barbie', ", 1}},
},
{
sql: "select 42 -- is a Deep Thought's favorite number",
expected: sanitize.Query{Parts: []sanitize.Part{"select 42 -- is a Deep Thought's favorite number"}},
},
{
sql: "select 42, -- \\nis a Deep Thought's favorite number\n$1",
expected: sanitize.Query{Parts: []sanitize.Part{"select 42, -- \\nis a Deep Thought's favorite number\n", 1}},
},
{
sql: "select 42, -- \\nis a Deep Thought's favorite number\r$1",
expected: sanitize.Query{Parts: []sanitize.Part{"select 42, -- \\nis a Deep Thought's favorite number\r", 1}},
},
{
// https://github.com/jackc/pgx/issues/1380
sql: "select 'hello w<>rld'",
expected: sanitize.Query{Parts: []sanitize.Part{"select 'hello w<>rld'"}},
},
{
// Unterminated quoted string
sql: "select 'hello world",
expected: sanitize.Query{Parts: []sanitize.Part{"select 'hello world"}},
},
}
for i, tt := range successTests {
@ -83,58 +123,68 @@ func TestNewQuery(t *testing.T) {
func TestQuerySanitize(t *testing.T) {
successfulTests := []struct {
query sanitize.Query
args []interface{}
args []any
expected string
}{
{
query: sanitize.Query{Parts: []sanitize.Part{"select 42"}},
args: []interface{}{},
args: []any{},
expected: `select 42`,
},
{
query: sanitize.Query{Parts: []sanitize.Part{"select ", 1}},
args: []interface{}{int64(42)},
expected: `select 42`,
args: []any{int64(42)},
expected: `select 42 `,
},
{
query: sanitize.Query{Parts: []sanitize.Part{"select ", 1}},
args: []interface{}{float64(1.23)},
expected: `select 1.23`,
args: []any{float64(1.23)},
expected: `select 1.23 `,
},
{
query: sanitize.Query{Parts: []sanitize.Part{"select ", 1}},
args: []interface{}{true},
expected: `select true`,
args: []any{true},
expected: `select true `,
},
{
query: sanitize.Query{Parts: []sanitize.Part{"select ", 1}},
args: []interface{}{[]byte{0, 1, 2, 3, 255}},
expected: `select '\x00010203ff'`,
args: []any{[]byte{0, 1, 2, 3, 255}},
expected: `select '\x00010203ff' `,
},
{
query: sanitize.Query{Parts: []sanitize.Part{"select ", 1}},
args: []interface{}{nil},
expected: `select null`,
args: []any{nil},
expected: `select null `,
},
{
query: sanitize.Query{Parts: []sanitize.Part{"select ", 1}},
args: []interface{}{"foobar"},
expected: `select 'foobar'`,
args: []any{"foobar"},
expected: `select 'foobar' `,
},
{
query: sanitize.Query{Parts: []sanitize.Part{"select ", 1}},
args: []interface{}{"foo'bar"},
expected: `select 'foo''bar'`,
args: []any{"foo'bar"},
expected: `select 'foo''bar' `,
},
{
query: sanitize.Query{Parts: []sanitize.Part{"select ", 1}},
args: []interface{}{`foo\'bar`},
expected: `select 'foo\''bar'`,
args: []any{`foo\'bar`},
expected: `select 'foo\''bar' `,
},
{
query: sanitize.Query{Parts: []sanitize.Part{"insert ", 1}},
args: []interface{}{time.Date(2020, time.March, 1, 23, 59, 59, 999999999, time.UTC)},
expected: `insert '2020-03-01 23:59:59.999999Z'`,
args: []any{time.Date(2020, time.March, 1, 23, 59, 59, 999999999, time.UTC)},
expected: `insert '2020-03-01 23:59:59.999999Z' `,
},
{
query: sanitize.Query{Parts: []sanitize.Part{"select 1-", 1}},
args: []any{int64(-1)},
expected: `select 1- -1 `,
},
{
query: sanitize.Query{Parts: []sanitize.Part{"select 1-", 1}},
args: []any{float64(-1)},
expected: `select 1- -1 `,
},
}
@ -152,22 +202,22 @@ func TestQuerySanitize(t *testing.T) {
errorTests := []struct {
query sanitize.Query
args []interface{}
args []any
expected string
}{
{
query: sanitize.Query{Parts: []sanitize.Part{"select ", 1, ", ", 2}},
args: []interface{}{int64(42)},
args: []any{int64(42)},
expected: `insufficient arguments`,
},
{
query: sanitize.Query{Parts: []sanitize.Part{"select 'foo'"}},
args: []interface{}{int64(42)},
args: []any{int64(42)},
expected: `unused argument: 0`,
},
{
query: sanitize.Query{Parts: []sanitize.Part{"select ", 1}},
args: []interface{}{42},
args: []any{42},
expected: `invalid arg type: int`,
},
}
@ -179,3 +229,55 @@ func TestQuerySanitize(t *testing.T) {
}
}
}
func TestQuoteString(t *testing.T) {
tc := func(name, input string) {
t.Run(name, func(t *testing.T) {
t.Parallel()
got := string(sanitize.QuoteString(nil, input))
want := oldQuoteString(input)
if got != want {
t.Errorf("got: %s", got)
t.Fatalf("want: %s", want)
}
})
}
tc("empty", "")
tc("text", "abcd")
tc("with quotes", `one's hat is always a cat`)
}
// This function was used before the optimizations.
// Keep it for testing purposes - we want to ensure there are no breaking changes.
func oldQuoteString(str string) string {
return "'" + strings.ReplaceAll(str, "'", "''") + "'"
}
func TestQuoteBytes(t *testing.T) {
tc := func(name string, input []byte) {
t.Run(name, func(t *testing.T) {
t.Parallel()
got := string(sanitize.QuoteBytes(nil, input))
want := oldQuoteBytes(input)
if got != want {
t.Errorf("got: %s", got)
t.Fatalf("want: %s", want)
}
})
}
tc("nil", nil)
tc("empty", []byte{})
tc("text", []byte("abcd"))
}
// This function was used before the optimizations.
// Keep it for testing purposes - we want to ensure there are no breaking changes.
func oldQuoteBytes(buf []byte) string {
return `'\x` + hex.EncodeToString(buf) + "'"
}

View File

@ -0,0 +1,111 @@
package stmtcache
import (
"container/list"
"github.com/jackc/pgx/v5/pgconn"
)
// LRUCache implements Cache with a Least Recently Used (LRU) cache.
type LRUCache struct {
cap int
m map[string]*list.Element
l *list.List
invalidStmts []*pgconn.StatementDescription
}
// NewLRUCache creates a new LRUCache. cap is the maximum size of the cache.
func NewLRUCache(cap int) *LRUCache {
return &LRUCache{
cap: cap,
m: make(map[string]*list.Element),
l: list.New(),
}
}
// Get returns the statement description for sql. Returns nil if not found.
func (c *LRUCache) Get(key string) *pgconn.StatementDescription {
if el, ok := c.m[key]; ok {
c.l.MoveToFront(el)
return el.Value.(*pgconn.StatementDescription)
}
return nil
}
// Put stores sd in the cache. Put panics if sd.SQL is "". Put does nothing if sd.SQL already exists in the cache or
// sd.SQL has been invalidated and HandleInvalidated has not been called yet.
func (c *LRUCache) Put(sd *pgconn.StatementDescription) {
if sd.SQL == "" {
panic("cannot store statement description with empty SQL")
}
if _, present := c.m[sd.SQL]; present {
return
}
// The statement may have been invalidated but not yet handled. Do not re-add it to the cache.
for _, invalidSD := range c.invalidStmts {
if invalidSD.SQL == sd.SQL {
return
}
}
if c.l.Len() == c.cap {
c.invalidateOldest()
}
el := c.l.PushFront(sd)
c.m[sd.SQL] = el
}
// Invalidate invalidates statement description identified by sql. Does nothing if not found.
func (c *LRUCache) Invalidate(sql string) {
if el, ok := c.m[sql]; ok {
delete(c.m, sql)
c.invalidStmts = append(c.invalidStmts, el.Value.(*pgconn.StatementDescription))
c.l.Remove(el)
}
}
// InvalidateAll invalidates all statement descriptions.
func (c *LRUCache) InvalidateAll() {
el := c.l.Front()
for el != nil {
c.invalidStmts = append(c.invalidStmts, el.Value.(*pgconn.StatementDescription))
el = el.Next()
}
c.m = make(map[string]*list.Element)
c.l = list.New()
}
// GetInvalidated returns a slice of all statement descriptions invalidated since the last call to RemoveInvalidated.
func (c *LRUCache) GetInvalidated() []*pgconn.StatementDescription {
return c.invalidStmts
}
// RemoveInvalidated removes all invalidated statement descriptions. No other calls to Cache must be made between a
// call to GetInvalidated and RemoveInvalidated or RemoveInvalidated may remove statement descriptions that were
// never seen by the call to GetInvalidated.
func (c *LRUCache) RemoveInvalidated() {
c.invalidStmts = nil
}
// Len returns the number of cached prepared statement descriptions.
func (c *LRUCache) Len() int {
return c.l.Len()
}
// Cap returns the maximum number of cached prepared statement descriptions.
func (c *LRUCache) Cap() int {
return c.cap
}
func (c *LRUCache) invalidateOldest() {
oldest := c.l.Back()
sd := oldest.Value.(*pgconn.StatementDescription)
c.invalidStmts = append(c.invalidStmts, sd)
delete(c.m, sd.SQL)
c.l.Remove(oldest)
}

View File

@ -0,0 +1,45 @@
// Package stmtcache is a cache for statement descriptions.
package stmtcache
import (
"crypto/sha256"
"encoding/hex"
"github.com/jackc/pgx/v5/pgconn"
)
// StatementName returns a statement name that will be stable for sql across multiple connections and program
// executions.
func StatementName(sql string) string {
digest := sha256.Sum256([]byte(sql))
return "stmtcache_" + hex.EncodeToString(digest[0:24])
}
// Cache caches statement descriptions.
type Cache interface {
// Get returns the statement description for sql. Returns nil if not found.
Get(sql string) *pgconn.StatementDescription
// Put stores sd in the cache. Put panics if sd.SQL is "". Put does nothing if sd.SQL already exists in the cache.
Put(sd *pgconn.StatementDescription)
// Invalidate invalidates statement description identified by sql. Does nothing if not found.
Invalidate(sql string)
// InvalidateAll invalidates all statement descriptions.
InvalidateAll()
// GetInvalidated returns a slice of all statement descriptions invalidated since the last call to RemoveInvalidated.
GetInvalidated() []*pgconn.StatementDescription
// RemoveInvalidated removes all invalidated statement descriptions. No other calls to Cache may be made between a
// call to GetInvalidated and RemoveInvalidated, or RemoveInvalidated may remove statement descriptions that were
// never seen by the call to GetInvalidated.
RemoveInvalidated()
// Len returns the number of cached prepared statement descriptions.
Len() int
// Cap returns the maximum number of cached prepared statement descriptions.
Cap() int
}
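
Taken together, the Cache interface describes a small lifecycle: look up a description with Get, register it with Put, and, after an error that may have broken server-side prepared statements, drain GetInvalidated before calling RemoveInvalidated. A minimal sketch, written as if it sat inside the stmtcache package itself (the demoCacheLifecycle function and the query text are illustrative only, and the package may not be importable from application code):

```go
package stmtcache

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgconn"
)

// demoCacheLifecycle is an illustrative walk through the Cache contract using the
// UnlimitedCache implementation shown in this diff.
func demoCacheLifecycle() {
	var cache Cache = NewUnlimitedCache()

	sql := "select * from users where id = $1"
	if cache.Get(sql) == nil {
		// In real use the description comes from a Parse/Describe round trip; StatementName
		// keeps the same SQL mapped to the same statement name on every connection.
		cache.Put(&pgconn.StatementDescription{Name: StatementName(sql), SQL: sql})
	}

	// After an error that may have invalidated prepared statements, mark the entry,
	// deallocate everything returned by GetInvalidated, then drop the bookkeeping.
	cache.Invalidate(sql)
	for _, sd := range cache.GetInvalidated() {
		fmt.Println("deallocate", sd.Name)
	}
	cache.RemoveInvalidated()
}
```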

View File

@ -0,0 +1,77 @@
package stmtcache
import (
"math"
"github.com/jackc/pgx/v5/pgconn"
)
// UnlimitedCache implements Cache with no capacity limit.
type UnlimitedCache struct {
m map[string]*pgconn.StatementDescription
invalidStmts []*pgconn.StatementDescription
}
// NewUnlimitedCache creates a new UnlimitedCache.
func NewUnlimitedCache() *UnlimitedCache {
return &UnlimitedCache{
m: make(map[string]*pgconn.StatementDescription),
}
}
// Get returns the statement description for sql. Returns nil if not found.
func (c *UnlimitedCache) Get(sql string) *pgconn.StatementDescription {
return c.m[sql]
}
// Put stores sd in the cache. Put panics if sd.SQL is "". Put does nothing if sd.SQL already exists in the cache.
func (c *UnlimitedCache) Put(sd *pgconn.StatementDescription) {
if sd.SQL == "" {
panic("cannot store statement description with empty SQL")
}
if _, present := c.m[sd.SQL]; present {
return
}
c.m[sd.SQL] = sd
}
// Invalidate invalidates statement description identified by sql. Does nothing if not found.
func (c *UnlimitedCache) Invalidate(sql string) {
if sd, ok := c.m[sql]; ok {
delete(c.m, sql)
c.invalidStmts = append(c.invalidStmts, sd)
}
}
// InvalidateAll invalidates all statement descriptions.
func (c *UnlimitedCache) InvalidateAll() {
for _, sd := range c.m {
c.invalidStmts = append(c.invalidStmts, sd)
}
c.m = make(map[string]*pgconn.StatementDescription)
}
// GetInvalidated returns a slice of all statement descriptions invalidated since the last call to RemoveInvalidated.
func (c *UnlimitedCache) GetInvalidated() []*pgconn.StatementDescription {
return c.invalidStmts
}
// RemoveInvalidated removes all invalidated statement descriptions. No other calls to Cache may be made between a
// call to GetInvalidated and RemoveInvalidated, or RemoveInvalidated may remove statement descriptions that were
// never seen by the call to GetInvalidated.
func (c *UnlimitedCache) RemoveInvalidated() {
c.invalidStmts = nil
}
// Len returns the number of cached prepared statement descriptions.
func (c *UnlimitedCache) Len() int {
return len(c.m)
}
// Cap returns the maximum number of cached prepared statement descriptions.
func (c *UnlimitedCache) Cap() int {
return math.MaxInt
}

View File

@ -2,11 +2,17 @@ package pgx
import (
"context"
"errors"
"io"
errors "golang.org/x/xerrors"
"github.com/jackc/pgx/v5/pgtype"
)
// The PostgreSQL wire protocol has a limit of 1 GB - 1 per message. See definition of
// PQ_LARGE_MESSAGE_LIMIT in the PostgreSQL source code. To allow for the other data
// in the message, maxLargeObjectMessageLength should be no larger than 1 GB - 1 KB.
var maxLargeObjectMessageLength = 1024*1024*1024 - 1024
// LargeObjects is a structure used to access the large objects API. It is only valid within the transaction where it
// was created.
//
@ -57,10 +63,10 @@ func (o *LargeObjects) Unlink(ctx context.Context, oid uint32) error {
// A LargeObject is a large object stored on the server. It is only valid within the transaction that it was initialized
// in. It uses the context it was initialized with for all operations. It implements these interfaces:
//
// io.Writer
// io.Reader
// io.Seeker
// io.Closer
// io.Writer
// io.Reader
// io.Seeker
// io.Closer
type LargeObject struct {
ctx context.Context
tx Tx
@ -69,32 +75,65 @@ type LargeObject struct {
// Write writes p to the large object and returns the number of bytes written and an error if not all of p was written.
func (o *LargeObject) Write(p []byte) (int, error) {
var n int
err := o.tx.QueryRow(o.ctx, "select lowrite($1, $2)", o.fd, p).Scan(&n)
if err != nil {
return n, err
nTotal := 0
for {
expected := len(p) - nTotal
if expected == 0 {
break
} else if expected > maxLargeObjectMessageLength {
expected = maxLargeObjectMessageLength
}
var n int
err := o.tx.QueryRow(o.ctx, "select lowrite($1, $2)", o.fd, p[nTotal:nTotal+expected]).Scan(&n)
if err != nil {
return nTotal, err
}
if n < 0 {
return nTotal, errors.New("failed to write to large object")
}
nTotal += n
if n < expected {
return nTotal, errors.New("short write to large object")
} else if n > expected {
return nTotal, errors.New("invalid write to large object")
}
}
if n < 0 {
return 0, errors.New("failed to write to large object")
}
return n, nil
return nTotal, nil
}
// Read reads up to len(p) bytes into p returning the number of bytes read.
func (o *LargeObject) Read(p []byte) (int, error) {
var res []byte
err := o.tx.QueryRow(o.ctx, "select loread($1, $2)", o.fd, len(p)).Scan(&res)
copy(p, res)
if err != nil {
return len(res), err
nTotal := 0
for {
expected := len(p) - nTotal
if expected == 0 {
break
} else if expected > maxLargeObjectMessageLength {
expected = maxLargeObjectMessageLength
}
res := pgtype.PreallocBytes(p[nTotal:])
err := o.tx.QueryRow(o.ctx, "select loread($1, $2)", o.fd, expected).Scan(&res)
// We compute expected so that it always fits into p, so PreallocBytes's ScanBytes
// should never need to allocate a new slice.
nTotal += len(res)
if err != nil {
return nTotal, err
}
if len(res) < expected {
return nTotal, io.EOF
} else if len(res) > expected {
return nTotal, errors.New("invalid read of large object")
}
}
if len(res) < len(p) {
err = io.EOF
}
return len(res), err
return nTotal, nil
}
// Seek moves the current location pointer to the new location specified by offset.
@ -109,13 +148,13 @@ func (o *LargeObject) Tell() (n int64, err error) {
return n, err
}
// Trunctes the large object to size.
// Truncate the large object to size.
func (o *LargeObject) Truncate(size int64) (err error) {
_, err = o.tx.Exec(o.ctx, "select lo_truncate64($1, $2)", o.fd, size)
return err
}
// Close closees the large object descriptor.
// Close the large object descriptor.
func (o *LargeObject) Close() error {
_, err := o.tx.Exec(o.ctx, "select lo_close($1)", o.fd)
return err
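
The chunked Write and Read above are exercised through the ordinary io interfaces of LargeObject. A minimal end-to-end sketch, assuming a DATABASE_URL environment variable and a reachable server (error handling is reduced to log.Fatal for brevity):

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, os.Getenv("DATABASE_URL"))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	// Large objects are only valid inside the transaction that created them.
	tx, err := conn.Begin(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer tx.Rollback(ctx)

	los := tx.LargeObjects()
	oid, err := los.Create(ctx, 0) // 0 lets the server choose the OID
	if err != nil {
		log.Fatal(err)
	}

	obj, err := los.Open(ctx, oid, pgx.LargeObjectModeWrite|pgx.LargeObjectModeRead)
	if err != nil {
		log.Fatal(err)
	}
	// Payloads larger than maxLargeObjectMessageLength are transparently split into
	// multiple lowrite calls by the chunking loop above.
	if _, err := obj.Write([]byte("hello, large object")); err != nil {
		log.Fatal(err)
	}
	if err := obj.Close(); err != nil {
		log.Fatal(err)
	}
	if err := tx.Commit(ctx); err != nil {
		log.Fatal(err)
	}
}
```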

View File

@ -0,0 +1,20 @@
package pgx
import (
"testing"
)
// SetMaxLargeObjectMessageLength sets internal maxLargeObjectMessageLength variable
// to the given length for the duration of the test.
//
// Tests using this helper should not use t.Parallel().
func SetMaxLargeObjectMessageLength(t *testing.T, length int) {
t.Helper()
original := maxLargeObjectMessageLength
t.Cleanup(func() {
maxLargeObjectMessageLength = original
})
maxLargeObjectMessageLength = length
}

View File

@ -7,14 +7,16 @@ import (
"testing"
"time"
"github.com/jackc/pgconn"
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgxtest"
)
func TestLargeObjects(t *testing.T) {
t.Parallel()
// We use a very short limit to test chunking logic.
pgx.SetMaxLargeObjectMessageLength(t, 2)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
defer cancel()
conn, err := pgx.Connect(ctx, os.Getenv("PGX_TEST_DATABASE"))
@ -22,6 +24,8 @@ func TestLargeObjects(t *testing.T) {
t.Fatal(err)
}
pgxtest.SkipCockroachDB(t, conn, "Server does not support large objects")
tx, err := conn.Begin(ctx)
if err != nil {
t.Fatal(err)
@ -30,10 +34,11 @@ func TestLargeObjects(t *testing.T) {
testLargeObjects(t, ctx, tx)
}
func TestLargeObjectsPreferSimpleProtocol(t *testing.T) {
t.Parallel()
func TestLargeObjectsSimpleProtocol(t *testing.T) {
// We use a very short limit to test chunking logic.
pgx.SetMaxLargeObjectMessageLength(t, 2)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
defer cancel()
config, err := pgx.ParseConfig(os.Getenv("PGX_TEST_DATABASE"))
@ -41,13 +46,15 @@ func TestLargeObjectsPreferSimpleProtocol(t *testing.T) {
t.Fatal(err)
}
config.PreferSimpleProtocol = true
config.DefaultQueryExecMode = pgx.QueryExecModeSimpleProtocol
conn, err := pgx.ConnectConfig(ctx, config)
if err != nil {
t.Fatal(err)
}
pgxtest.SkipCockroachDB(t, conn, "Server does not support large objects")
tx, err := conn.Begin(ctx)
if err != nil {
t.Fatal(err)
@ -155,9 +162,10 @@ func testLargeObjects(t *testing.T, ctx context.Context, tx pgx.Tx) {
}
func TestLargeObjectsMultipleTransactions(t *testing.T) {
t.Parallel()
// We use a very short limit to test chunking logic.
pgx.SetMaxLargeObjectMessageLength(t, 2)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
defer cancel()
conn, err := pgx.Connect(ctx, os.Getenv("PGX_TEST_DATABASE"))
@ -165,6 +173,8 @@ func TestLargeObjectsMultipleTransactions(t *testing.T) {
t.Fatal(err)
}
pgxtest.SkipCockroachDB(t, conn, "Server does not support large objects")
tx, err := conn.Begin(ctx)
if err != nil {
t.Fatal(err)

View File

@ -1,49 +0,0 @@
// Package log15adapter provides a logger that writes to a github.com/inconshreveable/log15.Logger
// log.
package log15adapter
import (
"context"
"github.com/jackc/pgx/v4"
)
// Log15Logger interface defines the subset of
// github.com/inconshreveable/log15.Logger that this adapter uses.
type Log15Logger interface {
Debug(msg string, ctx ...interface{})
Info(msg string, ctx ...interface{})
Warn(msg string, ctx ...interface{})
Error(msg string, ctx ...interface{})
Crit(msg string, ctx ...interface{})
}
type Logger struct {
l Log15Logger
}
func NewLogger(l Log15Logger) *Logger {
return &Logger{l: l}
}
func (l *Logger) Log(ctx context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) {
logArgs := make([]interface{}, 0, len(data))
for k, v := range data {
logArgs = append(logArgs, k, v)
}
switch level {
case pgx.LogLevelTrace:
l.l.Debug(msg, append(logArgs, "PGX_LOG_LEVEL", level)...)
case pgx.LogLevelDebug:
l.l.Debug(msg, logArgs...)
case pgx.LogLevelInfo:
l.l.Info(msg, logArgs...)
case pgx.LogLevelWarn:
l.l.Warn(msg, logArgs...)
case pgx.LogLevelError:
l.l.Error(msg, logArgs...)
default:
l.l.Error(msg, append(logArgs, "INVALID_PGX_LOG_LEVEL", level)...)
}
}

View File

@ -1,42 +0,0 @@
// Package logrusadapter provides a logger that writes to a github.com/sirupsen/logrus.Logger
// log.
package logrusadapter
import (
"context"
"github.com/jackc/pgx/v4"
"github.com/sirupsen/logrus"
)
type Logger struct {
l logrus.FieldLogger
}
func NewLogger(l logrus.FieldLogger) *Logger {
return &Logger{l: l}
}
func (l *Logger) Log(ctx context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) {
var logger logrus.FieldLogger
if data != nil {
logger = l.l.WithFields(data)
} else {
logger = l.l
}
switch level {
case pgx.LogLevelTrace:
logger.WithField("PGX_LOG_LEVEL", level).Debug(msg)
case pgx.LogLevelDebug:
logger.Debug(msg)
case pgx.LogLevelInfo:
logger.Info(msg)
case pgx.LogLevelWarn:
logger.Warn(msg)
case pgx.LogLevelError:
logger.Error(msg)
default:
logger.WithField("INVALID_PGX_LOG_LEVEL", level).Error(msg)
}
}

View File

@ -6,13 +6,13 @@ import (
"context"
"fmt"
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v5/tracelog"
)
// TestingLogger interface defines the subset of testing.TB methods used by this
// adapter.
type TestingLogger interface {
Log(args ...interface{})
Log(args ...any)
}
type Logger struct {
@ -23,8 +23,8 @@ func NewLogger(l TestingLogger) *Logger {
return &Logger{l: l}
}
func (l *Logger) Log(ctx context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) {
logArgs := make([]interface{}, 0, 2+len(data))
func (l *Logger) Log(ctx context.Context, level tracelog.LogLevel, msg string, data map[string]any) {
logArgs := make([]any, 0, 2+len(data))
logArgs = append(logArgs, level, msg)
for k, v := range data {
logArgs = append(logArgs, fmt.Sprintf("%s=%v", k, v))

View File

@ -1,42 +0,0 @@
// Package zapadapter provides a logger that writes to a go.uber.org/zap.Logger.
package zapadapter
import (
"context"
"github.com/jackc/pgx/v4"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
type Logger struct {
logger *zap.Logger
}
func NewLogger(logger *zap.Logger) *Logger {
return &Logger{logger: logger.WithOptions(zap.AddCallerSkip(1))}
}
func (pl *Logger) Log(ctx context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) {
fields := make([]zapcore.Field, len(data))
i := 0
for k, v := range data {
fields[i] = zap.Reflect(k, v)
i++
}
switch level {
case pgx.LogLevelTrace:
pl.logger.Debug(msg, append(fields, zap.Stringer("PGX_LOG_LEVEL", level))...)
case pgx.LogLevelDebug:
pl.logger.Debug(msg, fields...)
case pgx.LogLevelInfo:
pl.logger.Info(msg, fields...)
case pgx.LogLevelWarn:
pl.logger.Warn(msg, fields...)
case pgx.LogLevelError:
pl.logger.Error(msg, fields...)
default:
pl.logger.Error(msg, append(fields, zap.Stringer("PGX_LOG_LEVEL", level))...)
}
}

View File

@ -1,42 +0,0 @@
// Package zerologadapter provides a logger that writes to a github.com/rs/zerolog.
package zerologadapter
import (
"context"
"github.com/jackc/pgx/v4"
"github.com/rs/zerolog"
)
type Logger struct {
logger zerolog.Logger
}
// NewLogger accepts a zerolog.Logger as input and returns a new custom pgx
// logging fascade as output.
func NewLogger(logger zerolog.Logger) *Logger {
return &Logger{
logger: logger.With().Str("module", "pgx").Logger(),
}
}
func (pl *Logger) Log(ctx context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) {
var zlevel zerolog.Level
switch level {
case pgx.LogLevelNone:
zlevel = zerolog.NoLevel
case pgx.LogLevelError:
zlevel = zerolog.ErrorLevel
case pgx.LogLevelWarn:
zlevel = zerolog.WarnLevel
case pgx.LogLevelInfo:
zlevel = zerolog.InfoLevel
case pgx.LogLevelDebug:
zlevel = zerolog.DebugLevel
default:
zlevel = zerolog.DebugLevel
}
pgxlog := pl.logger.With().Fields(data).Logger()
pgxlog.WithLevel(zlevel).Msg(msg)
}

View File

@ -1,99 +0,0 @@
package pgx
import (
"context"
"encoding/hex"
"fmt"
errors "golang.org/x/xerrors"
)
// The values for log levels are chosen such that the zero value means that no
// log level was specified.
const (
LogLevelTrace = 6
LogLevelDebug = 5
LogLevelInfo = 4
LogLevelWarn = 3
LogLevelError = 2
LogLevelNone = 1
)
// LogLevel represents the pgx logging level. See LogLevel* constants for
// possible values.
type LogLevel int
func (ll LogLevel) String() string {
switch ll {
case LogLevelTrace:
return "trace"
case LogLevelDebug:
return "debug"
case LogLevelInfo:
return "info"
case LogLevelWarn:
return "warn"
case LogLevelError:
return "error"
case LogLevelNone:
return "none"
default:
return fmt.Sprintf("invalid level %d", ll)
}
}
// Logger is the interface used to get logging from pgx internals.
type Logger interface {
// Log a message at the given level with data key/value pairs. data may be nil.
Log(ctx context.Context, level LogLevel, msg string, data map[string]interface{})
}
// LogLevelFromString converts log level string to constant
//
// Valid levels:
// trace
// debug
// info
// warn
// error
// none
func LogLevelFromString(s string) (LogLevel, error) {
switch s {
case "trace":
return LogLevelTrace, nil
case "debug":
return LogLevelDebug, nil
case "info":
return LogLevelInfo, nil
case "warn":
return LogLevelWarn, nil
case "error":
return LogLevelError, nil
case "none":
return LogLevelNone, nil
default:
return 0, errors.New("invalid log level")
}
}
func logQueryArgs(args []interface{}) []interface{} {
logArgs := make([]interface{}, 0, len(args))
for _, a := range args {
switch v := a.(type) {
case []byte:
if len(v) < 64 {
a = hex.EncodeToString(v)
} else {
a = fmt.Sprintf("%x (truncated %d bytes)", v[:64], len(v)-64)
}
case string:
if len(v) > 64 {
a = fmt.Sprintf("%s (truncated %d bytes)", v[:64], len(v)-64)
}
}
logArgs = append(logArgs, a)
}
return logArgs
}

View File

@ -1,23 +0,0 @@
package pgx
import (
"database/sql/driver"
"github.com/jackc/pgtype"
)
func convertDriverValuers(args []interface{}) ([]interface{}, error) {
for i, arg := range args {
switch arg := arg.(type) {
case pgtype.BinaryEncoder:
case pgtype.TextEncoder:
case driver.Valuer:
v, err := callValuerValue(arg)
if err != nil {
return nil, err
}
args[i] = v
}
}
return args, nil
}

152
multitracer/tracer.go Normal file
View File

@ -0,0 +1,152 @@
// Package multitracer provides a Tracer that can combine several tracers into one.
package multitracer
import (
"context"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgxpool"
)
// Tracer can combine several tracers into one.
// You can use New to automatically split tracers by interface.
type Tracer struct {
QueryTracers []pgx.QueryTracer
BatchTracers []pgx.BatchTracer
CopyFromTracers []pgx.CopyFromTracer
PrepareTracers []pgx.PrepareTracer
ConnectTracers []pgx.ConnectTracer
PoolAcquireTracers []pgxpool.AcquireTracer
PoolReleaseTracers []pgxpool.ReleaseTracer
}
// New returns a new Tracer from tracers, automatically splitting them by the tracer interfaces they implement.
func New(tracers ...pgx.QueryTracer) *Tracer {
var t Tracer
for _, tracer := range tracers {
t.QueryTracers = append(t.QueryTracers, tracer)
if batchTracer, ok := tracer.(pgx.BatchTracer); ok {
t.BatchTracers = append(t.BatchTracers, batchTracer)
}
if copyFromTracer, ok := tracer.(pgx.CopyFromTracer); ok {
t.CopyFromTracers = append(t.CopyFromTracers, copyFromTracer)
}
if prepareTracer, ok := tracer.(pgx.PrepareTracer); ok {
t.PrepareTracers = append(t.PrepareTracers, prepareTracer)
}
if connectTracer, ok := tracer.(pgx.ConnectTracer); ok {
t.ConnectTracers = append(t.ConnectTracers, connectTracer)
}
if poolAcquireTracer, ok := tracer.(pgxpool.AcquireTracer); ok {
t.PoolAcquireTracers = append(t.PoolAcquireTracers, poolAcquireTracer)
}
if poolReleaseTracer, ok := tracer.(pgxpool.ReleaseTracer); ok {
t.PoolReleaseTracers = append(t.PoolReleaseTracers, poolReleaseTracer)
}
}
return &t
}
func (t *Tracer) TraceQueryStart(ctx context.Context, conn *pgx.Conn, data pgx.TraceQueryStartData) context.Context {
for _, tracer := range t.QueryTracers {
ctx = tracer.TraceQueryStart(ctx, conn, data)
}
return ctx
}
func (t *Tracer) TraceQueryEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceQueryEndData) {
for _, tracer := range t.QueryTracers {
tracer.TraceQueryEnd(ctx, conn, data)
}
}
func (t *Tracer) TraceBatchStart(ctx context.Context, conn *pgx.Conn, data pgx.TraceBatchStartData) context.Context {
for _, tracer := range t.BatchTracers {
ctx = tracer.TraceBatchStart(ctx, conn, data)
}
return ctx
}
func (t *Tracer) TraceBatchQuery(ctx context.Context, conn *pgx.Conn, data pgx.TraceBatchQueryData) {
for _, tracer := range t.BatchTracers {
tracer.TraceBatchQuery(ctx, conn, data)
}
}
func (t *Tracer) TraceBatchEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceBatchEndData) {
for _, tracer := range t.BatchTracers {
tracer.TraceBatchEnd(ctx, conn, data)
}
}
func (t *Tracer) TraceCopyFromStart(ctx context.Context, conn *pgx.Conn, data pgx.TraceCopyFromStartData) context.Context {
for _, tracer := range t.CopyFromTracers {
ctx = tracer.TraceCopyFromStart(ctx, conn, data)
}
return ctx
}
func (t *Tracer) TraceCopyFromEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceCopyFromEndData) {
for _, tracer := range t.CopyFromTracers {
tracer.TraceCopyFromEnd(ctx, conn, data)
}
}
func (t *Tracer) TracePrepareStart(ctx context.Context, conn *pgx.Conn, data pgx.TracePrepareStartData) context.Context {
for _, tracer := range t.PrepareTracers {
ctx = tracer.TracePrepareStart(ctx, conn, data)
}
return ctx
}
func (t *Tracer) TracePrepareEnd(ctx context.Context, conn *pgx.Conn, data pgx.TracePrepareEndData) {
for _, tracer := range t.PrepareTracers {
tracer.TracePrepareEnd(ctx, conn, data)
}
}
func (t *Tracer) TraceConnectStart(ctx context.Context, data pgx.TraceConnectStartData) context.Context {
for _, tracer := range t.ConnectTracers {
ctx = tracer.TraceConnectStart(ctx, data)
}
return ctx
}
func (t *Tracer) TraceConnectEnd(ctx context.Context, data pgx.TraceConnectEndData) {
for _, tracer := range t.ConnectTracers {
tracer.TraceConnectEnd(ctx, data)
}
}
func (t *Tracer) TraceAcquireStart(ctx context.Context, pool *pgxpool.Pool, data pgxpool.TraceAcquireStartData) context.Context {
for _, tracer := range t.PoolAcquireTracers {
ctx = tracer.TraceAcquireStart(ctx, pool, data)
}
return ctx
}
func (t *Tracer) TraceAcquireEnd(ctx context.Context, pool *pgxpool.Pool, data pgxpool.TraceAcquireEndData) {
for _, tracer := range t.PoolAcquireTracers {
tracer.TraceAcquireEnd(ctx, pool, data)
}
}
func (t *Tracer) TraceRelease(pool *pgxpool.Pool, data pgxpool.TraceReleaseData) {
for _, tracer := range t.PoolReleaseTracers {
tracer.TraceRelease(pool, data)
}
}
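
New accepts anything satisfying pgx.QueryTracer and detects the remaining tracer interfaces by type assertion, so minimal and full tracers can be mixed freely. A wiring sketch, where timingTracer, countingTracer, and the DATABASE_URL environment variable are illustrative assumptions:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/multitracer"
	"github.com/jackc/pgx/v5/pgxpool"
)

// timingTracer only implements pgx.QueryTracer.
type timingTracer struct{}

func (timingTracer) TraceQueryStart(ctx context.Context, _ *pgx.Conn, _ pgx.TraceQueryStartData) context.Context {
	return ctx
}
func (timingTracer) TraceQueryEnd(ctx context.Context, _ *pgx.Conn, data pgx.TraceQueryEndData) {
	fmt.Println("query finished:", data.CommandTag)
}

// countingTracer additionally implements pgx.BatchTracer, so New places it in both lists.
type countingTracer struct{ timingTracer }

func (countingTracer) TraceBatchStart(ctx context.Context, _ *pgx.Conn, _ pgx.TraceBatchStartData) context.Context {
	return ctx
}
func (countingTracer) TraceBatchQuery(context.Context, *pgx.Conn, pgx.TraceBatchQueryData) {}
func (countingTracer) TraceBatchEnd(context.Context, *pgx.Conn, pgx.TraceBatchEndData)     {}

func main() {
	cfg, err := pgxpool.ParseConfig(os.Getenv("DATABASE_URL"))
	if err != nil {
		panic(err)
	}
	// The combined tracer is installed once and fans out to every registered tracer.
	cfg.ConnConfig.Tracer = multitracer.New(timingTracer{}, countingTracer{})

	pool, err := pgxpool.NewWithConfig(context.Background(), cfg)
	if err != nil {
		panic(err)
	}
	defer pool.Close()
}
```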

115
multitracer/tracer_test.go Normal file
View File

@ -0,0 +1,115 @@
package multitracer_test
import (
"context"
"testing"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/multitracer"
"github.com/jackc/pgx/v5/pgxpool"
"github.com/stretchr/testify/require"
)
type testFullTracer struct{}
func (tt *testFullTracer) TraceQueryStart(ctx context.Context, conn *pgx.Conn, data pgx.TraceQueryStartData) context.Context {
return ctx
}
func (tt *testFullTracer) TraceQueryEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceQueryEndData) {
}
func (tt *testFullTracer) TraceBatchStart(ctx context.Context, conn *pgx.Conn, data pgx.TraceBatchStartData) context.Context {
return ctx
}
func (tt *testFullTracer) TraceBatchQuery(ctx context.Context, conn *pgx.Conn, data pgx.TraceBatchQueryData) {
}
func (tt *testFullTracer) TraceBatchEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceBatchEndData) {
}
func (tt *testFullTracer) TraceCopyFromStart(ctx context.Context, conn *pgx.Conn, data pgx.TraceCopyFromStartData) context.Context {
return ctx
}
func (tt *testFullTracer) TraceCopyFromEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceCopyFromEndData) {
}
func (tt *testFullTracer) TracePrepareStart(ctx context.Context, conn *pgx.Conn, data pgx.TracePrepareStartData) context.Context {
return ctx
}
func (tt *testFullTracer) TracePrepareEnd(ctx context.Context, conn *pgx.Conn, data pgx.TracePrepareEndData) {
}
func (tt *testFullTracer) TraceConnectStart(ctx context.Context, data pgx.TraceConnectStartData) context.Context {
return ctx
}
func (tt *testFullTracer) TraceConnectEnd(ctx context.Context, data pgx.TraceConnectEndData) {
}
func (tt *testFullTracer) TraceAcquireStart(ctx context.Context, pool *pgxpool.Pool, data pgxpool.TraceAcquireStartData) context.Context {
return ctx
}
func (tt *testFullTracer) TraceAcquireEnd(ctx context.Context, pool *pgxpool.Pool, data pgxpool.TraceAcquireEndData) {
}
func (tt *testFullTracer) TraceRelease(pool *pgxpool.Pool, data pgxpool.TraceReleaseData) {
}
type testCopyTracer struct{}
func (tt *testCopyTracer) TraceQueryStart(ctx context.Context, conn *pgx.Conn, data pgx.TraceQueryStartData) context.Context {
return ctx
}
func (tt *testCopyTracer) TraceQueryEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceQueryEndData) {
}
func (tt *testCopyTracer) TraceCopyFromStart(ctx context.Context, conn *pgx.Conn, data pgx.TraceCopyFromStartData) context.Context {
return ctx
}
func (tt *testCopyTracer) TraceCopyFromEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceCopyFromEndData) {
}
func TestNew(t *testing.T) {
t.Parallel()
fullTracer := &testFullTracer{}
copyTracer := &testCopyTracer{}
mt := multitracer.New(fullTracer, copyTracer)
require.Equal(
t,
&multitracer.Tracer{
QueryTracers: []pgx.QueryTracer{
fullTracer,
copyTracer,
},
BatchTracers: []pgx.BatchTracer{
fullTracer,
},
CopyFromTracers: []pgx.CopyFromTracer{
fullTracer,
copyTracer,
},
PrepareTracers: []pgx.PrepareTracer{
fullTracer,
},
ConnectTracers: []pgx.ConnectTracer{
fullTracer,
},
PoolAcquireTracers: []pgxpool.AcquireTracer{
fullTracer,
},
PoolReleaseTracers: []pgxpool.ReleaseTracer{
fullTracer,
},
},
mt,
)
}

295
named_args.go Normal file
View File

@ -0,0 +1,295 @@
package pgx
import (
"context"
"fmt"
"strconv"
"strings"
"unicode/utf8"
)
// NamedArgs can be used as the first argument to a query method. It will replace every '@' named placeholder with a '$'
// ordinal placeholder and construct the appropriate arguments.
//
// For example, the following two queries are equivalent:
//
// conn.Query(ctx, "select * from widgets where foo = @foo and bar = @bar", pgx.NamedArgs{"foo": 1, "bar": 2})
// conn.Query(ctx, "select * from widgets where foo = $1 and bar = $2", 1, 2)
//
// Named placeholders are case sensitive and must start with a letter or underscore. Subsequent characters can be
// letters, numbers, or underscores.
type NamedArgs map[string]any
// RewriteQuery implements the QueryRewriter interface.
func (na NamedArgs) RewriteQuery(ctx context.Context, conn *Conn, sql string, args []any) (newSQL string, newArgs []any, err error) {
return rewriteQuery(na, sql, false)
}
// StrictNamedArgs can be used in the same way as NamedArgs, but provided arguments are also checked to include all
// named arguments that the sql query uses, and no extra arguments.
type StrictNamedArgs map[string]any
// RewriteQuery implements the QueryRewriter interface.
func (sna StrictNamedArgs) RewriteQuery(ctx context.Context, conn *Conn, sql string, args []any) (newSQL string, newArgs []any, err error) {
return rewriteQuery(sna, sql, true)
}
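
Both rewriters are normally passed directly as the first argument to a query method rather than calling RewriteQuery by hand. A minimal sketch, where the users table and DATABASE_URL environment variable are illustrative assumptions:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, os.Getenv("DATABASE_URL"))
	if err != nil {
		panic(err)
	}
	defer conn.Close(ctx)

	// NamedArgs substitutes nil for any placeholder the map does not provide.
	_, err = conn.Exec(ctx,
		"update users set active = @active where id = @id",
		pgx.NamedArgs{"id": 1, "active": true},
	)
	if err != nil {
		panic(err)
	}

	// StrictNamedArgs fails fast when the map and the query disagree.
	_, err = conn.Exec(ctx,
		"update users set active = @active where id = @id",
		pgx.StrictNamedArgs{"id": 1},
	)
	fmt.Println(err) // reports that "active" is used by the query but not provided
}
```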
type namedArg string
type sqlLexer struct {
src string
start int
pos int
nested int // multiline comment nesting level.
stateFn stateFn
parts []any
nameToOrdinal map[namedArg]int
}
type stateFn func(*sqlLexer) stateFn
func rewriteQuery(na map[string]any, sql string, isStrict bool) (newSQL string, newArgs []any, err error) {
l := &sqlLexer{
src: sql,
stateFn: rawState,
nameToOrdinal: make(map[namedArg]int, len(na)),
}
for l.stateFn != nil {
l.stateFn = l.stateFn(l)
}
sb := strings.Builder{}
for _, p := range l.parts {
switch p := p.(type) {
case string:
sb.WriteString(p)
case namedArg:
sb.WriteRune('$')
sb.WriteString(strconv.Itoa(l.nameToOrdinal[p]))
}
}
newArgs = make([]any, len(l.nameToOrdinal))
for name, ordinal := range l.nameToOrdinal {
var found bool
newArgs[ordinal-1], found = na[string(name)]
if isStrict && !found {
return "", nil, fmt.Errorf("argument %s found in sql query but not present in StrictNamedArgs", name)
}
}
if isStrict {
for name := range na {
if _, found := l.nameToOrdinal[namedArg(name)]; !found {
return "", nil, fmt.Errorf("argument %s of StrictNamedArgs not found in sql query", name)
}
}
}
return sb.String(), newArgs, nil
}
func rawState(l *sqlLexer) stateFn {
for {
r, width := utf8.DecodeRuneInString(l.src[l.pos:])
l.pos += width
switch r {
case 'e', 'E':
nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
if nextRune == '\'' {
l.pos += width
return escapeStringState
}
case '\'':
return singleQuoteState
case '"':
return doubleQuoteState
case '@':
nextRune, _ := utf8.DecodeRuneInString(l.src[l.pos:])
if isLetter(nextRune) || nextRune == '_' {
if l.pos-l.start > 0 {
l.parts = append(l.parts, l.src[l.start:l.pos-width])
}
l.start = l.pos
return namedArgState
}
case '-':
nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
if nextRune == '-' {
l.pos += width
return oneLineCommentState
}
case '/':
nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
if nextRune == '*' {
l.pos += width
return multilineCommentState
}
case utf8.RuneError:
if l.pos-l.start > 0 {
l.parts = append(l.parts, l.src[l.start:l.pos])
l.start = l.pos
}
return nil
}
}
}
func isLetter(r rune) bool {
return (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z')
}
func namedArgState(l *sqlLexer) stateFn {
for {
r, width := utf8.DecodeRuneInString(l.src[l.pos:])
l.pos += width
if r == utf8.RuneError {
if l.pos-l.start > 0 {
na := namedArg(l.src[l.start:l.pos])
if _, found := l.nameToOrdinal[na]; !found {
l.nameToOrdinal[na] = len(l.nameToOrdinal) + 1
}
l.parts = append(l.parts, na)
l.start = l.pos
}
return nil
} else if !(isLetter(r) || (r >= '0' && r <= '9') || r == '_') {
l.pos -= width
na := namedArg(l.src[l.start:l.pos])
if _, found := l.nameToOrdinal[na]; !found {
l.nameToOrdinal[na] = len(l.nameToOrdinal) + 1
}
l.parts = append(l.parts, namedArg(na))
l.start = l.pos
return rawState
}
}
}
func singleQuoteState(l *sqlLexer) stateFn {
for {
r, width := utf8.DecodeRuneInString(l.src[l.pos:])
l.pos += width
switch r {
case '\'':
nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
if nextRune != '\'' {
return rawState
}
l.pos += width
case utf8.RuneError:
if l.pos-l.start > 0 {
l.parts = append(l.parts, l.src[l.start:l.pos])
l.start = l.pos
}
return nil
}
}
}
func doubleQuoteState(l *sqlLexer) stateFn {
for {
r, width := utf8.DecodeRuneInString(l.src[l.pos:])
l.pos += width
switch r {
case '"':
nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
if nextRune != '"' {
return rawState
}
l.pos += width
case utf8.RuneError:
if l.pos-l.start > 0 {
l.parts = append(l.parts, l.src[l.start:l.pos])
l.start = l.pos
}
return nil
}
}
}
func escapeStringState(l *sqlLexer) stateFn {
for {
r, width := utf8.DecodeRuneInString(l.src[l.pos:])
l.pos += width
switch r {
case '\\':
_, width = utf8.DecodeRuneInString(l.src[l.pos:])
l.pos += width
case '\'':
nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
if nextRune != '\'' {
return rawState
}
l.pos += width
case utf8.RuneError:
if l.pos-l.start > 0 {
l.parts = append(l.parts, l.src[l.start:l.pos])
l.start = l.pos
}
return nil
}
}
}
func oneLineCommentState(l *sqlLexer) stateFn {
for {
r, width := utf8.DecodeRuneInString(l.src[l.pos:])
l.pos += width
switch r {
case '\\':
_, width = utf8.DecodeRuneInString(l.src[l.pos:])
l.pos += width
case '\n', '\r':
return rawState
case utf8.RuneError:
if l.pos-l.start > 0 {
l.parts = append(l.parts, l.src[l.start:l.pos])
l.start = l.pos
}
return nil
}
}
}
func multilineCommentState(l *sqlLexer) stateFn {
for {
r, width := utf8.DecodeRuneInString(l.src[l.pos:])
l.pos += width
switch r {
case '/':
nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
if nextRune == '*' {
l.pos += width
l.nested++
}
case '*':
nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
if nextRune != '/' {
continue
}
l.pos += width
if l.nested == 0 {
return rawState
}
l.nested--
case utf8.RuneError:
if l.pos-l.start > 0 {
l.parts = append(l.parts, l.src[l.start:l.pos])
l.start = l.pos
}
return nil
}
}
}

162
named_args_test.go Normal file
View File

@ -0,0 +1,162 @@
package pgx_test
import (
"context"
"testing"
"github.com/jackc/pgx/v5"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNamedArgsRewriteQuery(t *testing.T) {
t.Parallel()
for i, tt := range []struct {
sql string
args []any
namedArgs pgx.NamedArgs
expectedSQL string
expectedArgs []any
}{
{
sql: "select * from users where id = @id",
namedArgs: pgx.NamedArgs{"id": int32(42)},
expectedSQL: "select * from users where id = $1",
expectedArgs: []any{int32(42)},
},
{
sql: "select * from t where foo < @abc and baz = @def and bar < @abc",
namedArgs: pgx.NamedArgs{"abc": int32(42), "def": int32(1)},
expectedSQL: "select * from t where foo < $1 and baz = $2 and bar < $1",
expectedArgs: []any{int32(42), int32(1)},
},
{
sql: "select @a::int, @b::text",
namedArgs: pgx.NamedArgs{"a": int32(42), "b": "foo"},
expectedSQL: "select $1::int, $2::text",
expectedArgs: []any{int32(42), "foo"},
},
{
sql: "select @Abc::int, @b_4::text, @_c::int",
namedArgs: pgx.NamedArgs{"Abc": int32(42), "b_4": "foo", "_c": int32(1)},
expectedSQL: "select $1::int, $2::text, $3::int",
expectedArgs: []any{int32(42), "foo", int32(1)},
},
{
sql: "at end @",
namedArgs: pgx.NamedArgs{"a": int32(42), "b": "foo"},
expectedSQL: "at end @",
expectedArgs: []any{},
},
{
sql: "ignores without valid character after @ foo bar",
namedArgs: pgx.NamedArgs{"a": int32(42), "b": "foo"},
expectedSQL: "ignores without valid character after @ foo bar",
expectedArgs: []any{},
},
{
sql: "name cannot start with number @1 foo bar",
namedArgs: pgx.NamedArgs{"a": int32(42), "b": "foo"},
expectedSQL: "name cannot start with number @1 foo bar",
expectedArgs: []any{},
},
{
sql: `select *, '@foo' as "@bar" from users where id = @id`,
namedArgs: pgx.NamedArgs{"id": int32(42)},
expectedSQL: `select *, '@foo' as "@bar" from users where id = $1`,
expectedArgs: []any{int32(42)},
},
{
sql: `select * -- @foo
from users -- @single line comments
where id = @id;`,
namedArgs: pgx.NamedArgs{"id": int32(42)},
expectedSQL: `select * -- @foo
from users -- @single line comments
where id = $1;`,
expectedArgs: []any{int32(42)},
},
{
sql: `select * /* @multi line
@comment
*/
/* /* with @nesting */ */
from users
where id = @id;`,
namedArgs: pgx.NamedArgs{"id": int32(42)},
expectedSQL: `select * /* @multi line
@comment
*/
/* /* with @nesting */ */
from users
where id = $1;`,
expectedArgs: []any{int32(42)},
},
{
sql: "extra provided argument",
namedArgs: pgx.NamedArgs{"extra": int32(1)},
expectedSQL: "extra provided argument",
expectedArgs: []any{},
},
{
sql: "@missing argument",
namedArgs: pgx.NamedArgs{},
expectedSQL: "$1 argument",
expectedArgs: []any{nil},
},
// test comments and quotes
} {
sql, args, err := tt.namedArgs.RewriteQuery(context.Background(), nil, tt.sql, tt.args)
require.NoError(t, err)
assert.Equalf(t, tt.expectedSQL, sql, "%d", i)
assert.Equalf(t, tt.expectedArgs, args, "%d", i)
}
}
func TestStrictNamedArgsRewriteQuery(t *testing.T) {
t.Parallel()
for i, tt := range []struct {
sql string
namedArgs pgx.StrictNamedArgs
expectedSQL string
expectedArgs []any
isExpectedError bool
}{
{
sql: "no arguments",
namedArgs: pgx.StrictNamedArgs{},
expectedSQL: "no arguments",
expectedArgs: []any{},
isExpectedError: false,
},
{
sql: "@all @matches",
namedArgs: pgx.StrictNamedArgs{"all": int32(1), "matches": int32(2)},
expectedSQL: "$1 $2",
expectedArgs: []any{int32(1), int32(2)},
isExpectedError: false,
},
{
sql: "extra provided argument",
namedArgs: pgx.StrictNamedArgs{"extra": int32(1)},
isExpectedError: true,
},
{
sql: "@missing argument",
namedArgs: pgx.StrictNamedArgs{},
isExpectedError: true,
},
} {
sql, args, err := tt.namedArgs.RewriteQuery(context.Background(), nil, tt.sql, nil)
if tt.isExpectedError {
assert.Errorf(t, err, "%d", i)
} else {
require.NoErrorf(t, err, "%d", i)
assert.Equalf(t, tt.expectedSQL, sql, "%d", i)
assert.Equalf(t, tt.expectedArgs, args, "%d", i)
}
}
}

View File

@ -5,9 +5,7 @@ import (
"os"
"testing"
"github.com/jackc/pgconn"
"github.com/jackc/pgconn/stmtcache"
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v5"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -19,9 +17,8 @@ func TestPgbouncerStatementCacheDescribe(t *testing.T) {
}
config := mustParseConfig(t, connString)
config.BuildStatementCache = func(conn *pgconn.PgConn) stmtcache.Cache {
return stmtcache.New(conn, stmtcache.ModeDescribe, 1024)
}
config.DefaultQueryExecMode = pgx.QueryExecModeCacheDescribe
config.DescriptionCacheCapacity = 1024
testPgbouncer(t, config, 10, 100)
}
@ -33,8 +30,7 @@ func TestPgbouncerSimpleProtocol(t *testing.T) {
}
config := mustParseConfig(t, connString)
config.BuildStatementCache = nil
config.PreferSimpleProtocol = true
config.DefaultQueryExecMode = pgx.QueryExecModeSimpleProtocol
testPgbouncer(t, config, 10, 100)
}
@ -76,5 +72,4 @@ func testPgbouncer(t *testing.T, config *pgx.ConnConfig, workers, iterations int
for i := 0; i < workers; i++ {
<-doneChan
}
}

29
pgconn/README.md Normal file
View File

@ -0,0 +1,29 @@
# pgconn
Package pgconn is a low-level PostgreSQL database driver. It operates at nearly the same level as the C library libpq.
It is primarily intended to serve as the foundation for higher level libraries such as https://github.com/jackc/pgx.
Applications should handle normal queries with a higher level library and only use pgconn directly when required for
low-level access to PostgreSQL functionality.
## Example Usage
```go
pgConn, err := pgconn.Connect(context.Background(), os.Getenv("DATABASE_URL"))
if err != nil {
log.Fatalln("pgconn failed to connect:", err)
}
defer pgConn.Close(context.Background())
result := pgConn.ExecParams(context.Background(), "SELECT email FROM users WHERE id=$1", [][]byte{[]byte("123")}, nil, nil, nil)
for result.NextRow() {
fmt.Println("User 123 has email:", string(result.Values()[0]))
}
_, err = result.Close()
if err != nil {
log.Fatalln("failed reading result:", err)
}
```
## Testing
See CONTRIBUTING.md for setup instructions.

272
pgconn/auth_scram.go Normal file
View File

@ -0,0 +1,272 @@
// SCRAM-SHA-256 authentication
//
// Resources:
// https://tools.ietf.org/html/rfc5802
// https://tools.ietf.org/html/rfc8265
// https://www.postgresql.org/docs/current/sasl-authentication.html
//
// Inspiration drawn from other implementations:
// https://github.com/lib/pq/pull/608
// https://github.com/lib/pq/pull/788
// https://github.com/lib/pq/pull/833
package pgconn
import (
"bytes"
"crypto/hmac"
"crypto/rand"
"crypto/sha256"
"encoding/base64"
"errors"
"fmt"
"strconv"
"github.com/jackc/pgx/v5/pgproto3"
"golang.org/x/crypto/pbkdf2"
"golang.org/x/text/secure/precis"
)
const clientNonceLen = 18
// Perform SCRAM authentication.
func (c *PgConn) scramAuth(serverAuthMechanisms []string) error {
sc, err := newScramClient(serverAuthMechanisms, c.config.Password)
if err != nil {
return err
}
// Send client-first-message in a SASLInitialResponse
saslInitialResponse := &pgproto3.SASLInitialResponse{
AuthMechanism: "SCRAM-SHA-256",
Data: sc.clientFirstMessage(),
}
c.frontend.Send(saslInitialResponse)
err = c.flushWithPotentialWriteReadDeadlock()
if err != nil {
return err
}
// Receive server-first-message payload in an AuthenticationSASLContinue.
saslContinue, err := c.rxSASLContinue()
if err != nil {
return err
}
err = sc.recvServerFirstMessage(saslContinue.Data)
if err != nil {
return err
}
// Send client-final-message in a SASLResponse
saslResponse := &pgproto3.SASLResponse{
Data: []byte(sc.clientFinalMessage()),
}
c.frontend.Send(saslResponse)
err = c.flushWithPotentialWriteReadDeadlock()
if err != nil {
return err
}
// Receive server-final-message payload in an AuthenticationSASLFinal.
saslFinal, err := c.rxSASLFinal()
if err != nil {
return err
}
return sc.recvServerFinalMessage(saslFinal.Data)
}
func (c *PgConn) rxSASLContinue() (*pgproto3.AuthenticationSASLContinue, error) {
msg, err := c.receiveMessage()
if err != nil {
return nil, err
}
switch m := msg.(type) {
case *pgproto3.AuthenticationSASLContinue:
return m, nil
case *pgproto3.ErrorResponse:
return nil, ErrorResponseToPgError(m)
}
return nil, fmt.Errorf("expected AuthenticationSASLContinue message but received unexpected message %T", msg)
}
func (c *PgConn) rxSASLFinal() (*pgproto3.AuthenticationSASLFinal, error) {
msg, err := c.receiveMessage()
if err != nil {
return nil, err
}
switch m := msg.(type) {
case *pgproto3.AuthenticationSASLFinal:
return m, nil
case *pgproto3.ErrorResponse:
return nil, ErrorResponseToPgError(m)
}
return nil, fmt.Errorf("expected AuthenticationSASLFinal message but received unexpected message %T", msg)
}
type scramClient struct {
serverAuthMechanisms []string
password []byte
clientNonce []byte
clientFirstMessageBare []byte
serverFirstMessage []byte
clientAndServerNonce []byte
salt []byte
iterations int
saltedPassword []byte
authMessage []byte
}
func newScramClient(serverAuthMechanisms []string, password string) (*scramClient, error) {
sc := &scramClient{
serverAuthMechanisms: serverAuthMechanisms,
}
// Ensure server supports SCRAM-SHA-256
hasScramSHA256 := false
for _, mech := range sc.serverAuthMechanisms {
if mech == "SCRAM-SHA-256" {
hasScramSHA256 = true
break
}
}
if !hasScramSHA256 {
return nil, errors.New("server does not support SCRAM-SHA-256")
}
// precis.OpaqueString is equivalent to SASLprep for password.
var err error
sc.password, err = precis.OpaqueString.Bytes([]byte(password))
if err != nil {
// PostgreSQL allows passwords invalid according to SCRAM / SASLprep.
sc.password = []byte(password)
}
buf := make([]byte, clientNonceLen)
_, err = rand.Read(buf)
if err != nil {
return nil, err
}
sc.clientNonce = make([]byte, base64.RawStdEncoding.EncodedLen(len(buf)))
base64.RawStdEncoding.Encode(sc.clientNonce, buf)
return sc, nil
}
func (sc *scramClient) clientFirstMessage() []byte {
sc.clientFirstMessageBare = []byte(fmt.Sprintf("n=,r=%s", sc.clientNonce))
return []byte(fmt.Sprintf("n,,%s", sc.clientFirstMessageBare))
}
func (sc *scramClient) recvServerFirstMessage(serverFirstMessage []byte) error {
sc.serverFirstMessage = serverFirstMessage
buf := serverFirstMessage
if !bytes.HasPrefix(buf, []byte("r=")) {
return errors.New("invalid SCRAM server-first-message received from server: did not include r=")
}
buf = buf[2:]
idx := bytes.IndexByte(buf, ',')
if idx == -1 {
return errors.New("invalid SCRAM server-first-message received from server: did not include s=")
}
sc.clientAndServerNonce = buf[:idx]
buf = buf[idx+1:]
if !bytes.HasPrefix(buf, []byte("s=")) {
return errors.New("invalid SCRAM server-first-message received from server: did not include s=")
}
buf = buf[2:]
idx = bytes.IndexByte(buf, ',')
if idx == -1 {
return errors.New("invalid SCRAM server-first-message received from server: did not include i=")
}
saltStr := buf[:idx]
buf = buf[idx+1:]
if !bytes.HasPrefix(buf, []byte("i=")) {
return errors.New("invalid SCRAM server-first-message received from server: did not include i=")
}
buf = buf[2:]
iterationsStr := buf
var err error
sc.salt, err = base64.StdEncoding.DecodeString(string(saltStr))
if err != nil {
return fmt.Errorf("invalid SCRAM salt received from server: %w", err)
}
sc.iterations, err = strconv.Atoi(string(iterationsStr))
if err != nil || sc.iterations <= 0 {
return fmt.Errorf("invalid SCRAM iteration count received from server: %w", err)
}
if !bytes.HasPrefix(sc.clientAndServerNonce, sc.clientNonce) {
return errors.New("invalid SCRAM nonce: did not start with client nonce")
}
if len(sc.clientAndServerNonce) <= len(sc.clientNonce) {
return errors.New("invalid SCRAM nonce: did not include server nonce")
}
return nil
}
func (sc *scramClient) clientFinalMessage() string {
clientFinalMessageWithoutProof := []byte(fmt.Sprintf("c=biws,r=%s", sc.clientAndServerNonce))
sc.saltedPassword = pbkdf2.Key([]byte(sc.password), sc.salt, sc.iterations, 32, sha256.New)
sc.authMessage = bytes.Join([][]byte{sc.clientFirstMessageBare, sc.serverFirstMessage, clientFinalMessageWithoutProof}, []byte(","))
clientProof := computeClientProof(sc.saltedPassword, sc.authMessage)
return fmt.Sprintf("%s,p=%s", clientFinalMessageWithoutProof, clientProof)
}
func (sc *scramClient) recvServerFinalMessage(serverFinalMessage []byte) error {
if !bytes.HasPrefix(serverFinalMessage, []byte("v=")) {
return errors.New("invalid SCRAM server-final-message received from server")
}
serverSignature := serverFinalMessage[2:]
if !hmac.Equal(serverSignature, computeServerSignature(sc.saltedPassword, sc.authMessage)) {
return errors.New("invalid SCRAM ServerSignature received from server")
}
return nil
}
func computeHMAC(key, msg []byte) []byte {
mac := hmac.New(sha256.New, key)
mac.Write(msg)
return mac.Sum(nil)
}
func computeClientProof(saltedPassword, authMessage []byte) []byte {
clientKey := computeHMAC(saltedPassword, []byte("Client Key"))
storedKey := sha256.Sum256(clientKey)
clientSignature := computeHMAC(storedKey[:], authMessage)
clientProof := make([]byte, len(clientSignature))
for i := 0; i < len(clientSignature); i++ {
clientProof[i] = clientKey[i] ^ clientSignature[i]
}
buf := make([]byte, base64.StdEncoding.EncodedLen(len(clientProof)))
base64.StdEncoding.Encode(buf, clientProof)
return buf
}
func computeServerSignature(saltedPassword, authMessage []byte) []byte {
serverKey := computeHMAC(saltedPassword, []byte("Server Key"))
serverSignature := computeHMAC(serverKey, authMessage)
buf := make([]byte, base64.StdEncoding.EncodedLen(len(serverSignature)))
base64.StdEncoding.Encode(buf, serverSignature)
return buf
}

View File

@ -0,0 +1,73 @@
package pgconn
import (
"strings"
"testing"
)
func BenchmarkCommandTagRowsAffected(b *testing.B) {
benchmarks := []struct {
commandTag string
rowsAffected int64
}{
{"UPDATE 1", 1},
{"UPDATE 123456789", 123456789},
{"INSERT 0 1", 1},
{"INSERT 0 123456789", 123456789},
}
for _, bm := range benchmarks {
ct := CommandTag{s: bm.commandTag}
b.Run(bm.commandTag, func(b *testing.B) {
var n int64
for i := 0; i < b.N; i++ {
n = ct.RowsAffected()
}
if n != bm.rowsAffected {
b.Errorf("expected %d got %d", bm.rowsAffected, n)
}
})
}
}
func BenchmarkCommandTagTypeFromString(b *testing.B) {
ct := CommandTag{s: "UPDATE 1"}
var update bool
for i := 0; i < b.N; i++ {
update = strings.HasPrefix(ct.String(), "UPDATE")
}
if !update {
b.Error("expected update")
}
}
func BenchmarkCommandTagInsert(b *testing.B) {
benchmarks := []struct {
commandTag string
is bool
}{
{"INSERT 1", true},
{"INSERT 1234567890", true},
{"UPDATE 1", false},
{"UPDATE 1234567890", false},
{"DELETE 1", false},
{"DELETE 1234567890", false},
{"SELECT 1", false},
{"SELECT 1234567890", false},
{"UNKNOWN 1234567890", false},
}
for _, bm := range benchmarks {
ct := CommandTag{s: bm.commandTag}
b.Run(bm.commandTag, func(b *testing.B) {
var is bool
for i := 0; i < b.N; i++ {
is = ct.Insert()
}
if is != bm.is {
b.Errorf("expected %v got %v", bm.is, is)
}
})
}
}

250
pgconn/benchmark_test.go Normal file
View File

@ -0,0 +1,250 @@
package pgconn_test
import (
"bytes"
"context"
"os"
"testing"
"github.com/jackc/pgx/v5/pgconn"
"github.com/stretchr/testify/require"
)
func BenchmarkConnect(b *testing.B) {
benchmarks := []struct {
name string
env string
}{
{"Unix socket", "PGX_TEST_UNIX_SOCKET_CONN_STRING"},
{"TCP", "PGX_TEST_TCP_CONN_STRING"},
}
for _, bm := range benchmarks {
bm := bm
b.Run(bm.name, func(b *testing.B) {
connString := os.Getenv(bm.env)
if connString == "" {
b.Skipf("Skipping due to missing environment variable %v", bm.env)
}
for i := 0; i < b.N; i++ {
conn, err := pgconn.Connect(context.Background(), connString)
require.Nil(b, err)
err = conn.Close(context.Background())
require.Nil(b, err)
}
})
}
}
func BenchmarkExec(b *testing.B) {
expectedValues := [][]byte{[]byte("hello"), []byte("42"), []byte("2019-01-01")}
benchmarks := []struct {
name string
ctx context.Context
}{
// Using an empty context other than context.Background() to compare
// performance
{"background context", context.Background()},
{"empty context", context.TODO()},
}
for _, bm := range benchmarks {
bm := bm
b.Run(bm.name, func(b *testing.B) {
conn, err := pgconn.Connect(bm.ctx, os.Getenv("PGX_TEST_DATABASE"))
require.Nil(b, err)
defer closeConn(b, conn)
b.ResetTimer()
for i := 0; i < b.N; i++ {
mrr := conn.Exec(bm.ctx, "select 'hello'::text as a, 42::int4 as b, '2019-01-01'::date")
for mrr.NextResult() {
rr := mrr.ResultReader()
rowCount := 0
for rr.NextRow() {
rowCount++
if len(rr.Values()) != len(expectedValues) {
b.Fatalf("unexpected number of values: %d", len(rr.Values()))
}
for i := range rr.Values() {
if !bytes.Equal(rr.Values()[i], expectedValues[i]) {
b.Fatalf("unexpected values: %s %s", rr.Values()[i], expectedValues[i])
}
}
}
_, err = rr.Close()
if err != nil {
b.Fatal(err)
}
if rowCount != 1 {
b.Fatalf("unexpected rowCount: %d", rowCount)
}
}
err := mrr.Close()
if err != nil {
b.Fatal(err)
}
}
})
}
}
func BenchmarkExecPossibleToCancel(b *testing.B) {
conn, err := pgconn.Connect(context.Background(), os.Getenv("PGX_TEST_DATABASE"))
require.Nil(b, err)
defer closeConn(b, conn)
expectedValues := [][]byte{[]byte("hello"), []byte("42"), []byte("2019-01-01")}
b.ResetTimer()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
for i := 0; i < b.N; i++ {
mrr := conn.Exec(ctx, "select 'hello'::text as a, 42::int4 as b, '2019-01-01'::date")
for mrr.NextResult() {
rr := mrr.ResultReader()
rowCount := 0
for rr.NextRow() {
rowCount++
if len(rr.Values()) != len(expectedValues) {
b.Fatalf("unexpected number of values: %d", len(rr.Values()))
}
for i := range rr.Values() {
if !bytes.Equal(rr.Values()[i], expectedValues[i]) {
b.Fatalf("unexpected values: %s %s", rr.Values()[i], expectedValues[i])
}
}
}
_, err = rr.Close()
if err != nil {
b.Fatal(err)
}
if rowCount != 1 {
b.Fatalf("unexpected rowCount: %d", rowCount)
}
}
err := mrr.Close()
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkExecPrepared(b *testing.B) {
expectedValues := [][]byte{[]byte("hello"), []byte("42"), []byte("2019-01-01")}
benchmarks := []struct {
name string
ctx context.Context
}{
// Using an empty context other than context.Background() to compare
// performance
{"background context", context.Background()},
{"empty context", context.TODO()},
}
for _, bm := range benchmarks {
bm := bm
b.Run(bm.name, func(b *testing.B) {
conn, err := pgconn.Connect(bm.ctx, os.Getenv("PGX_TEST_DATABASE"))
require.Nil(b, err)
defer closeConn(b, conn)
_, err = conn.Prepare(bm.ctx, "ps1", "select 'hello'::text as a, 42::int4 as b, '2019-01-01'::date", nil)
require.Nil(b, err)
b.ResetTimer()
for i := 0; i < b.N; i++ {
rr := conn.ExecPrepared(bm.ctx, "ps1", nil, nil, nil)
rowCount := 0
for rr.NextRow() {
rowCount++
if len(rr.Values()) != len(expectedValues) {
b.Fatalf("unexpected number of values: %d", len(rr.Values()))
}
for i := range rr.Values() {
if !bytes.Equal(rr.Values()[i], expectedValues[i]) {
b.Fatalf("unexpected values: %s %s", rr.Values()[i], expectedValues[i])
}
}
}
_, err = rr.Close()
if err != nil {
b.Fatal(err)
}
if rowCount != 1 {
b.Fatalf("unexpected rowCount: %d", rowCount)
}
}
})
}
}
func BenchmarkExecPreparedPossibleToCancel(b *testing.B) {
conn, err := pgconn.Connect(context.Background(), os.Getenv("PGX_TEST_DATABASE"))
require.Nil(b, err)
defer closeConn(b, conn)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_, err = conn.Prepare(ctx, "ps1", "select 'hello'::text as a, 42::int4 as b, '2019-01-01'::date", nil)
require.Nil(b, err)
expectedValues := [][]byte{[]byte("hello"), []byte("42"), []byte("2019-01-01")}
b.ResetTimer()
for i := 0; i < b.N; i++ {
rr := conn.ExecPrepared(ctx, "ps1", nil, nil, nil)
rowCount := 0
for rr.NextRow() {
rowCount += 1
if len(rr.Values()) != len(expectedValues) {
b.Fatalf("unexpected number of values: %d", len(rr.Values()))
}
for i := range rr.Values() {
if !bytes.Equal(rr.Values()[i], expectedValues[i]) {
b.Fatalf("unexpected values: %s %s", rr.Values()[i], expectedValues[i])
}
}
}
_, err = rr.Close()
if err != nil {
b.Fatal(err)
}
if rowCount != 1 {
b.Fatalf("unexpected rowCount: %d", rowCount)
}
}
}
// func BenchmarkChanToSetDeadlinePossibleToCancel(b *testing.B) {
// conn, err := pgconn.Connect(context.Background(), os.Getenv("PGX_TEST_DATABASE"))
// require.Nil(b, err)
// defer closeConn(b, conn)
// ctx, cancel := context.WithCancel(context.Background())
// defer cancel()
// b.ResetTimer()
// for i := 0; i < b.N; i++ {
// conn.ChanToSetDeadline().Watch(ctx)
// conn.ChanToSetDeadline().Ignore()
// }
// }

953
pgconn/config.go Normal file
View File

@ -0,0 +1,953 @@
package pgconn
import (
"context"
"crypto/tls"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"io"
"math"
"net"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/jackc/pgpassfile"
"github.com/jackc/pgservicefile"
"github.com/jackc/pgx/v5/pgconn/ctxwatch"
"github.com/jackc/pgx/v5/pgproto3"
)
type (
AfterConnectFunc func(ctx context.Context, pgconn *PgConn) error
ValidateConnectFunc func(ctx context.Context, pgconn *PgConn) error
GetSSLPasswordFunc func(ctx context.Context) string
)
// Config is the settings used to establish a connection to a PostgreSQL server. It must be created by [ParseConfig]. A
// manually initialized Config will cause ConnectConfig to panic.
type Config struct {
Host string // host (e.g. localhost) or absolute path to unix domain socket directory (e.g. /private/tmp)
Port uint16
Database string
User string
Password string
TLSConfig *tls.Config // nil disables TLS
ConnectTimeout time.Duration
DialFunc DialFunc // e.g. net.Dialer.DialContext
LookupFunc LookupFunc // e.g. net.Resolver.LookupHost
BuildFrontend BuildFrontendFunc
// BuildContextWatcherHandler is called to create a ContextWatcherHandler for a connection. The handler is called
// when a context passed to a PgConn method is canceled.
BuildContextWatcherHandler func(*PgConn) ctxwatch.Handler
RuntimeParams map[string]string // Run-time parameters to set on connection as session default values (e.g. search_path or application_name)
KerberosSrvName string
KerberosSpn string
Fallbacks []*FallbackConfig
SSLNegotiation string // sslnegotiation=postgres or sslnegotiation=direct
// ValidateConnect is called during a connection attempt after a successful authentication with the PostgreSQL server.
// It can be used to validate that the server is acceptable. If this returns an error the connection is closed and the next
// fallback config is tried. This allows implementing high availability behavior such as libpq does with target_session_attrs.
ValidateConnect ValidateConnectFunc
// AfterConnect is called after ValidateConnect. It can be used to set up the connection (e.g. Set session variables
// or prepare statements). If this returns an error the connection attempt fails.
AfterConnect AfterConnectFunc
// OnNotice is a callback function called when a notice response is received.
OnNotice NoticeHandler
// OnNotification is a callback function called when a notification from the LISTEN/NOTIFY system is received.
OnNotification NotificationHandler
// OnPgError is a callback function called when a Postgres error is received from the server. The default handler will close
// the connection on any FATAL errors. If you override this handler you should call the previously set handler or ensure
// that you close on FATAL errors by returning false.
OnPgError PgErrorHandler
createdByParseConfig bool // Used to enforce created by ParseConfig rule.
}
// ParseConfigOptions contains options that control how a config is built such as GetSSLPassword.
type ParseConfigOptions struct {
// GetSSLPassword gets the password to decrypt a SSL client certificate. This is analogous to the libpq function
// PQsetSSLKeyPassHook_OpenSSL.
GetSSLPassword GetSSLPasswordFunc
}
// Copy returns a deep copy of the config that is safe to use and modify.
// The only exception is the TLSConfig field:
// according to the tls.Config docs it must not be modified after creation.
func (c *Config) Copy() *Config {
newConf := new(Config)
*newConf = *c
if newConf.TLSConfig != nil {
newConf.TLSConfig = c.TLSConfig.Clone()
}
if newConf.RuntimeParams != nil {
newConf.RuntimeParams = make(map[string]string, len(c.RuntimeParams))
for k, v := range c.RuntimeParams {
newConf.RuntimeParams[k] = v
}
}
if newConf.Fallbacks != nil {
newConf.Fallbacks = make([]*FallbackConfig, len(c.Fallbacks))
for i, fallback := range c.Fallbacks {
newFallback := new(FallbackConfig)
*newFallback = *fallback
if newFallback.TLSConfig != nil {
newFallback.TLSConfig = fallback.TLSConfig.Clone()
}
newConf.Fallbacks[i] = newFallback
}
}
return newConf
}
// FallbackConfig is additional settings to attempt a connection with when the primary Config fails to establish a
// network connection. It is used for TLS fallback such as sslmode=prefer and high availability (HA) connections.
type FallbackConfig struct {
Host string // host (e.g. localhost) or path to unix domain socket directory (e.g. /private/tmp)
Port uint16
TLSConfig *tls.Config // nil disables TLS
}
// connectOneConfig is the configuration for a single attempt to connect to a single host.
type connectOneConfig struct {
network string
address string
originalHostname string // original hostname before resolving
tlsConfig *tls.Config // nil disables TLS
}
// isAbsolutePath checks if the provided value is an absolute path either
// beginning with a forward slash (as on Linux-based systems) or with a capital
// letter A-Z followed by a colon and a backslash, e.g., "C:\", (as on Windows).
func isAbsolutePath(path string) bool {
isWindowsPath := func(p string) bool {
if len(p) < 3 {
return false
}
drive := p[0]
colon := p[1]
backslash := p[2]
if drive >= 'A' && drive <= 'Z' && colon == ':' && backslash == '\\' {
return true
}
return false
}
return strings.HasPrefix(path, "/") || isWindowsPath(path)
}
// NetworkAddress converts a PostgreSQL host and port into network and address suitable for use with
// net.Dial.
func NetworkAddress(host string, port uint16) (network, address string) {
if isAbsolutePath(host) {
network = "unix"
address = filepath.Join(host, ".s.PGSQL.") + strconv.FormatInt(int64(port), 10)
} else {
network = "tcp"
address = net.JoinHostPort(host, strconv.Itoa(int(port)))
}
return network, address
}
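
NetworkAddress yields either a unix-domain socket address or a TCP host:port, depending on whether host looks like an absolute path. A small sketch (the socket directory and host name are illustrative only):

```go
package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgconn"
)

func main() {
	// Absolute path => unix socket in that directory, named .s.PGSQL.<port>.
	network, address := pgconn.NetworkAddress("/var/run/postgresql", 5432)
	fmt.Println(network, address) // unix /var/run/postgresql/.s.PGSQL.5432

	// Anything else => TCP host:port.
	network, address = pgconn.NetworkAddress("db.example.com", 5432)
	fmt.Println(network, address) // tcp db.example.com:5432
}
```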
// ParseConfig builds a *Config from connString with similar behavior to the PostgreSQL standard C library libpq. It
// uses the same defaults as libpq (e.g. port=5432) and understands most PG* environment variables. ParseConfig closely
// matches the parsing behavior of libpq. connString may either be in URL format or keyword = value format. See
// https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING for details. connString also may be empty
// to only read from the environment. If a password is not supplied it will attempt to read the .pgpass file.
//
// # Example Keyword/Value
// user=jack password=secret host=pg.example.com port=5432 dbname=mydb sslmode=verify-ca
//
// # Example URL
// postgres://jack:secret@pg.example.com:5432/mydb?sslmode=verify-ca
//
// The returned *Config may be modified. However, it is strongly recommended that any configuration that can be done
// through the connection string be done there. In particular the fields Host, Port, TLSConfig, and Fallbacks can be
// interdependent (e.g. TLSConfig needs knowledge of the host to validate the server certificate). These fields should
// not be modified individually. They should all be modified or all left unchanged.
//
// ParseConfig supports specifying multiple hosts in similar manner to libpq. Host and port may include comma separated
// values that will be tried in order. This can be used as part of a high availability system. See
// https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-MULTIPLE-HOSTS for more information.
//
// # Example URL
// postgres://jack:secret@foo.example.com:5432,bar.example.com:5432/mydb
//
// ParseConfig currently recognizes the following environment variables and their parameter key word equivalents passed
// via database URL or keyword/value:
//
// PGHOST
// PGPORT
// PGDATABASE
// PGUSER
// PGPASSWORD
// PGPASSFILE
// PGSERVICE
// PGSERVICEFILE
// PGSSLMODE
// PGSSLCERT
// PGSSLKEY
// PGSSLROOTCERT
// PGSSLPASSWORD
// PGOPTIONS
// PGAPPNAME
// PGCONNECT_TIMEOUT
// PGTARGETSESSIONATTRS
// PGTZ
//
// See http://www.postgresql.org/docs/current/static/libpq-envars.html for details on the meaning of environment variables.
//
// See https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS for parameter key word names. They are
// usually but not always the environment variable name downcased and without the "PG" prefix.
//
// Important Security Notes:
//
// ParseConfig tries to match libpq behavior with regard to PGSSLMODE. This includes defaulting to "prefer" behavior if
// not set.
//
// See http://www.postgresql.org/docs/current/static/libpq-ssl.html#LIBPQ-SSL-PROTECTION for details on what level of
// security each sslmode provides.
//
// The sslmode "prefer" (the default), sslmode "allow", and multiple hosts are implemented via the Fallbacks field of
// the Config struct. If TLSConfig is manually changed it will not affect the fallbacks. For example, in the case of
// sslmode "prefer" this means it will first try the main Config settings which use TLS, then it will try the fallback
// which does not use TLS. This can lead to an unexpected unencrypted connection if the main TLS config is manually
// changed later but the unencrypted fallback is present. Ensure there are no stale fallbacks when manually setting
// TLSConfig.
//
// Other known differences with libpq:
//
// When multiple hosts are specified, libpq allows them to have different passwords set via the .pgpass file. pgconn
// does not.
//
// In addition, ParseConfig accepts the following options:
//
// - servicefile.
// libpq only reads servicefile from the PGSERVICEFILE environment variable. ParseConfig accepts servicefile as a
// part of the connection string.
func ParseConfig(connString string) (*Config, error) {
var parseConfigOptions ParseConfigOptions
return ParseConfigWithOptions(connString, parseConfigOptions)
}
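A minimal sketch tying the documentation above together: parsing a multi-host URL, inspecting the fallbacks that sslmode=prefer produces, and connecting. Hosts and credentials are placeholders:

package main

import (
	"context"
	"log"

	"github.com/jackc/pgx/v5/pgconn"
)

func main() {
	cfg, err := pgconn.ParseConfig("postgres://jack:secret@foo.example.com:5432,bar.example.com:5432/mydb?sslmode=prefer")
	if err != nil {
		log.Fatal(err)
	}
	// sslmode=prefer and the second host become entries in cfg.Fallbacks. If TLSConfig is later replaced by hand,
	// clear or rebuild Fallbacks so a stale unencrypted fallback is not used silently.
	log.Printf("primary host %s with %d fallbacks", cfg.Host, len(cfg.Fallbacks))

	conn, err := pgconn.ConnectConfig(context.Background(), cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(context.Background())
}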
// ParseConfigWithOptions builds a *Config from connString and options with similar behavior to the PostgreSQL standard
// C library libpq. options contains settings that cannot be specified in a connString such as providing a function to
// get the SSL password.
func ParseConfigWithOptions(connString string, options ParseConfigOptions) (*Config, error) {
defaultSettings := defaultSettings()
envSettings := parseEnvSettings()
connStringSettings := make(map[string]string)
if connString != "" {
var err error
// connString may be a database URL or in PostgreSQL keyword/value format
if strings.HasPrefix(connString, "postgres://") || strings.HasPrefix(connString, "postgresql://") {
connStringSettings, err = parseURLSettings(connString)
if err != nil {
return nil, &ParseConfigError{ConnString: connString, msg: "failed to parse as URL", err: err}
}
} else {
connStringSettings, err = parseKeywordValueSettings(connString)
if err != nil {
return nil, &ParseConfigError{ConnString: connString, msg: "failed to parse as keyword/value", err: err}
}
}
}
settings := mergeSettings(defaultSettings, envSettings, connStringSettings)
if service, present := settings["service"]; present {
serviceSettings, err := parseServiceSettings(settings["servicefile"], service)
if err != nil {
return nil, &ParseConfigError{ConnString: connString, msg: "failed to read service", err: err}
}
settings = mergeSettings(defaultSettings, envSettings, serviceSettings, connStringSettings)
}
config := &Config{
createdByParseConfig: true,
Database: settings["database"],
User: settings["user"],
Password: settings["password"],
RuntimeParams: make(map[string]string),
BuildFrontend: func(r io.Reader, w io.Writer) *pgproto3.Frontend {
return pgproto3.NewFrontend(r, w)
},
BuildContextWatcherHandler: func(pgConn *PgConn) ctxwatch.Handler {
return &DeadlineContextWatcherHandler{Conn: pgConn.conn}
},
OnPgError: func(_ *PgConn, pgErr *PgError) bool {
// By default, automatically close the connection on any FATAL error.
if strings.EqualFold(pgErr.Severity, "FATAL") {
return false
}
return true
},
}
if connectTimeoutSetting, present := settings["connect_timeout"]; present {
connectTimeout, err := parseConnectTimeoutSetting(connectTimeoutSetting)
if err != nil {
return nil, &ParseConfigError{ConnString: connString, msg: "invalid connect_timeout", err: err}
}
config.ConnectTimeout = connectTimeout
config.DialFunc = makeConnectTimeoutDialFunc(connectTimeout)
} else {
defaultDialer := makeDefaultDialer()
config.DialFunc = defaultDialer.DialContext
}
config.LookupFunc = makeDefaultResolver().LookupHost
notRuntimeParams := map[string]struct{}{
"host": {},
"port": {},
"database": {},
"user": {},
"password": {},
"passfile": {},
"connect_timeout": {},
"sslmode": {},
"sslkey": {},
"sslcert": {},
"sslrootcert": {},
"sslnegotiation": {},
"sslpassword": {},
"sslsni": {},
"krbspn": {},
"krbsrvname": {},
"target_session_attrs": {},
"service": {},
"servicefile": {},
}
// Adding kerberos configuration
if _, present := settings["krbsrvname"]; present {
config.KerberosSrvName = settings["krbsrvname"]
}
if _, present := settings["krbspn"]; present {
config.KerberosSpn = settings["krbspn"]
}
for k, v := range settings {
if _, present := notRuntimeParams[k]; present {
continue
}
config.RuntimeParams[k] = v
}
fallbacks := []*FallbackConfig{}
hosts := strings.Split(settings["host"], ",")
ports := strings.Split(settings["port"], ",")
for i, host := range hosts {
var portStr string
if i < len(ports) {
portStr = ports[i]
} else {
portStr = ports[0]
}
port, err := parsePort(portStr)
if err != nil {
return nil, &ParseConfigError{ConnString: connString, msg: "invalid port", err: err}
}
var tlsConfigs []*tls.Config
// Ignore TLS settings for Unix domain sockets, as libpq does
if network, _ := NetworkAddress(host, port); network == "unix" {
tlsConfigs = append(tlsConfigs, nil)
} else {
var err error
tlsConfigs, err = configTLS(settings, host, options)
if err != nil {
return nil, &ParseConfigError{ConnString: connString, msg: "failed to configure TLS", err: err}
}
}
for _, tlsConfig := range tlsConfigs {
fallbacks = append(fallbacks, &FallbackConfig{
Host: host,
Port: port,
TLSConfig: tlsConfig,
})
}
}
config.Host = fallbacks[0].Host
config.Port = fallbacks[0].Port
config.TLSConfig = fallbacks[0].TLSConfig
config.Fallbacks = fallbacks[1:]
config.SSLNegotiation = settings["sslnegotiation"]
passfile, err := pgpassfile.ReadPassfile(settings["passfile"])
if err == nil {
if config.Password == "" {
host := config.Host
if network, _ := NetworkAddress(config.Host, config.Port); network == "unix" {
host = "localhost"
}
config.Password = passfile.FindPassword(host, strconv.Itoa(int(config.Port)), config.Database, config.User)
}
}
switch tsa := settings["target_session_attrs"]; tsa {
case "read-write":
config.ValidateConnect = ValidateConnectTargetSessionAttrsReadWrite
case "read-only":
config.ValidateConnect = ValidateConnectTargetSessionAttrsReadOnly
case "primary":
config.ValidateConnect = ValidateConnectTargetSessionAttrsPrimary
case "standby":
config.ValidateConnect = ValidateConnectTargetSessionAttrsStandby
case "prefer-standby":
config.ValidateConnect = ValidateConnectTargetSessionAttrsPreferStandby
case "any":
// do nothing
default:
return nil, &ParseConfigError{ConnString: connString, msg: fmt.Sprintf("unknown target_session_attrs value: %v", tsa)}
}
return config, nil
}
func mergeSettings(settingSets ...map[string]string) map[string]string {
settings := make(map[string]string)
for _, s2 := range settingSets {
for k, v := range s2 {
settings[k] = v
}
}
return settings
}
func parseEnvSettings() map[string]string {
settings := make(map[string]string)
nameMap := map[string]string{
"PGHOST": "host",
"PGPORT": "port",
"PGDATABASE": "database",
"PGUSER": "user",
"PGPASSWORD": "password",
"PGPASSFILE": "passfile",
"PGAPPNAME": "application_name",
"PGCONNECT_TIMEOUT": "connect_timeout",
"PGSSLMODE": "sslmode",
"PGSSLKEY": "sslkey",
"PGSSLCERT": "sslcert",
"PGSSLSNI": "sslsni",
"PGSSLROOTCERT": "sslrootcert",
"PGSSLPASSWORD": "sslpassword",
"PGSSLNEGOTIATION": "sslnegotiation",
"PGTARGETSESSIONATTRS": "target_session_attrs",
"PGSERVICE": "service",
"PGSERVICEFILE": "servicefile",
"PGTZ": "timezone",
"PGOPTIONS": "options",
}
for envname, realname := range nameMap {
value := os.Getenv(envname)
if value != "" {
settings[realname] = value
}
}
return settings
}
func parseURLSettings(connString string) (map[string]string, error) {
settings := make(map[string]string)
parsedURL, err := url.Parse(connString)
if err != nil {
if urlErr := new(url.Error); errors.As(err, &urlErr) {
return nil, urlErr.Err
}
return nil, err
}
if parsedURL.User != nil {
settings["user"] = parsedURL.User.Username()
if password, present := parsedURL.User.Password(); present {
settings["password"] = password
}
}
// Handle multiple host:port pairs in url.Host by splitting them into host,host,host and port,port,port.
var hosts []string
var ports []string
for _, host := range strings.Split(parsedURL.Host, ",") {
if host == "" {
continue
}
if isIPOnly(host) {
hosts = append(hosts, strings.Trim(host, "[]"))
continue
}
h, p, err := net.SplitHostPort(host)
if err != nil {
return nil, fmt.Errorf("failed to split host:port in '%s', err: %w", host, err)
}
if h != "" {
hosts = append(hosts, h)
}
if p != "" {
ports = append(ports, p)
}
}
if len(hosts) > 0 {
settings["host"] = strings.Join(hosts, ",")
}
if len(ports) > 0 {
settings["port"] = strings.Join(ports, ",")
}
database := strings.TrimLeft(parsedURL.Path, "/")
if database != "" {
settings["database"] = database
}
nameMap := map[string]string{
"dbname": "database",
}
for k, v := range parsedURL.Query() {
if k2, present := nameMap[k]; present {
k = k2
}
settings[k] = v[0]
}
return settings, nil
}
func isIPOnly(host string) bool {
return net.ParseIP(strings.Trim(host, "[]")) != nil || !strings.Contains(host, ":")
}
var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1}
func parseKeywordValueSettings(s string) (map[string]string, error) {
settings := make(map[string]string)
nameMap := map[string]string{
"dbname": "database",
}
for len(s) > 0 {
var key, val string
eqIdx := strings.IndexRune(s, '=')
if eqIdx < 0 {
return nil, errors.New("invalid keyword/value")
}
key = strings.Trim(s[:eqIdx], " \t\n\r\v\f")
s = strings.TrimLeft(s[eqIdx+1:], " \t\n\r\v\f")
if len(s) == 0 {
} else if s[0] != '\'' {
end := 0
for ; end < len(s); end++ {
if asciiSpace[s[end]] == 1 {
break
}
if s[end] == '\\' {
end++
if end == len(s) {
return nil, errors.New("invalid backslash")
}
}
}
val = strings.Replace(strings.Replace(s[:end], "\\\\", "\\", -1), "\\'", "'", -1)
if end == len(s) {
s = ""
} else {
s = s[end+1:]
}
} else { // quoted string
s = s[1:]
end := 0
for ; end < len(s); end++ {
if s[end] == '\'' {
break
}
if s[end] == '\\' {
end++
}
}
if end == len(s) {
return nil, errors.New("unterminated quoted string in connection info string")
}
val = strings.Replace(strings.Replace(s[:end], "\\\\", "\\", -1), "\\'", "'", -1)
if end == len(s) {
s = ""
} else {
s = s[end+1:]
}
}
if k, ok := nameMap[key]; ok {
key = k
}
if key == "" {
return nil, errors.New("invalid keyword/value")
}
settings[key] = val
}
return settings, nil
}
func parseServiceSettings(servicefilePath, serviceName string) (map[string]string, error) {
servicefile, err := pgservicefile.ReadServicefile(servicefilePath)
if err != nil {
return nil, fmt.Errorf("failed to read service file: %v", servicefilePath)
}
service, err := servicefile.GetService(serviceName)
if err != nil {
return nil, fmt.Errorf("unable to find service: %v", serviceName)
}
nameMap := map[string]string{
"dbname": "database",
}
settings := make(map[string]string, len(service.Settings))
for k, v := range service.Settings {
if k2, present := nameMap[k]; present {
k = k2
}
settings[k] = v
}
return settings, nil
}
// configTLS uses libpq's TLS parameters to construct []*tls.Config. It is
// necessary to return multiple TLS configs because sslmode "allow" and
// "prefer" fall back between TLS and non-TLS connections.
func configTLS(settings map[string]string, thisHost string, parseConfigOptions ParseConfigOptions) ([]*tls.Config, error) {
host := thisHost
sslmode := settings["sslmode"]
sslrootcert := settings["sslrootcert"]
sslcert := settings["sslcert"]
sslkey := settings["sslkey"]
sslpassword := settings["sslpassword"]
sslsni := settings["sslsni"]
sslnegotiation := settings["sslnegotiation"]
// Match libpq default behavior
if sslmode == "" {
sslmode = "prefer"
}
if sslsni == "" {
sslsni = "1"
}
tlsConfig := &tls.Config{}
if sslnegotiation == "direct" {
tlsConfig.NextProtos = []string{"postgresql"}
if sslmode == "prefer" {
sslmode = "require"
}
}
if sslrootcert != "" {
var caCertPool *x509.CertPool
if sslrootcert == "system" {
var err error
caCertPool, err = x509.SystemCertPool()
if err != nil {
return nil, fmt.Errorf("unable to load system certificate pool: %w", err)
}
sslmode = "verify-full"
} else {
caCertPool = x509.NewCertPool()
caPath := sslrootcert
caCert, err := os.ReadFile(caPath)
if err != nil {
return nil, fmt.Errorf("unable to read CA file: %w", err)
}
if !caCertPool.AppendCertsFromPEM(caCert) {
return nil, errors.New("unable to add CA to cert pool")
}
}
tlsConfig.RootCAs = caCertPool
tlsConfig.ClientCAs = caCertPool
}
switch sslmode {
case "disable":
return []*tls.Config{nil}, nil
case "allow", "prefer":
tlsConfig.InsecureSkipVerify = true
case "require":
// According to PostgreSQL documentation, if a root CA file exists,
// the behavior of sslmode=require should be the same as that of verify-ca
//
// See https://www.postgresql.org/docs/current/libpq-ssl.html
if sslrootcert != "" {
goto nextCase
}
tlsConfig.InsecureSkipVerify = true
break
nextCase:
fallthrough
case "verify-ca":
// Don't perform the default certificate verification because it
// will verify the hostname. Instead, verify the server's
// certificate chain ourselves in VerifyPeerCertificate and
// ignore the server name. This emulates libpq's verify-ca
// behavior.
//
// See https://github.com/golang/go/issues/21971#issuecomment-332693931
// and https://pkg.go.dev/crypto/tls?tab=doc#example-Config-VerifyPeerCertificate
// for more info.
tlsConfig.InsecureSkipVerify = true
tlsConfig.VerifyPeerCertificate = func(certificates [][]byte, _ [][]*x509.Certificate) error {
certs := make([]*x509.Certificate, len(certificates))
for i, asn1Data := range certificates {
cert, err := x509.ParseCertificate(asn1Data)
if err != nil {
return errors.New("failed to parse certificate from server: " + err.Error())
}
certs[i] = cert
}
// Leave DNSName empty to skip hostname verification.
opts := x509.VerifyOptions{
Roots: tlsConfig.RootCAs,
Intermediates: x509.NewCertPool(),
}
// Skip the first cert because it's the leaf. All others
// are intermediates.
for _, cert := range certs[1:] {
opts.Intermediates.AddCert(cert)
}
_, err := certs[0].Verify(opts)
return err
}
case "verify-full":
tlsConfig.ServerName = host
default:
return nil, errors.New("sslmode is invalid")
}
if (sslcert != "" && sslkey == "") || (sslcert == "" && sslkey != "") {
return nil, errors.New(`both "sslcert" and "sslkey" are required`)
}
if sslcert != "" && sslkey != "" {
buf, err := os.ReadFile(sslkey)
if err != nil {
return nil, fmt.Errorf("unable to read sslkey: %w", err)
}
block, _ := pem.Decode(buf)
if block == nil {
return nil, errors.New("failed to decode sslkey")
}
var pemKey []byte
var decryptedKey []byte
var decryptedError error
// If PEM is encrypted, attempt to decrypt using pass phrase
if x509.IsEncryptedPEMBlock(block) {
// Attempt decryption with pass phrase
// NOTE: only supports RSA (PKCS#1)
if sslpassword != "" {
decryptedKey, decryptedError = x509.DecryptPEMBlock(block, []byte(sslpassword))
}
// If sslpassword was not provided, or decryption with it failed,
// try to obtain the passphrase from the GetSSLPassword callback.
if sslpassword == "" || decryptedError != nil {
if parseConfigOptions.GetSSLPassword != nil {
sslpassword = parseConfigOptions.GetSSLPassword(context.Background())
}
if sslpassword == "" {
return nil, fmt.Errorf("unable to find sslpassword")
}
}
decryptedKey, decryptedError = x509.DecryptPEMBlock(block, []byte(sslpassword))
// Should we also warn that only PKCS#1 keys are supported?
if decryptedError != nil {
return nil, fmt.Errorf("unable to decrypt key: %w", decryptedError)
}
pemBytes := pem.Block{
Type: "RSA PRIVATE KEY",
Bytes: decryptedKey,
}
pemKey = pem.EncodeToMemory(&pemBytes)
} else {
pemKey = pem.EncodeToMemory(block)
}
certfile, err := os.ReadFile(sslcert)
if err != nil {
return nil, fmt.Errorf("unable to read cert: %w", err)
}
cert, err := tls.X509KeyPair(certfile, pemKey)
if err != nil {
return nil, fmt.Errorf("unable to load cert: %w", err)
}
tlsConfig.Certificates = []tls.Certificate{cert}
}
// Set Server Name Indication (SNI), if enabled by connection parameters.
// Per RFC 6066, do not set it if the host is a literal IP address (IPv4
// or IPv6).
if sslsni == "1" && net.ParseIP(host) == nil {
tlsConfig.ServerName = host
}
switch sslmode {
case "allow":
return []*tls.Config{nil, tlsConfig}, nil
case "prefer":
return []*tls.Config{tlsConfig, nil}, nil
case "require", "verify-ca", "verify-full":
return []*tls.Config{tlsConfig}, nil
default:
panic("BUG: bad sslmode should already have been caught")
}
}
func parsePort(s string) (uint16, error) {
port, err := strconv.ParseUint(s, 10, 16)
if err != nil {
return 0, err
}
if port < 1 || port > math.MaxUint16 {
return 0, errors.New("outside range")
}
return uint16(port), nil
}
func makeDefaultDialer() *net.Dialer {
// Rely on Go's default keep-alive settings.
return &net.Dialer{}
}
func makeDefaultResolver() *net.Resolver {
return net.DefaultResolver
}
func parseConnectTimeoutSetting(s string) (time.Duration, error) {
timeout, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return 0, err
}
if timeout < 0 {
return 0, errors.New("negative timeout")
}
return time.Duration(timeout) * time.Second, nil
}
func makeConnectTimeoutDialFunc(timeout time.Duration) DialFunc {
d := makeDefaultDialer()
d.Timeout = timeout
return d.DialContext
}
// ValidateConnectTargetSessionAttrsReadWrite is a ValidateConnectFunc that implements libpq compatible
// target_session_attrs=read-write.
func ValidateConnectTargetSessionAttrsReadWrite(ctx context.Context, pgConn *PgConn) error {
result, err := pgConn.Exec(ctx, "show transaction_read_only").ReadAll()
if err != nil {
return err
}
if string(result[0].Rows[0][0]) == "on" {
return errors.New("read only connection")
}
return nil
}
// ValidateConnectTargetSessionAttrsReadOnly is a ValidateConnectFunc that implements libpq compatible
// target_session_attrs=read-only.
func ValidateConnectTargetSessionAttrsReadOnly(ctx context.Context, pgConn *PgConn) error {
result, err := pgConn.Exec(ctx, "show transaction_read_only").ReadAll()
if err != nil {
return err
}
if string(result[0].Rows[0][0]) != "on" {
return errors.New("connection is not read only")
}
return nil
}
// ValidateConnectTargetSessionAttrsStandby is a ValidateConnectFunc that implements libpq compatible
// target_session_attrs=standby.
func ValidateConnectTargetSessionAttrsStandby(ctx context.Context, pgConn *PgConn) error {
result, err := pgConn.Exec(ctx, "select pg_is_in_recovery()").ReadAll()
if err != nil {
return err
}
if string(result[0].Rows[0][0]) != "t" {
return errors.New("server is not in hot standby mode")
}
return nil
}
// ValidateConnectTargetSessionAttrsPrimary is a ValidateConnectFunc that implements libpq compatible
// target_session_attrs=primary.
func ValidateConnectTargetSessionAttrsPrimary(ctx context.Context, pgConn *PgConn) error {
result, err := pgConn.Exec(ctx, "select pg_is_in_recovery()").ReadAll()
if err != nil {
return err
}
if string(result[0].Rows[0][0]) == "t" {
return errors.New("server is in standby mode")
}
return nil
}
// ValidateConnectTargetSessionAttrsPreferStandby is a ValidateConnectFunc that implements libpq compatible
// target_session_attrs=prefer-standby.
func ValidateConnectTargetSessionAttrsPreferStandby(ctx context.Context, pgConn *PgConn) error {
result, err := pgConn.Exec(ctx, "select pg_is_in_recovery()").ReadAll()
if err != nil {
return err
}
if string(result[0].Rows[0][0]) != "t" {
return &NotPreferredError{err: errors.New("server is not in hot standby mode")}
}
return nil
}
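These validators are normally selected via target_session_attrs in the connection string, but they can also be assigned directly. A sketch with placeholder hosts:

package main

import (
	"context"
	"log"

	"github.com/jackc/pgx/v5/pgconn"
)

func main() {
	cfg, err := pgconn.ParseConfig("host=db1.example.com,db2.example.com user=jack dbname=mydb")
	if err != nil {
		log.Fatal(err)
	}
	// Equivalent to target_session_attrs=read-write: each candidate server is checked after connecting
	// and rejected if it only accepts read-only transactions.
	cfg.ValidateConnect = pgconn.ValidateConnectTargetSessionAttrsReadWrite

	conn, err := pgconn.ConnectConfig(context.Background(), cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(context.Background())
}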

pgconn/config_test.go Normal file (1141 lines)

File diff suppressed because it is too large.

@ -0,0 +1,80 @@
package ctxwatch
import (
"context"
"sync"
)
// ContextWatcher watches a context and performs an action when the context is canceled. It can watch one context at a
// time.
type ContextWatcher struct {
handler Handler
unwatchChan chan struct{}
lock sync.Mutex
watchInProgress bool
onCancelWasCalled bool
}
// NewContextWatcher returns a ContextWatcher. handler.HandleCancel will be called when a watched context is canceled.
// handler.HandleUnwatchAfterCancel will be called when Unwatch is called and the watched context had already been
// canceled and HandleCancel called.
func NewContextWatcher(handler Handler) *ContextWatcher {
cw := &ContextWatcher{
handler: handler,
unwatchChan: make(chan struct{}),
}
return cw
}
// Watch starts watching ctx. If ctx is canceled then the handler's HandleCancel method will be called.
func (cw *ContextWatcher) Watch(ctx context.Context) {
cw.lock.Lock()
defer cw.lock.Unlock()
if cw.watchInProgress {
panic("Watch already in progress")
}
cw.onCancelWasCalled = false
if ctx.Done() != nil {
cw.watchInProgress = true
go func() {
select {
case <-ctx.Done():
cw.handler.HandleCancel(ctx)
cw.onCancelWasCalled = true
<-cw.unwatchChan
case <-cw.unwatchChan:
}
}()
} else {
cw.watchInProgress = false
}
}
// Unwatch stops watching the previously watched context. If the handler's HandleCancel method was called then
// HandleUnwatchAfterCancel will also be called.
func (cw *ContextWatcher) Unwatch() {
cw.lock.Lock()
defer cw.lock.Unlock()
if cw.watchInProgress {
cw.unwatchChan <- struct{}{}
if cw.onCancelWasCalled {
cw.handler.HandleUnwatchAfterCancel()
}
cw.watchInProgress = false
}
}
type Handler interface {
// HandleCancel is called when the context that a ContextWatcher is currently watching is canceled. canceledCtx is the
// context that was canceled.
HandleCancel(canceledCtx context.Context)
// HandleUnwatchAfterCancel is called when a ContextWatcher that called HandleCancel on this Handler is unwatched.
HandleUnwatchAfterCancel()
}
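A sketch of using ContextWatcher with a trivial, hypothetical Handler; the sleep only makes it likely that the watcher goroutine observes the cancellation before Unwatch is called:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/jackc/pgx/v5/pgconn/ctxwatch"
)

type printHandler struct{}

func (printHandler) HandleCancel(ctx context.Context) { fmt.Println("watched context canceled") }
func (printHandler) HandleUnwatchAfterCancel()        { fmt.Println("unwatched after cancel") }

func main() {
	cw := ctxwatch.NewContextWatcher(printHandler{})
	ctx, cancel := context.WithCancel(context.Background())
	cw.Watch(ctx)
	cancel()
	time.Sleep(10 * time.Millisecond)
	cw.Unwatch() // HandleUnwatchAfterCancel typically fires here because HandleCancel has already run
}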


@ -0,0 +1,185 @@
package ctxwatch_test
import (
"context"
"sync/atomic"
"testing"
"time"
"github.com/jackc/pgx/v5/pgconn/ctxwatch"
"github.com/stretchr/testify/require"
)
type testHandler struct {
handleCancel func(context.Context)
handleUnwatchAfterCancel func()
}
func (h *testHandler) HandleCancel(ctx context.Context) {
h.handleCancel(ctx)
}
func (h *testHandler) HandleUnwatchAfterCancel() {
h.handleUnwatchAfterCancel()
}
func TestContextWatcherContextCancelled(t *testing.T) {
canceledChan := make(chan struct{})
cleanupCalled := false
cw := ctxwatch.NewContextWatcher(&testHandler{
handleCancel: func(context.Context) {
canceledChan <- struct{}{}
}, handleUnwatchAfterCancel: func() {
cleanupCalled = true
},
})
ctx, cancel := context.WithCancel(context.Background())
cw.Watch(ctx)
cancel()
select {
case <-canceledChan:
case <-time.NewTimer(time.Second).C:
t.Fatal("Timed out waiting for cancel func to be called")
}
cw.Unwatch()
require.True(t, cleanupCalled, "Cleanup func was not called")
}
func TestContextWatcherUnwatchedBeforeContextCancelled(t *testing.T) {
cw := ctxwatch.NewContextWatcher(&testHandler{
handleCancel: func(context.Context) {
t.Error("cancel func should not have been called")
}, handleUnwatchAfterCancel: func() {
t.Error("cleanup func should not have been called")
},
})
ctx, cancel := context.WithCancel(context.Background())
cw.Watch(ctx)
cw.Unwatch()
cancel()
}
func TestContextWatcherMultipleWatchPanics(t *testing.T) {
cw := ctxwatch.NewContextWatcher(&testHandler{handleCancel: func(context.Context) {}, handleUnwatchAfterCancel: func() {}})
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
cw.Watch(ctx)
defer cw.Unwatch()
ctx2, cancel2 := context.WithCancel(context.Background())
defer cancel2()
require.Panics(t, func() { cw.Watch(ctx2) }, "Expected panic when Watch called multiple times")
}
func TestContextWatcherUnwatchWhenNotWatchingIsSafe(t *testing.T) {
cw := ctxwatch.NewContextWatcher(&testHandler{handleCancel: func(context.Context) {}, handleUnwatchAfterCancel: func() {}})
cw.Unwatch() // unwatch when not / never watching
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
cw.Watch(ctx)
cw.Unwatch()
cw.Unwatch() // double unwatch
}
func TestContextWatcherUnwatchIsConcurrencySafe(t *testing.T) {
cw := ctxwatch.NewContextWatcher(&testHandler{handleCancel: func(context.Context) {}, handleUnwatchAfterCancel: func() {}})
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
defer cancel()
cw.Watch(ctx)
go cw.Unwatch()
go cw.Unwatch()
<-ctx.Done()
}
func TestContextWatcherStress(t *testing.T) {
var cancelFuncCalls int64
var cleanupFuncCalls int64
cw := ctxwatch.NewContextWatcher(&testHandler{
handleCancel: func(context.Context) {
atomic.AddInt64(&cancelFuncCalls, 1)
}, handleUnwatchAfterCancel: func() {
atomic.AddInt64(&cleanupFuncCalls, 1)
},
})
cycleCount := 100000
for i := 0; i < cycleCount; i++ {
ctx, cancel := context.WithCancel(context.Background())
cw.Watch(ctx)
if i%2 == 0 {
cancel()
}
// Without time.Sleep, cw.Unwatch would almost always run before the cancel func, so cancellation would rarely be observed. The occasional sleep gives a better mix.
if i%333 == 0 {
// On Windows, Sleep takes more time than expected, so get here less frequently to avoid
// making CI take a long time.
time.Sleep(time.Nanosecond)
}
cw.Unwatch()
if i%2 == 1 {
cancel()
}
}
actualCancelFuncCalls := atomic.LoadInt64(&cancelFuncCalls)
actualCleanupFuncCalls := atomic.LoadInt64(&cleanupFuncCalls)
if actualCancelFuncCalls == 0 {
t.Fatal("actualCancelFuncCalls == 0")
}
maxCancelFuncCalls := int64(cycleCount) / 2
if actualCancelFuncCalls > maxCancelFuncCalls {
t.Errorf("cancel func calls should be no more than %d but was %d", maxCancelFuncCalls, actualCancelFuncCalls)
}
if actualCancelFuncCalls != actualCleanupFuncCalls {
t.Errorf("cancel func calls (%d) should be equal to cleanup func calls (%d) but was not", actualCancelFuncCalls, actualCleanupFuncCalls)
}
}
func BenchmarkContextWatcherUncancellable(b *testing.B) {
cw := ctxwatch.NewContextWatcher(&testHandler{handleCancel: func(context.Context) {}, handleUnwatchAfterCancel: func() {}})
for i := 0; i < b.N; i++ {
cw.Watch(context.Background())
cw.Unwatch()
}
}
func BenchmarkContextWatcherCancelled(b *testing.B) {
cw := ctxwatch.NewContextWatcher(&testHandler{handleCancel: func(context.Context) {}, handleUnwatchAfterCancel: func() {}})
for i := 0; i < b.N; i++ {
ctx, cancel := context.WithCancel(context.Background())
cw.Watch(ctx)
cancel()
cw.Unwatch()
}
}
func BenchmarkContextWatcherCancellable(b *testing.B) {
cw := ctxwatch.NewContextWatcher(&testHandler{handleCancel: func(context.Context) {}, handleUnwatchAfterCancel: func() {}})
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
for i := 0; i < b.N; i++ {
cw.Watch(ctx)
cw.Unwatch()
}
}

pgconn/defaults.go Normal file (63 lines)

@ -0,0 +1,63 @@
//go:build !windows
// +build !windows
package pgconn
import (
"os"
"os/user"
"path/filepath"
)
func defaultSettings() map[string]string {
settings := make(map[string]string)
settings["host"] = defaultHost()
settings["port"] = "5432"
// Default to the OS user name. Purposely ignoring err getting user name from
// OS. The client application will simply have to specify the user in that
// case (which they typically will be doing anyway).
user, err := user.Current()
if err == nil {
settings["user"] = user.Username
settings["passfile"] = filepath.Join(user.HomeDir, ".pgpass")
settings["servicefile"] = filepath.Join(user.HomeDir, ".pg_service.conf")
sslcert := filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt")
sslkey := filepath.Join(user.HomeDir, ".postgresql", "postgresql.key")
if _, err := os.Stat(sslcert); err == nil {
if _, err := os.Stat(sslkey); err == nil {
// Both the cert and key must be present to use them, or do not use either
settings["sslcert"] = sslcert
settings["sslkey"] = sslkey
}
}
sslrootcert := filepath.Join(user.HomeDir, ".postgresql", "root.crt")
if _, err := os.Stat(sslrootcert); err == nil {
settings["sslrootcert"] = sslrootcert
}
}
settings["target_session_attrs"] = "any"
return settings
}
// defaultHost attempts to mimic libpq's default host. libpq uses the default unix socket location on *nix and localhost
// on Windows. The default socket location is compiled into libpq. Since pgx does not have access to that default it
// checks the existence of common locations.
func defaultHost() string {
candidatePaths := []string{
"/var/run/postgresql", // Debian
"/private/tmp", // OSX - homebrew
"/tmp", // standard PostgreSQL
}
for _, path := range candidatePaths {
if _, err := os.Stat(path); err == nil {
return path
}
}
return "localhost"
}


@ -0,0 +1,57 @@
package pgconn
import (
"os"
"os/user"
"path/filepath"
"strings"
)
func defaultSettings() map[string]string {
settings := make(map[string]string)
settings["host"] = defaultHost()
settings["port"] = "5432"
// Default to the OS user name. Purposely ignoring err getting user name from
// OS. The client application will simply have to specify the user in that
// case (which they typically will be doing anyway).
user, err := user.Current()
appData := os.Getenv("APPDATA")
if err == nil {
// Windows gives us the username here as `DOMAIN\user` or `LOCALPCNAME\user`,
// but the libpq default is just the `user` portion, so we strip off the first part.
username := user.Username
if strings.Contains(username, "\\") {
username = username[strings.LastIndex(username, "\\")+1:]
}
settings["user"] = username
settings["passfile"] = filepath.Join(appData, "postgresql", "pgpass.conf")
settings["servicefile"] = filepath.Join(user.HomeDir, ".pg_service.conf")
sslcert := filepath.Join(appData, "postgresql", "postgresql.crt")
sslkey := filepath.Join(appData, "postgresql", "postgresql.key")
if _, err := os.Stat(sslcert); err == nil {
if _, err := os.Stat(sslkey); err == nil {
// Both the cert and key must be present to use them, or do not use either
settings["sslcert"] = sslcert
settings["sslkey"] = sslkey
}
}
sslrootcert := filepath.Join(appData, "postgresql", "root.crt")
if _, err := os.Stat(sslrootcert); err == nil {
settings["sslrootcert"] = sslrootcert
}
}
settings["target_session_attrs"] = "any"
return settings
}
// defaultHost attempts to mimic libpq's default host. libpq uses the default unix socket location on *nix and localhost
// on Windows. The default socket location is compiled into libpq. Since pgx does not have access to that default it
// checks the existence of common locations.
func defaultHost() string {
return "localhost"
}

pgconn/doc.go Normal file (38 lines)

@ -0,0 +1,38 @@
// Package pgconn is a low-level PostgreSQL database driver.
/*
pgconn provides lower level access to a PostgreSQL connection than a database/sql or pgx connection. It operates at
nearly the same level as the C library libpq.
Establishing a Connection
Use Connect to establish a connection. It accepts a connection string in URL or keyword/value format and will read the
environment for libpq style environment variables.
Executing a Query
ExecParams and ExecPrepared execute a single query. They return readers that iterate over each row. The Read method
reads all rows into memory.
Executing Multiple Queries in a Single Round Trip
Exec and ExecBatch can execute multiple queries in a single round trip. They return readers that iterate over each query
result. The ReadAll method reads all query results into memory.
Pipeline Mode
Pipeline mode allows sending queries without having read the results of previously sent queries. It allows control of
exactly how many and when network round trips occur.
Context Support
All potentially blocking operations take a context.Context. The default behavior when a context is canceled is for the
method to immediately return. In most circumstances, this will also close the underlying connection. This behavior can
be customized by using BuildContextWatcherHandler on the Config to create a ctxwatch.Handler with different behavior.
This can be especially useful when queries are frequently canceled and the overhead of creating new connections is
a problem. DeadlineContextWatcherHandler and CancelRequestContextWatcherHandler can be used to introduce a delay before
interrupting the query in such a way as to close the connection.
The CancelRequest method may be used to request the PostgreSQL server cancel an in-progress query without forcing the
client to abort.
*/
package pgconn
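A minimal end-to-end sketch of the workflow described above, assuming the connection string is supplied via a hypothetical DATABASE_URL environment variable:

package main

import (
	"context"
	"log"
	"os"

	"github.com/jackc/pgx/v5/pgconn"
)

func main() {
	conn, err := pgconn.Connect(context.Background(), os.Getenv("DATABASE_URL"))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(context.Background())

	result := conn.ExecParams(context.Background(), "select $1::text", [][]byte{[]byte("hello")}, nil, nil, nil).Read()
	if result.Err != nil {
		log.Fatal(result.Err)
	}
	log.Println(string(result.Rows[0][0])) // hello
}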

pgconn/errors.go Normal file (256 lines)

@ -0,0 +1,256 @@
package pgconn
import (
"context"
"errors"
"fmt"
"net"
"net/url"
"regexp"
"strings"
)
// SafeToRetry checks if the err is guaranteed to have occurred before sending any data to the server.
func SafeToRetry(err error) bool {
var retryableErr interface{ SafeToRetry() bool }
if errors.As(err, &retryableErr) {
return retryableErr.SafeToRetry()
}
return false
}
// Timeout checks if err was caused by a timeout. To be specific, it is true if err was caused within pgconn by a
// context.DeadlineExceeded or an implementer of net.Error where Timeout() is true.
func Timeout(err error) bool {
var timeoutErr *errTimeout
return errors.As(err, &timeoutErr)
}
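A sketch of how these helpers might be used to decide whether a failed query can be resent; the query and the retry policy are illustrative only:

package pgconnexample // hypothetical package name

import (
	"context"

	"github.com/jackc/pgx/v5/pgconn"
)

// execOnce runs a query and reports whether a failure looks safe to retry.
func execOnce(ctx context.Context, conn *pgconn.PgConn) (retryable bool, err error) {
	_, err = conn.Exec(ctx, "select 1").ReadAll()
	if err == nil {
		return false, nil
	}
	// Timeout reports whether the failure was a deadline or timeout; SafeToRetry reports whether
	// the error is guaranteed to have happened before anything was sent to the server.
	return pgconn.Timeout(err) && pgconn.SafeToRetry(err), err
}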
// PgError represents an error reported by the PostgreSQL server. See
// http://www.postgresql.org/docs/current/static/protocol-error-fields.html for
// detailed field description.
type PgError struct {
Severity string
SeverityUnlocalized string
Code string
Message string
Detail string
Hint string
Position int32
InternalPosition int32
InternalQuery string
Where string
SchemaName string
TableName string
ColumnName string
DataTypeName string
ConstraintName string
File string
Line int32
Routine string
}
func (pe *PgError) Error() string {
return pe.Severity + ": " + pe.Message + " (SQLSTATE " + pe.Code + ")"
}
// SQLState returns the SQLState of the error.
func (pe *PgError) SQLState() string {
return pe.Code
}
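A sketch of unwrapping a server-reported error into *PgError to inspect the SQLSTATE; the failing query and the DATABASE_URL environment variable are examples only:

package main

import (
	"context"
	"errors"
	"log"
	"os"

	"github.com/jackc/pgx/v5/pgconn"
)

func main() {
	conn, err := pgconn.Connect(context.Background(), os.Getenv("DATABASE_URL"))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(context.Background())

	_, err = conn.Exec(context.Background(), "select * from table_that_does_not_exist").ReadAll()
	var pgErr *pgconn.PgError
	if errors.As(err, &pgErr) {
		log.Printf("SQLSTATE %s: %s", pgErr.SQLState(), pgErr.Message) // e.g. SQLSTATE 42P01
	}
}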
// ConnectError is the error returned when a connection attempt fails.
type ConnectError struct {
Config *Config // The configuration that was used in the connection attempt.
err error
}
func (e *ConnectError) Error() string {
prefix := fmt.Sprintf("failed to connect to `user=%s database=%s`:", e.Config.User, e.Config.Database)
details := e.err.Error()
if strings.Contains(details, "\n") {
return prefix + "\n\t" + strings.ReplaceAll(details, "\n", "\n\t")
} else {
return prefix + " " + details
}
}
func (e *ConnectError) Unwrap() error {
return e.err
}
type perDialConnectError struct {
address string
originalHostname string
err error
}
func (e *perDialConnectError) Error() string {
return fmt.Sprintf("%s (%s): %s", e.address, e.originalHostname, e.err.Error())
}
func (e *perDialConnectError) Unwrap() error {
return e.err
}
type connLockError struct {
status string
}
func (e *connLockError) SafeToRetry() bool {
return true // a lock failure by definition happens before the connection is used.
}
func (e *connLockError) Error() string {
return e.status
}
// ParseConfigError is the error returned when a connection string cannot be parsed.
type ParseConfigError struct {
ConnString string // The connection string that could not be parsed.
msg string
err error
}
func NewParseConfigError(conn, msg string, err error) error {
return &ParseConfigError{
ConnString: conn,
msg: msg,
err: err,
}
}
func (e *ParseConfigError) Error() string {
// Now that ParseConfigError is public and ConnString is available to the developer, perhaps it would be better to only
// return a static string. That would ensure that the error message cannot leak a password. The ConnString field would
// allow access to the original string if desired and Unwrap would allow access to the underlying error.
connString := redactPW(e.ConnString)
if e.err == nil {
return fmt.Sprintf("cannot parse `%s`: %s", connString, e.msg)
}
return fmt.Sprintf("cannot parse `%s`: %s (%s)", connString, e.msg, e.err.Error())
}
func (e *ParseConfigError) Unwrap() error {
return e.err
}
func normalizeTimeoutError(ctx context.Context, err error) error {
var netErr net.Error
if errors.As(err, &netErr) && netErr.Timeout() {
if ctx.Err() == context.Canceled {
// Since the timeout was caused by a context cancellation, the actual error is context.Canceled not the timeout error.
return context.Canceled
} else if ctx.Err() == context.DeadlineExceeded {
return &errTimeout{err: ctx.Err()}
} else {
return &errTimeout{err: netErr}
}
}
return err
}
type pgconnError struct {
msg string
err error
safeToRetry bool
}
func (e *pgconnError) Error() string {
if e.msg == "" {
return e.err.Error()
}
if e.err == nil {
return e.msg
}
return fmt.Sprintf("%s: %s", e.msg, e.err.Error())
}
func (e *pgconnError) SafeToRetry() bool {
return e.safeToRetry
}
func (e *pgconnError) Unwrap() error {
return e.err
}
// errTimeout occurs when an error was caused by a timeout. Specifically, it wraps an error which is
// context.Canceled, context.DeadlineExceeded, or an implementer of net.Error where Timeout() is true.
type errTimeout struct {
err error
}
func (e *errTimeout) Error() string {
return fmt.Sprintf("timeout: %s", e.err.Error())
}
func (e *errTimeout) SafeToRetry() bool {
return SafeToRetry(e.err)
}
func (e *errTimeout) Unwrap() error {
return e.err
}
type contextAlreadyDoneError struct {
err error
}
func (e *contextAlreadyDoneError) Error() string {
return fmt.Sprintf("context already done: %s", e.err.Error())
}
func (e *contextAlreadyDoneError) SafeToRetry() bool {
return true
}
func (e *contextAlreadyDoneError) Unwrap() error {
return e.err
}
// newContextAlreadyDoneError double-wraps a context error in `contextAlreadyDoneError` and `errTimeout`.
func newContextAlreadyDoneError(ctx context.Context) (err error) {
return &errTimeout{&contextAlreadyDoneError{err: ctx.Err()}}
}
func redactPW(connString string) string {
if strings.HasPrefix(connString, "postgres://") || strings.HasPrefix(connString, "postgresql://") {
if u, err := url.Parse(connString); err == nil {
return redactURL(u)
}
}
quotedKV := regexp.MustCompile(`password='[^']*'`)
connString = quotedKV.ReplaceAllLiteralString(connString, "password=xxxxx")
plainKV := regexp.MustCompile(`password=[^ ]*`)
connString = plainKV.ReplaceAllLiteralString(connString, "password=xxxxx")
brokenURL := regexp.MustCompile(`:[^:@]+?@`)
connString = brokenURL.ReplaceAllLiteralString(connString, ":xxxxxx@")
return connString
}
func redactURL(u *url.URL) string {
if u == nil {
return ""
}
if _, pwSet := u.User.Password(); pwSet {
u.User = url.UserPassword(u.User.Username(), "xxxxx")
}
return u.String()
}
type NotPreferredError struct {
err error
safeToRetry bool
}
func (e *NotPreferredError) Error() string {
return fmt.Sprintf("standby server not found: %s", e.err.Error())
}
func (e *NotPreferredError) SafeToRetry() bool {
return e.safeToRetry
}
func (e *NotPreferredError) Unwrap() error {
return e.err
}

pgconn/errors_test.go Normal file (54 lines)

@ -0,0 +1,54 @@
package pgconn_test
import (
"testing"
"github.com/jackc/pgx/v5/pgconn"
"github.com/stretchr/testify/assert"
)
func TestConfigError(t *testing.T) {
tests := []struct {
name string
err error
expectedMsg string
}{
{
name: "url with password",
err: pgconn.NewParseConfigError("postgresql://foo:password@host", "msg", nil),
expectedMsg: "cannot parse `postgresql://foo:xxxxx@host`: msg",
},
{
name: "keyword/value with password unquoted",
err: pgconn.NewParseConfigError("host=host password=password user=user", "msg", nil),
expectedMsg: "cannot parse `host=host password=xxxxx user=user`: msg",
},
{
name: "keyword/value with password quoted",
err: pgconn.NewParseConfigError("host=host password='pass word' user=user", "msg", nil),
expectedMsg: "cannot parse `host=host password=xxxxx user=user`: msg",
},
{
name: "weird url",
err: pgconn.NewParseConfigError("postgresql://foo::password@host:1:", "msg", nil),
expectedMsg: "cannot parse `postgresql://foo:xxxxx@host:1:`: msg",
},
{
name: "weird url with slash in password",
err: pgconn.NewParseConfigError("postgres://user:pass/word@host:5432/db_name", "msg", nil),
expectedMsg: "cannot parse `postgres://user:xxxxxx@host:5432/db_name`: msg",
},
{
name: "url without password",
err: pgconn.NewParseConfigError("postgresql://other@host/db", "msg", nil),
expectedMsg: "cannot parse `postgresql://other@host/db`: msg",
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
assert.EqualError(t, tt.err, tt.expectedMsg)
})
}
}

pgconn/export_test.go Normal file (3 lines)

@ -0,0 +1,3 @@
// File export_test exports some methods for better testing.
package pgconn

pgconn/helper_test.go Normal file (36 lines)

@ -0,0 +1,36 @@
package pgconn_test
import (
"context"
"testing"
"time"
"github.com/jackc/pgx/v5/pgconn"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func closeConn(t testing.TB, conn *pgconn.PgConn) {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
require.NoError(t, conn.Close(ctx))
select {
case <-conn.CleanupDone():
case <-time.After(30 * time.Second):
t.Fatal("Connection cleanup exceeded maximum time")
}
}
// Do a simple query to ensure the connection is still usable
func ensureConnValid(t *testing.T, pgConn *pgconn.PgConn) {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
result := pgConn.ExecParams(ctx, "select generate_series(1,$1)", [][]byte{[]byte("3")}, nil, nil, nil).Read()
cancel()
require.Nil(t, result.Err)
assert.Equal(t, 3, len(result.Rows))
assert.Equal(t, "1", string(result.Rows[0][0]))
assert.Equal(t, "2", string(result.Rows[1][0]))
assert.Equal(t, "3", string(result.Rows[2][0]))
}


@ -0,0 +1,139 @@
// Package bgreader provides an io.Reader that can optionally buffer reads in the background.
package bgreader
import (
"io"
"sync"
"github.com/jackc/pgx/v5/internal/iobufpool"
)
const (
StatusStopped = iota
StatusRunning
StatusStopping
)
// BGReader is an io.Reader that can optionally buffer reads in the background. It is safe for concurrent use.
type BGReader struct {
r io.Reader
cond *sync.Cond
status int32
readResults []readResult
}
type readResult struct {
buf *[]byte
err error
}
// Start starts the background reader. If the background reader is already running this is a no-op. The background
// reader will stop automatically when the underlying reader returns an error.
func (r *BGReader) Start() {
r.cond.L.Lock()
defer r.cond.L.Unlock()
switch r.status {
case StatusStopped:
r.status = StatusRunning
go r.bgRead()
case StatusRunning:
// no-op
case StatusStopping:
r.status = StatusRunning
}
}
// Stop tells the background reader to stop after the in progress Read returns. It is safe to call Stop when the
// background reader is not running.
func (r *BGReader) Stop() {
r.cond.L.Lock()
defer r.cond.L.Unlock()
switch r.status {
case StatusStopped:
// no-op
case StatusRunning:
r.status = StatusStopping
case StatusStopping:
// no-op
}
}
// Status returns the current status of the background reader.
func (r *BGReader) Status() int32 {
r.cond.L.Lock()
defer r.cond.L.Unlock()
return r.status
}
func (r *BGReader) bgRead() {
keepReading := true
for keepReading {
buf := iobufpool.Get(8192)
n, err := r.r.Read(*buf)
*buf = (*buf)[:n]
r.cond.L.Lock()
r.readResults = append(r.readResults, readResult{buf: buf, err: err})
if r.status == StatusStopping || err != nil {
r.status = StatusStopped
keepReading = false
}
r.cond.L.Unlock()
r.cond.Broadcast()
}
}
// Read implements the io.Reader interface.
func (r *BGReader) Read(p []byte) (int, error) {
r.cond.L.Lock()
defer r.cond.L.Unlock()
if len(r.readResults) > 0 {
return r.readFromReadResults(p)
}
// There are no unread background read results and the background reader is stopped.
if r.status == StatusStopped {
return r.r.Read(p)
}
// Wait for results from the background reader
for len(r.readResults) == 0 {
r.cond.Wait()
}
return r.readFromReadResults(p)
}
// readFromReadResults reads a result previously read by the background reader. r.cond.L must be held.
func (r *BGReader) readFromReadResults(p []byte) (int, error) {
buf := r.readResults[0].buf
var err error
n := copy(p, *buf)
if n == len(*buf) {
err = r.readResults[0].err
iobufpool.Put(buf)
if len(r.readResults) == 1 {
r.readResults = nil
} else {
r.readResults = r.readResults[1:]
}
} else {
*buf = (*buf)[n:]
r.readResults[0].buf = buf
}
return n, err
}
func New(r io.Reader) *BGReader {
return &BGReader{
r: r,
cond: &sync.Cond{
L: &sync.Mutex{},
},
}
}


@ -0,0 +1,140 @@
package bgreader_test
import (
"bytes"
"errors"
"io"
"math/rand"
"testing"
"time"
"github.com/jackc/pgx/v5/pgconn/internal/bgreader"
"github.com/stretchr/testify/require"
)
func TestBGReaderReadWhenStopped(t *testing.T) {
r := bytes.NewReader([]byte("foo bar baz"))
bgr := bgreader.New(r)
buf, err := io.ReadAll(bgr)
require.NoError(t, err)
require.Equal(t, []byte("foo bar baz"), buf)
}
func TestBGReaderReadWhenStarted(t *testing.T) {
r := bytes.NewReader([]byte("foo bar baz"))
bgr := bgreader.New(r)
bgr.Start()
buf, err := io.ReadAll(bgr)
require.NoError(t, err)
require.Equal(t, []byte("foo bar baz"), buf)
}
type mockReadFunc func(p []byte) (int, error)
type mockReader struct {
readFuncs []mockReadFunc
}
func (r *mockReader) Read(p []byte) (int, error) {
if len(r.readFuncs) == 0 {
return 0, io.EOF
}
fn := r.readFuncs[0]
r.readFuncs = r.readFuncs[1:]
return fn(p)
}
func TestBGReaderReadWaitsForBackgroundRead(t *testing.T) {
rr := &mockReader{
readFuncs: []mockReadFunc{
func(p []byte) (int, error) { time.Sleep(1 * time.Second); return copy(p, []byte("foo")), nil },
func(p []byte) (int, error) { return copy(p, []byte("bar")), nil },
func(p []byte) (int, error) { return copy(p, []byte("baz")), nil },
},
}
bgr := bgreader.New(rr)
bgr.Start()
buf := make([]byte, 3)
n, err := bgr.Read(buf)
require.NoError(t, err)
require.EqualValues(t, 3, n)
require.Equal(t, []byte("foo"), buf)
}
func TestBGReaderErrorWhenStarted(t *testing.T) {
rr := &mockReader{
readFuncs: []mockReadFunc{
func(p []byte) (int, error) { return copy(p, []byte("foo")), nil },
func(p []byte) (int, error) { return copy(p, []byte("bar")), nil },
func(p []byte) (int, error) { return copy(p, []byte("baz")), errors.New("oops") },
},
}
bgr := bgreader.New(rr)
bgr.Start()
buf, err := io.ReadAll(bgr)
require.Equal(t, []byte("foobarbaz"), buf)
require.EqualError(t, err, "oops")
}
func TestBGReaderErrorWhenStopped(t *testing.T) {
rr := &mockReader{
readFuncs: []mockReadFunc{
func(p []byte) (int, error) { return copy(p, []byte("foo")), nil },
func(p []byte) (int, error) { return copy(p, []byte("bar")), nil },
func(p []byte) (int, error) { return copy(p, []byte("baz")), errors.New("oops") },
},
}
bgr := bgreader.New(rr)
buf, err := io.ReadAll(bgr)
require.Equal(t, []byte("foobarbaz"), buf)
require.EqualError(t, err, "oops")
}
type numberReader struct {
v uint8
rng *rand.Rand
}
func (nr *numberReader) Read(p []byte) (int, error) {
n := nr.rng.Intn(len(p))
for i := 0; i < n; i++ {
p[i] = nr.v
nr.v++
}
return n, nil
}
// TestBGReaderStress stress tests BGReader by reading a lot of bytes in random sizes while randomly starting and
// stopping the background worker from other goroutines.
func TestBGReaderStress(t *testing.T) {
nr := &numberReader{rng: rand.New(rand.NewSource(0))}
bgr := bgreader.New(nr)
bytesRead := 0
var expected uint8
buf := make([]byte, 10_000)
rng := rand.New(rand.NewSource(0))
for bytesRead < 1_000_000 {
randomNumber := rng.Intn(100)
switch {
case randomNumber < 10:
go bgr.Start()
case randomNumber < 20:
go bgr.Stop()
default:
n, err := bgr.Read(buf)
require.NoError(t, err)
for i := 0; i < n; i++ {
require.Equal(t, expected, buf[i])
expected++
}
bytesRead += n
}
}
}

pgconn/krb5.go Normal file (100 lines)

@ -0,0 +1,100 @@
package pgconn
import (
"errors"
"fmt"
"github.com/jackc/pgx/v5/pgproto3"
)
// NewGSSFunc creates a GSS authentication provider, for use with
// RegisterGSSProvider.
type NewGSSFunc func() (GSS, error)
var newGSS NewGSSFunc
// RegisterGSSProvider registers a GSS authentication provider. For example, if
// you need to use Kerberos to authenticate with your server, add this to your
// main package:
//
// import "github.com/otan/gopgkrb5"
//
// func init() {
// pgconn.RegisterGSSProvider(func() (pgconn.GSS, error) { return gopgkrb5.NewGSS() })
// }
func RegisterGSSProvider(newGSSArg NewGSSFunc) {
newGSS = newGSSArg
}
// GSS provides GSSAPI authentication (e.g., Kerberos).
type GSS interface {
GetInitToken(host, service string) ([]byte, error)
GetInitTokenFromSPN(spn string) ([]byte, error)
Continue(inToken []byte) (done bool, outToken []byte, err error)
}
func (c *PgConn) gssAuth() error {
if newGSS == nil {
return errors.New("kerberos error: no GSSAPI provider registered, see https://github.com/otan/gopgkrb5")
}
cli, err := newGSS()
if err != nil {
return err
}
var nextData []byte
if c.config.KerberosSpn != "" {
// Use the supplied SPN if provided.
nextData, err = cli.GetInitTokenFromSPN(c.config.KerberosSpn)
} else {
// Allow the kerberos service name to be overridden
service := "postgres"
if c.config.KerberosSrvName != "" {
service = c.config.KerberosSrvName
}
nextData, err = cli.GetInitToken(c.config.Host, service)
}
if err != nil {
return err
}
for {
gssResponse := &pgproto3.GSSResponse{
Data: nextData,
}
c.frontend.Send(gssResponse)
err = c.flushWithPotentialWriteReadDeadlock()
if err != nil {
return err
}
resp, err := c.rxGSSContinue()
if err != nil {
return err
}
var done bool
done, nextData, err = cli.Continue(resp.Data)
if err != nil {
return err
}
if done {
break
}
}
return nil
}
func (c *PgConn) rxGSSContinue() (*pgproto3.AuthenticationGSSContinue, error) {
msg, err := c.receiveMessage()
if err != nil {
return nil, err
}
switch m := msg.(type) {
case *pgproto3.AuthenticationGSSContinue:
return m, nil
case *pgproto3.ErrorResponse:
return nil, ErrorResponseToPgError(m)
}
return nil, fmt.Errorf("expected AuthenticationGSSContinue message but received unexpected message %T", msg)
}

pgconn/pgconn.go Normal file (2504 lines)

File diff suppressed because it is too large.

@ -0,0 +1,41 @@
package pgconn
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestCommandTag(t *testing.T) {
t.Parallel()
tests := []struct {
commandTag CommandTag
rowsAffected int64
isInsert bool
isUpdate bool
isDelete bool
isSelect bool
}{
{commandTag: CommandTag{s: "INSERT 0 5"}, rowsAffected: 5, isInsert: true},
{commandTag: CommandTag{s: "UPDATE 0"}, rowsAffected: 0, isUpdate: true},
{commandTag: CommandTag{s: "UPDATE 1"}, rowsAffected: 1, isUpdate: true},
{commandTag: CommandTag{s: "DELETE 0"}, rowsAffected: 0, isDelete: true},
{commandTag: CommandTag{s: "DELETE 1"}, rowsAffected: 1, isDelete: true},
{commandTag: CommandTag{s: "DELETE 1234567890"}, rowsAffected: 1234567890, isDelete: true},
{commandTag: CommandTag{s: "SELECT 1"}, rowsAffected: 1, isSelect: true},
{commandTag: CommandTag{s: "SELECT 99999999999"}, rowsAffected: 99999999999, isSelect: true},
{commandTag: CommandTag{s: "CREATE TABLE"}, rowsAffected: 0},
{commandTag: CommandTag{s: "ALTER TABLE"}, rowsAffected: 0},
{commandTag: CommandTag{s: "DROP TABLE"}, rowsAffected: 0},
}
for i, tt := range tests {
ct := tt.commandTag
assert.Equalf(t, tt.rowsAffected, ct.RowsAffected(), "%d. %v", i, tt.commandTag)
assert.Equalf(t, tt.isInsert, ct.Insert(), "%d. %v", i, tt.commandTag)
assert.Equalf(t, tt.isUpdate, ct.Update(), "%d. %v", i, tt.commandTag)
assert.Equalf(t, tt.isDelete, ct.Delete(), "%d. %v", i, tt.commandTag)
assert.Equalf(t, tt.isSelect, ct.Select(), "%d. %v", i, tt.commandTag)
}
}


@ -0,0 +1,90 @@
package pgconn_test
import (
"context"
"math/rand"
"os"
"runtime"
"strconv"
"testing"
"github.com/jackc/pgx/v5/pgconn"
"github.com/stretchr/testify/require"
)
func TestConnStress(t *testing.T) {
pgConn, err := pgconn.Connect(context.Background(), os.Getenv("PGX_TEST_DATABASE"))
require.NoError(t, err)
defer closeConn(t, pgConn)
actionCount := 10000
if s := os.Getenv("PGX_TEST_STRESS_FACTOR"); s != "" {
stressFactor, err := strconv.ParseInt(s, 10, 64)
require.Nil(t, err, "Failed to parse PGX_TEST_STRESS_FACTOR")
actionCount *= int(stressFactor)
}
setupStressDB(t, pgConn)
actions := []struct {
name string
fn func(*pgconn.PgConn) error
}{
{"Exec Select", stressExecSelect},
{"ExecParams Select", stressExecParamsSelect},
{"Batch", stressBatch},
}
for i := 0; i < actionCount; i++ {
action := actions[rand.Intn(len(actions))]
err := action.fn(pgConn)
require.Nilf(t, err, "%d: %s", i, action.name)
}
// Each call with a context starts a goroutine. Ensure they are cleaned up when context is not canceled.
numGoroutine := runtime.NumGoroutine()
require.Truef(t, numGoroutine < 1000, "goroutines appear to be orphaned: %d in process", numGoroutine)
}
func setupStressDB(t *testing.T, pgConn *pgconn.PgConn) {
_, err := pgConn.Exec(context.Background(), `
create temporary table widgets(
id serial primary key,
name varchar not null,
description text,
creation_time timestamptz default now()
);
insert into widgets(name, description) values
('Foo', 'bar'),
('baz', 'Something really long Something really long Something really long Something really long Something really long'),
('a', 'b')`).ReadAll()
require.NoError(t, err)
}
func stressExecSelect(pgConn *pgconn.PgConn) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_, err := pgConn.Exec(ctx, "select * from widgets").ReadAll()
return err
}
func stressExecParamsSelect(pgConn *pgconn.PgConn) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
result := pgConn.ExecParams(ctx, "select * from widgets where id < $1", [][]byte{[]byte("10")}, nil, nil, nil).Read()
return result.Err
}
func stressBatch(pgConn *pgconn.PgConn) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
batch := &pgconn.Batch{}
batch.ExecParams("select * from widgets", nil, nil, nil, nil)
batch.ExecParams("select * from widgets where id < $1", [][]byte{[]byte("10")}, nil, nil, nil)
_, err := pgConn.ExecBatch(ctx, batch).ReadAll()
return err
}

pgconn/pgconn_test.go Normal file (4293 lines)

File diff suppressed because it is too large.

pgproto3/README.md Normal file (7 lines)

@ -0,0 +1,7 @@
# pgproto3
Package pgproto3 is an encoder and decoder of the PostgreSQL wire protocol version 3.
pgproto3 can be used as a foundation for PostgreSQL drivers, proxies, mock servers, load balancers and more.
See example/pgfortune for a playful example of a fake PostgreSQL server.


@ -0,0 +1,50 @@
package pgproto3
import (
"encoding/binary"
"encoding/json"
"errors"
"github.com/jackc/pgx/v5/internal/pgio"
)
// AuthenticationCleartextPassword is a message sent from the backend indicating that a clear-text password is required.
type AuthenticationCleartextPassword struct{}
// Backend identifies this message as sendable by the PostgreSQL backend.
func (*AuthenticationCleartextPassword) Backend() {}
// AuthenticationResponse identifies this message as an authentication response.
func (*AuthenticationCleartextPassword) AuthenticationResponse() {}
// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
// type identifier and 4 byte message length.
func (dst *AuthenticationCleartextPassword) Decode(src []byte) error {
if len(src) != 4 {
return errors.New("bad authentication message size")
}
authType := binary.BigEndian.Uint32(src)
if authType != AuthTypeCleartextPassword {
return errors.New("bad auth type")
}
return nil
}
// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
func (src *AuthenticationCleartextPassword) Encode(dst []byte) ([]byte, error) {
dst, sp := beginMessage(dst, 'R')
dst = pgio.AppendUint32(dst, AuthTypeCleartextPassword)
return finishMessage(dst, sp)
}
// MarshalJSON implements encoding/json.Marshaler.
func (src AuthenticationCleartextPassword) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Type string
}{
Type: "AuthenticationCleartextPassword",
})
}

@@ -0,0 +1,58 @@
package pgproto3
import (
"encoding/binary"
"encoding/json"
"errors"
"github.com/jackc/pgx/v5/internal/pgio"
)
type AuthenticationGSS struct{}
func (a *AuthenticationGSS) Backend() {}
func (a *AuthenticationGSS) AuthenticationResponse() {}
func (a *AuthenticationGSS) Decode(src []byte) error {
if len(src) < 4 {
return errors.New("authentication message too short")
}
authType := binary.BigEndian.Uint32(src)
if authType != AuthTypeGSS {
return errors.New("bad auth type")
}
return nil
}
func (a *AuthenticationGSS) Encode(dst []byte) ([]byte, error) {
dst, sp := beginMessage(dst, 'R')
dst = pgio.AppendUint32(dst, AuthTypeGSS)
return finishMessage(dst, sp)
}
func (a *AuthenticationGSS) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Type string
Data []byte
}{
Type: "AuthenticationGSS",
})
}
func (a *AuthenticationGSS) UnmarshalJSON(data []byte) error {
// Ignore null, like in the main JSON package.
if string(data) == "null" {
return nil
}
var msg struct {
Type string
}
if err := json.Unmarshal(data, &msg); err != nil {
return err
}
return nil
}

@@ -0,0 +1,67 @@
package pgproto3
import (
"encoding/binary"
"encoding/json"
"errors"
"github.com/jackc/pgx/v5/internal/pgio"
)
type AuthenticationGSSContinue struct {
Data []byte
}
func (a *AuthenticationGSSContinue) Backend() {}
func (a *AuthenticationGSSContinue) AuthenticationResponse() {}
func (a *AuthenticationGSSContinue) Decode(src []byte) error {
if len(src) < 4 {
return errors.New("authentication message too short")
}
authType := binary.BigEndian.Uint32(src)
if authType != AuthTypeGSSCont {
return errors.New("bad auth type")
}
a.Data = src[4:]
return nil
}
func (a *AuthenticationGSSContinue) Encode(dst []byte) ([]byte, error) {
dst, sp := beginMessage(dst, 'R')
dst = pgio.AppendUint32(dst, AuthTypeGSSCont)
dst = append(dst, a.Data...)
return finishMessage(dst, sp)
}
func (a *AuthenticationGSSContinue) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Type string
Data []byte
}{
Type: "AuthenticationGSSContinue",
Data: a.Data,
})
}
func (a *AuthenticationGSSContinue) UnmarshalJSON(data []byte) error {
// Ignore null, like in the main JSON package.
if string(data) == "null" {
return nil
}
var msg struct {
Type string
Data []byte
}
if err := json.Unmarshal(data, &msg); err != nil {
return err
}
a.Data = msg.Data
return nil
}

@@ -0,0 +1,76 @@
package pgproto3
import (
"encoding/binary"
"encoding/json"
"errors"
"github.com/jackc/pgx/v5/internal/pgio"
)
// AuthenticationMD5Password is a message sent from the backend indicating that an MD5 hashed password is required.
type AuthenticationMD5Password struct {
Salt [4]byte
}
// Backend identifies this message as sendable by the PostgreSQL backend.
func (*AuthenticationMD5Password) Backend() {}
// AuthenticationResponse identifies this message as an authentication response.
func (*AuthenticationMD5Password) AuthenticationResponse() {}
// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
// type identifier and 4 byte message length.
func (dst *AuthenticationMD5Password) Decode(src []byte) error {
if len(src) != 8 {
return errors.New("bad authentication message size")
}
authType := binary.BigEndian.Uint32(src)
if authType != AuthTypeMD5Password {
return errors.New("bad auth type")
}
copy(dst.Salt[:], src[4:8])
return nil
}
// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
func (src *AuthenticationMD5Password) Encode(dst []byte) ([]byte, error) {
dst, sp := beginMessage(dst, 'R')
dst = pgio.AppendUint32(dst, AuthTypeMD5Password)
dst = append(dst, src.Salt[:]...)
return finishMessage(dst, sp)
}
// MarshalJSON implements encoding/json.Marshaler.
func (src AuthenticationMD5Password) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Type string
Salt [4]byte
}{
Type: "AuthenticationMD5Password",
Salt: src.Salt,
})
}
// UnmarshalJSON implements encoding/json.Unmarshaler.
func (dst *AuthenticationMD5Password) UnmarshalJSON(data []byte) error {
// Ignore null, like in the main JSON package.
if string(data) == "null" {
return nil
}
var msg struct {
Type string
Salt [4]byte
}
if err := json.Unmarshal(data, &msg); err != nil {
return err
}
dst.Salt = msg.Salt
return nil
}
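As context for the Salt field above, a client answers AuthenticationMD5Password with a PasswordMessage derived from the user name, password, and salt. The sketch below (not part of this diff) follows the documented PostgreSQL MD5 scheme; the concrete user and password values are made up.

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// md5Response computes "md5" + md5hex(md5hex(password + user) + salt),
// which is the value the frontend sends back in a PasswordMessage.
func md5Response(user, password string, salt [4]byte) string {
	inner := md5.Sum([]byte(password + user))
	outer := md5.Sum(append([]byte(hex.EncodeToString(inner[:])), salt[:]...))
	return "md5" + hex.EncodeToString(outer[:])
}

func main() {
	salt := [4]byte{0x01, 0x02, 0x03, 0x04} // would come from AuthenticationMD5Password.Salt
	fmt.Println(md5Response("tester", "secret", salt))
}
```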

@@ -0,0 +1,50 @@
package pgproto3
import (
"encoding/binary"
"encoding/json"
"errors"
"github.com/jackc/pgx/v5/internal/pgio"
)
// AuthenticationOk is a message sent from the backend indicating that authentication was successful.
type AuthenticationOk struct{}
// Backend identifies this message as sendable by the PostgreSQL backend.
func (*AuthenticationOk) Backend() {}
// AuthenticationResponse identifies this message as an authentication response.
func (*AuthenticationOk) AuthenticationResponse() {}
// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
// type identifier and 4 byte message length.
func (dst *AuthenticationOk) Decode(src []byte) error {
if len(src) != 4 {
return errors.New("bad authentication message size")
}
authType := binary.BigEndian.Uint32(src)
if authType != AuthTypeOk {
return errors.New("bad auth type")
}
return nil
}
// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
func (src *AuthenticationOk) Encode(dst []byte) ([]byte, error) {
dst, sp := beginMessage(dst, 'R')
dst = pgio.AppendUint32(dst, AuthTypeOk)
return finishMessage(dst, sp)
}
// MarshalJSON implements encoding/json.Marshaler.
func (src AuthenticationOk) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Type string
}{
Type: "AuthenticationOK",
})
}

@@ -0,0 +1,72 @@
package pgproto3
import (
"bytes"
"encoding/binary"
"encoding/json"
"errors"
"github.com/jackc/pgx/v5/internal/pgio"
)
// AuthenticationSASL is a message sent from the backend indicating that SASL authentication is required.
type AuthenticationSASL struct {
AuthMechanisms []string
}
// Backend identifies this message as sendable by the PostgreSQL backend.
func (*AuthenticationSASL) Backend() {}
// AuthenticationResponse identifies this message as an authentication response.
func (*AuthenticationSASL) AuthenticationResponse() {}
// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
// type identifier and 4 byte message length.
func (dst *AuthenticationSASL) Decode(src []byte) error {
if len(src) < 4 {
return errors.New("authentication message too short")
}
authType := binary.BigEndian.Uint32(src)
if authType != AuthTypeSASL {
return errors.New("bad auth type")
}
authMechanisms := src[4:]
for len(authMechanisms) > 1 {
idx := bytes.IndexByte(authMechanisms, 0)
if idx == -1 {
return &invalidMessageFormatErr{messageType: "AuthenticationSASL", details: "unterminated string"}
}
dst.AuthMechanisms = append(dst.AuthMechanisms, string(authMechanisms[:idx]))
authMechanisms = authMechanisms[idx+1:]
}
return nil
}
// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
func (src *AuthenticationSASL) Encode(dst []byte) ([]byte, error) {
dst, sp := beginMessage(dst, 'R')
dst = pgio.AppendUint32(dst, AuthTypeSASL)
for _, s := range src.AuthMechanisms {
dst = append(dst, []byte(s)...)
dst = append(dst, 0)
}
dst = append(dst, 0)
return finishMessage(dst, sp)
}
// MarshalJSON implements encoding/json.Marshaler.
func (src AuthenticationSASL) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Type string
AuthMechanisms []string
}{
Type: "AuthenticationSASL",
AuthMechanisms: src.AuthMechanisms,
})
}
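To make the mechanism-list framing parsed by Decode above concrete, the following sketch (not part of this diff) hand-builds a message body: a 4-byte auth type followed by NUL-terminated mechanism names and a final terminating NUL. The byte values are illustrative.

```go
package main

import (
	"encoding/binary"
	"fmt"
	"log"

	"github.com/jackc/pgx/v5/pgproto3"
)

func main() {
	// Body of an AuthenticationSASL message, without the leading 'R' type byte
	// and the 4-byte length: auth type, then "SCRAM-SHA-256\x00" and a final NUL.
	body := make([]byte, 4)
	binary.BigEndian.PutUint32(body, pgproto3.AuthTypeSASL)
	body = append(body, "SCRAM-SHA-256"...)
	body = append(body, 0, 0)

	var msg pgproto3.AuthenticationSASL
	if err := msg.Decode(body); err != nil {
		log.Fatal(err)
	}
	fmt.Println(msg.AuthMechanisms) // [SCRAM-SHA-256]
}
```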

@@ -0,0 +1,75 @@
package pgproto3
import (
"encoding/binary"
"encoding/json"
"errors"
"github.com/jackc/pgx/v5/internal/pgio"
)
// AuthenticationSASLContinue is a message sent from the backend containing a SASL challenge.
type AuthenticationSASLContinue struct {
Data []byte
}
// Backend identifies this message as sendable by the PostgreSQL backend.
func (*AuthenticationSASLContinue) Backend() {}
// AuthenticationResponse identifies this message as an authentication response.
func (*AuthenticationSASLContinue) AuthenticationResponse() {}
// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
// type identifier and 4 byte message length.
func (dst *AuthenticationSASLContinue) Decode(src []byte) error {
if len(src) < 4 {
return errors.New("authentication message too short")
}
authType := binary.BigEndian.Uint32(src)
if authType != AuthTypeSASLContinue {
return errors.New("bad auth type")
}
dst.Data = src[4:]
return nil
}
// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
func (src *AuthenticationSASLContinue) Encode(dst []byte) ([]byte, error) {
dst, sp := beginMessage(dst, 'R')
dst = pgio.AppendUint32(dst, AuthTypeSASLContinue)
dst = append(dst, src.Data...)
return finishMessage(dst, sp)
}
// MarshalJSON implements encoding/json.Marshaler.
func (src AuthenticationSASLContinue) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Type string
Data string
}{
Type: "AuthenticationSASLContinue",
Data: string(src.Data),
})
}
// UnmarshalJSON implements encoding/json.Unmarshaler.
func (dst *AuthenticationSASLContinue) UnmarshalJSON(data []byte) error {
// Ignore null, like in the main JSON package.
if string(data) == "null" {
return nil
}
var msg struct {
Data string
}
if err := json.Unmarshal(data, &msg); err != nil {
return err
}
dst.Data = []byte(msg.Data)
return nil
}

@@ -0,0 +1,75 @@
package pgproto3
import (
"encoding/binary"
"encoding/json"
"errors"
"github.com/jackc/pgx/v5/internal/pgio"
)
// AuthenticationSASLFinal is a message sent from the backend indicating a SASL authentication has completed.
type AuthenticationSASLFinal struct {
Data []byte
}
// Backend identifies this message as sendable by the PostgreSQL backend.
func (*AuthenticationSASLFinal) Backend() {}
// AuthenticationResponse identifies this message as an authentication response.
func (*AuthenticationSASLFinal) AuthenticationResponse() {}
// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
// type identifier and 4 byte message length.
func (dst *AuthenticationSASLFinal) Decode(src []byte) error {
if len(src) < 4 {
return errors.New("authentication message too short")
}
authType := binary.BigEndian.Uint32(src)
if authType != AuthTypeSASLFinal {
return errors.New("bad auth type")
}
dst.Data = src[4:]
return nil
}
// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
func (src *AuthenticationSASLFinal) Encode(dst []byte) ([]byte, error) {
dst, sp := beginMessage(dst, 'R')
dst = pgio.AppendUint32(dst, AuthTypeSASLFinal)
dst = append(dst, src.Data...)
return finishMessage(dst, sp)
}
// MarshalJSON implements encoding/json.Marshaler.
func (src AuthenticationSASLFinal) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Type string
Data string
}{
Type: "AuthenticationSASLFinal",
Data: string(src.Data),
})
}
// UnmarshalJSON implements encoding/json.Unmarshaler.
func (dst *AuthenticationSASLFinal) UnmarshalJSON(data []byte) error {
// Ignore null, like in the main JSON package.
if string(data) == "null" {
return nil
}
var msg struct {
Data string
}
if err := json.Unmarshal(data, &msg); err != nil {
return err
}
dst.Data = []byte(msg.Data)
return nil
}

pgproto3/backend.go (299 lines)

@@ -0,0 +1,299 @@
package pgproto3
import (
"bytes"
"encoding/binary"
"fmt"
"io"
)
// Backend acts as a server for the PostgreSQL wire protocol version 3.
type Backend struct {
cr *chunkReader
w io.Writer
// tracer is used to trace messages when Send or Receive is called. This means an outbound message is traced
// before it is actually transmitted (i.e. before Flush).
tracer *tracer
wbuf []byte
encodeError error
// Frontend message flyweights
bind Bind
cancelRequest CancelRequest
_close Close
copyFail CopyFail
copyData CopyData
copyDone CopyDone
describe Describe
execute Execute
flush Flush
functionCall FunctionCall
gssEncRequest GSSEncRequest
parse Parse
query Query
sslRequest SSLRequest
startupMessage StartupMessage
sync Sync
terminate Terminate
bodyLen int
maxBodyLen int // maxBodyLen is the maximum length of a message body in octets. If a message body exceeds this length, Receive will return an error.
msgType byte
partialMsg bool
authType uint32
}
const (
minStartupPacketLen = 4 // minStartupPacketLen is a single 32-bit int version or code.
maxStartupPacketLen = 10000 // maxStartupPacketLen is MAX_STARTUP_PACKET_LENGTH from PG source.
)
// NewBackend creates a new Backend.
func NewBackend(r io.Reader, w io.Writer) *Backend {
cr := newChunkReader(r, 0)
return &Backend{cr: cr, w: w}
}
// Send sends a message to the frontend (i.e. the client). The message is buffered until Flush is called. Any error
// encountered will be returned from Flush.
func (b *Backend) Send(msg BackendMessage) {
if b.encodeError != nil {
return
}
prevLen := len(b.wbuf)
newBuf, err := msg.Encode(b.wbuf)
if err != nil {
b.encodeError = err
return
}
b.wbuf = newBuf
if b.tracer != nil {
b.tracer.traceMessage('B', int32(len(b.wbuf)-prevLen), msg)
}
}
// Flush writes any pending messages to the frontend (i.e. the client).
func (b *Backend) Flush() error {
if err := b.encodeError; err != nil {
b.encodeError = nil
b.wbuf = b.wbuf[:0]
return &writeError{err: err, safeToRetry: true}
}
n, err := b.w.Write(b.wbuf)
const maxLen = 1024
if len(b.wbuf) > maxLen {
b.wbuf = make([]byte, 0, maxLen)
} else {
b.wbuf = b.wbuf[:0]
}
if err != nil {
return &writeError{err: err, safeToRetry: n == 0}
}
return nil
}
// Trace starts tracing the message traffic to w. It writes in a similar format to that produced by the libpq function
// PQtrace.
func (b *Backend) Trace(w io.Writer, options TracerOptions) {
b.tracer = &tracer{
w: w,
buf: &bytes.Buffer{},
TracerOptions: options,
}
}
// Untrace stops tracing.
func (b *Backend) Untrace() {
b.tracer = nil
}
// ReceiveStartupMessage receives the initial connection message. This method is used instead of the normal Receive
// method because the initial connection message is "special" and does not include the message type as the first byte.
// This will return either a StartupMessage, SSLRequest, GSSEncRequest, or CancelRequest.
func (b *Backend) ReceiveStartupMessage() (FrontendMessage, error) {
buf, err := b.cr.Next(4)
if err != nil {
return nil, err
}
msgSize := int(binary.BigEndian.Uint32(buf) - 4)
if msgSize < minStartupPacketLen || msgSize > maxStartupPacketLen {
return nil, fmt.Errorf("invalid length of startup packet: %d", msgSize)
}
buf, err = b.cr.Next(msgSize)
if err != nil {
return nil, translateEOFtoErrUnexpectedEOF(err)
}
code := binary.BigEndian.Uint32(buf)
switch code {
case ProtocolVersionNumber:
err = b.startupMessage.Decode(buf)
if err != nil {
return nil, err
}
return &b.startupMessage, nil
case sslRequestNumber:
err = b.sslRequest.Decode(buf)
if err != nil {
return nil, err
}
return &b.sslRequest, nil
case cancelRequestCode:
err = b.cancelRequest.Decode(buf)
if err != nil {
return nil, err
}
return &b.cancelRequest, nil
case gssEncReqNumber:
err = b.gssEncRequest.Decode(buf)
if err != nil {
return nil, err
}
return &b.gssEncRequest, nil
default:
return nil, fmt.Errorf("unknown startup message code: %d", code)
}
}
// Receive receives a message from the frontend. The returned message is only valid until the next call to Receive.
func (b *Backend) Receive() (FrontendMessage, error) {
if !b.partialMsg {
header, err := b.cr.Next(5)
if err != nil {
return nil, translateEOFtoErrUnexpectedEOF(err)
}
b.msgType = header[0]
msgLength := int(binary.BigEndian.Uint32(header[1:]))
if msgLength < 4 {
return nil, fmt.Errorf("invalid message length: %d", msgLength)
}
b.bodyLen = msgLength - 4
if b.maxBodyLen > 0 && b.bodyLen > b.maxBodyLen {
return nil, &ExceededMaxBodyLenErr{b.maxBodyLen, b.bodyLen}
}
b.partialMsg = true
}
var msg FrontendMessage
switch b.msgType {
case 'B':
msg = &b.bind
case 'C':
msg = &b._close
case 'D':
msg = &b.describe
case 'E':
msg = &b.execute
case 'F':
msg = &b.functionCall
case 'f':
msg = &b.copyFail
case 'd':
msg = &b.copyData
case 'c':
msg = &b.copyDone
case 'H':
msg = &b.flush
case 'P':
msg = &b.parse
case 'p':
switch b.authType {
case AuthTypeSASL:
msg = &SASLInitialResponse{}
case AuthTypeSASLContinue:
msg = &SASLResponse{}
case AuthTypeSASLFinal:
msg = &SASLResponse{}
case AuthTypeGSS, AuthTypeGSSCont:
msg = &GSSResponse{}
case AuthTypeCleartextPassword, AuthTypeMD5Password:
fallthrough
default:
// to maintain backwards compatibility
msg = &PasswordMessage{}
}
case 'Q':
msg = &b.query
case 'S':
msg = &b.sync
case 'X':
msg = &b.terminate
default:
return nil, fmt.Errorf("unknown message type: %c", b.msgType)
}
msgBody, err := b.cr.Next(b.bodyLen)
if err != nil {
return nil, translateEOFtoErrUnexpectedEOF(err)
}
b.partialMsg = false
err = msg.Decode(msgBody)
if err != nil {
return nil, err
}
if b.tracer != nil {
b.tracer.traceMessage('F', int32(5+len(msgBody)), msg)
}
return msg, nil
}
// SetAuthType sets the authentication type in the backend.
// Since multiple message types can start with 'p', SetAuthType allows
// contextual identification of FrontendMessages. For example, in the
// PG message flow documentation for PasswordMessage:
//
// Byte1('p')
//
// Identifies the message as a password response. Note that this is also used for
// GSSAPI, SSPI and SASL response messages. The exact message type can be deduced from
// the context.
//
// Since the Frontend does not know about the state of a backend, it is important
// to call SetAuthType() after an authentication request is received by the Frontend.
func (b *Backend) SetAuthType(authType uint32) error {
switch authType {
case AuthTypeOk,
AuthTypeCleartextPassword,
AuthTypeMD5Password,
AuthTypeSCMCreds,
AuthTypeGSS,
AuthTypeGSSCont,
AuthTypeSSPI,
AuthTypeSASL,
AuthTypeSASLContinue,
AuthTypeSASLFinal:
b.authType = authType
default:
return fmt.Errorf("authType not recognized: %d", authType)
}
return nil
}
// SetMaxBodyLen sets the maximum length of a message body in octets.
// If a message body exceeds this length, Receive will return an error.
// This is useful for protecting against malicious clients that send
// large messages with the intent of causing memory exhaustion.
// The default value is 0.
// If maxBodyLen is 0, then no maximum is enforced.
func (b *Backend) SetMaxBodyLen(maxBodyLen int) {
b.maxBodyLen = maxBodyLen
}
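Putting the pieces of Backend together, the sketch below (not part of this diff) shows a toy server that buffers replies with Send, transmits them with Flush, caps incoming message bodies with SetMaxBodyLen, and calls SetAuthType before reading the client's password response. It deliberately ignores SSLRequest and CancelRequest handling, and every name outside the pgproto3 API is invented for the example.

```go
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/jackc/pgx/v5/pgproto3"
)

func handleConn(conn net.Conn) error {
	defer conn.Close()
	backend := pgproto3.NewBackend(conn, conn)
	backend.SetMaxBodyLen(1 << 20) // reject message bodies larger than 1 MiB

	// For simplicity, assume the client sends a plain StartupMessage
	// (no SSLRequest or CancelRequest handling here).
	if _, err := backend.ReceiveStartupMessage(); err != nil {
		return err
	}

	// Ask for a cleartext password; Send only buffers, Flush transmits.
	backend.Send(&pgproto3.AuthenticationCleartextPassword{})
	if err := backend.Flush(); err != nil {
		return err
	}

	// Tell Receive that the next 'p' message is a PasswordMessage.
	if err := backend.SetAuthType(pgproto3.AuthTypeCleartextPassword); err != nil {
		return err
	}
	msg, err := backend.Receive()
	if err != nil {
		return err
	}
	if pw, ok := msg.(*pgproto3.PasswordMessage); ok {
		fmt.Println("received password of length", len(pw.Password))
	}

	backend.Send(&pgproto3.AuthenticationOk{})
	backend.Send(&pgproto3.ReadyForQuery{TxStatus: 'I'})
	return backend.Flush()
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:15432")
	if err != nil {
		log.Fatal(err)
	}
	for {
		conn, err := ln.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go func() {
			if err := handleConn(conn); err != nil {
				log.Println(err)
			}
		}()
	}
}
```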

@@ -0,0 +1,50 @@
package pgproto3
import (
"encoding/binary"
"encoding/json"
"github.com/jackc/pgx/v5/internal/pgio"
)
type BackendKeyData struct {
ProcessID uint32
SecretKey uint32
}
// Backend identifies this message as sendable by the PostgreSQL backend.
func (*BackendKeyData) Backend() {}
// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
// type identifier and 4 byte message length.
func (dst *BackendKeyData) Decode(src []byte) error {
if len(src) != 8 {
return &invalidMessageLenErr{messageType: "BackendKeyData", expectedLen: 8, actualLen: len(src)}
}
dst.ProcessID = binary.BigEndian.Uint32(src[:4])
dst.SecretKey = binary.BigEndian.Uint32(src[4:])
return nil
}
// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
func (src *BackendKeyData) Encode(dst []byte) ([]byte, error) {
dst, sp := beginMessage(dst, 'K')
dst = pgio.AppendUint32(dst, src.ProcessID)
dst = pgio.AppendUint32(dst, src.SecretKey)
return finishMessage(dst, sp)
}
// MarshalJSON implements encoding/json.Marshaler.
func (src BackendKeyData) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Type string
ProcessID uint32
SecretKey uint32
}{
Type: "BackendKeyData",
ProcessID: src.ProcessID,
SecretKey: src.SecretKey,
})
}

pgproto3/backend_test.go (140 lines)

@@ -0,0 +1,140 @@
package pgproto3_test
import (
"io"
"testing"
"github.com/jackc/pgx/v5/internal/pgio"
"github.com/jackc/pgx/v5/pgproto3"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestBackendReceiveInterrupted(t *testing.T) {
t.Parallel()
server := &interruptReader{}
server.push([]byte{'Q', 0, 0, 0, 6})
backend := pgproto3.NewBackend(server, nil)
msg, err := backend.Receive()
if err == nil {
t.Fatal("expected err")
}
if msg != nil {
t.Fatalf("did not expect msg, but %v", msg)
}
server.push([]byte{'I', 0})
msg, err = backend.Receive()
if err != nil {
t.Fatal(err)
}
if msg, ok := msg.(*pgproto3.Query); !ok || msg.String != "I" {
t.Fatalf("unexpected msg: %v", msg)
}
}
func TestBackendReceiveUnexpectedEOF(t *testing.T) {
t.Parallel()
server := &interruptReader{}
server.push([]byte{'Q', 0, 0, 0, 6})
backend := pgproto3.NewBackend(server, nil)
// Receive regular msg
msg, err := backend.Receive()
assert.Nil(t, msg)
assert.Equal(t, io.ErrUnexpectedEOF, err)
// Receive StartupMessage msg
dst := []byte{}
dst = pgio.AppendUint32(dst, 1000) // tell the backend we expect 1000 bytes to be read
dst = pgio.AppendUint32(dst, 1) // only send 1 byte
server.push(dst)
msg, err = backend.ReceiveStartupMessage()
assert.Nil(t, msg)
assert.Equal(t, io.ErrUnexpectedEOF, err)
}
func TestStartupMessage(t *testing.T) {
t.Parallel()
t.Run("valid StartupMessage", func(t *testing.T) {
want := &pgproto3.StartupMessage{
ProtocolVersion: pgproto3.ProtocolVersionNumber,
Parameters: map[string]string{
"username": "tester",
},
}
dst, err := want.Encode([]byte{})
require.NoError(t, err)
server := &interruptReader{}
server.push(dst)
backend := pgproto3.NewBackend(server, nil)
msg, err := backend.ReceiveStartupMessage()
require.NoError(t, err)
require.Equal(t, want, msg)
})
t.Run("invalid packet length", func(t *testing.T) {
wantErr := "invalid length of startup packet"
tests := []struct {
name string
packetLen uint32
}{
{
name: "large packet length",
// Since the StartupMessage contains the "Length of message contents
// in bytes, including self", the max startup packet length is actually
// 10000+4. Therefore, let's go past the limit with 10005
packetLen: 10005,
},
{
name: "short packet length",
packetLen: 3,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
server := &interruptReader{}
dst := []byte{}
dst = pgio.AppendUint32(dst, tt.packetLen)
dst = pgio.AppendUint32(dst, pgproto3.ProtocolVersionNumber)
server.push(dst)
backend := pgproto3.NewBackend(server, nil)
msg, err := backend.ReceiveStartupMessage()
require.Error(t, err)
require.Nil(t, msg)
require.Contains(t, err.Error(), wantErr)
})
}
})
}
func TestBackendReceiveExceededMaxBodyLen(t *testing.T) {
t.Parallel()
server := &interruptReader{}
server.push([]byte{'Q', 0, 0, 10, 10})
backend := pgproto3.NewBackend(server, nil)
// Set max body len to 5
backend.SetMaxBodyLen(5)
// Receive regular msg
msg, err := backend.Receive()
assert.Nil(t, msg)
var invalidBodyLenErr *pgproto3.ExceededMaxBodyLenErr
assert.ErrorAs(t, err, &invalidBodyLenErr)
}

pgproto3/big_endian.go (37 lines)

@@ -0,0 +1,37 @@
package pgproto3
import (
"encoding/binary"
)
type BigEndianBuf [8]byte
func (b BigEndianBuf) Int16(n int16) []byte {
buf := b[0:2]
binary.BigEndian.PutUint16(buf, uint16(n))
return buf
}
func (b BigEndianBuf) Uint16(n uint16) []byte {
buf := b[0:2]
binary.BigEndian.PutUint16(buf, n)
return buf
}
func (b BigEndianBuf) Int32(n int32) []byte {
buf := b[0:4]
binary.BigEndian.PutUint32(buf, uint32(n))
return buf
}
func (b BigEndianBuf) Uint32(n uint32) []byte {
buf := b[0:4]
binary.BigEndian.PutUint32(buf, n)
return buf
}
func (b BigEndianBuf) Int64(n int64) []byte {
buf := b[0:8]
binary.BigEndian.PutUint64(buf, uint64(n))
return buf
}

Some files were not shown because too many files have changed in this diff.