mirror of https://github.com/pressly/goose.git
parent f4a495fe54
commit ff32c04cfd
@@ -10,8 +10,6 @@ jobs:
  test:
    name: Run unit tests
    timeout-minutes: 10
    env:
      GO111MODULE: off

    strategy:
      matrix:
@@ -27,9 +25,9 @@ jobs:
          go-version: ${{ matrix.go-version }}
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Fetch deps
        run: go get -d ./...
      - name: Run tests
        run: |
          mkdir -p bin
          go vet ./...
          go test -v ./...
          go build ./...
@@ -5,9 +5,4 @@

# Files output by tests
/bin
/*.db

# We don't want to switch to Go modules, until we release v3.0.0 or later.
/go.mod
/go.sum

/*.db
@@ -4,6 +4,8 @@ Goose is a database migration tool. Manage your database schema by creating incr

[![GoDoc Widget]][GoDoc] [![Travis Widget]][Travis]

Starting with [v3.0.0](https://github.com/pressly/goose/releases/tag/v3.0.0) this project adds Go module support, but maintains backwards compatibility with older `v2.x.y` tags.

### Goals of this fork

`github.com/pressly/goose` is a fork of `bitbucket.org/liamstask/goose` with the following changes:
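For orientation on what the new `/v3` module path in this commit means for downstream code, here is a minimal sketch (not part of the diff) of a consumer importing the module-aware path; the Postgres DSN and the `migrations` directory are placeholder assumptions:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // Postgres driver registered with database/sql

	"github.com/pressly/goose/v3" // module-aware import path introduced by this release
)

func main() {
	// Placeholder DSN; any driver/dialect supported by goose works the same way.
	db, err := sql.Open("postgres", "postgres://localhost:5432/example?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := goose.SetDialect("postgres"); err != nil {
		log.Fatal(err)
	}
	// Apply all pending migrations from the local ./migrations directory.
	if err := goose.Up(db, "migrations"); err != nil {
		log.Fatal(err)
	}
}
```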
_go.mod
@@ -1,13 +0,0 @@
module github.com/pressly/goose

go 1.12

require (
	github.com/ClickHouse/clickhouse-go v1.4.0
	github.com/denisenkom/go-mssqldb v0.0.0-20190514213226-23b29e59681b
	github.com/go-sql-driver/mysql v1.4.1
	github.com/lib/pq v1.1.0
	github.com/mattn/go-sqlite3 v1.10.0
	github.com/pkg/errors v0.8.1
	github.com/ziutek/mymysql v1.5.4
)
_go.sum
@@ -1,147 +0,0 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU=
|
||||
cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/ClickHouse/clickhouse-go v1.4.0 h1:cC1DEZ1TL74QviZY4svlwow84X5r7/BGd78kf18swhI=
|
||||
github.com/ClickHouse/clickhouse-go v1.4.0/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/bkaradzic/go-lz4 v1.0.0 h1:RXc4wYsyz985CkXXeX04y4VnZFGG8Rd43pRaHsOXAKk=
|
||||
github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg=
|
||||
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/denisenkom/go-mssqldb v0.0.0-20190514213226-23b29e59681b h1:IPnx9jcsslwYYmDIxCSjmvdhzRv3R9z8pfLC9htYoIY=
|
||||
github.com/denisenkom/go-mssqldb v0.0.0-20190514213226-23b29e59681b/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM=
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
|
||||
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.1.0 h1:/5u4a+KGJptBRqGzPvYQL9p0d/tPR4S31+Tnzj9lEO4=
|
||||
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
|
||||
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=
|
||||
github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c h1:Vj5n4GlwjmQteupaxJ9+0FNOmBrHfq7vN4btdGoDZgI=
|
||||
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
|
@@ -6,7 +6,7 @@ import (
	"log"
	"os"

	"github.com/pressly/goose"
	"github.com/pressly/goose/v3"
)

var (
@@ -3,7 +3,7 @@ package main
import (
	"database/sql"

	"github.com/pressly/goose"
	"github.com/pressly/goose/v3"
)

func init() {
@@ -7,7 +7,7 @@ import (
	"log"
	"os"

	"github.com/pressly/goose"
	"github.com/pressly/goose/v3"

	_ "github.com/mattn/go-sqlite3"
)
@@ -0,0 +1,13 @@
module github.com/pressly/goose/v3

go 1.14

require (
	github.com/ClickHouse/clickhouse-go v1.4.5
	github.com/denisenkom/go-mssqldb v0.10.0
	github.com/go-sql-driver/mysql v1.6.0
	github.com/lib/pq v1.10.2
	github.com/mattn/go-sqlite3 v1.14.8
	github.com/pkg/errors v0.9.1
	github.com/ziutek/mymysql v1.5.4
)
@ -0,0 +1,36 @@
|
|||
github.com/ClickHouse/clickhouse-go v1.4.5 h1:FfhyEnv6/BaWldyjgT2k4gDDmeNwJ9C4NbY/MXxJlXk=
|
||||
github.com/ClickHouse/clickhouse-go v1.4.5/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
|
||||
github.com/bkaradzic/go-lz4 v1.0.0 h1:RXc4wYsyz985CkXXeX04y4VnZFGG8Rd43pRaHsOXAKk=
|
||||
github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4=
|
||||
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg=
|
||||
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80=
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/denisenkom/go-mssqldb v0.10.0 h1:QykgLZBorFE95+gO3u9esLd0BmbvpWp0/waNNZfHBM8=
|
||||
github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
|
||||
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
|
||||
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=
|
||||
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
|
||||
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8=
|
||||
github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/mattn/go-sqlite3 v1.14.8 h1:gDp86IdQsN/xWjIEmr9MF6o9mpksUgh0fu+9ByFxzIU=
|
||||
github.com/mattn/go-sqlite3 v1.14.8/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=
|
||||
github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
|
||||
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c h1:Vj5n4GlwjmQteupaxJ9+0FNOmBrHfq7vN4btdGoDZgI=
|
||||
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
goose.go
@@ -6,13 +6,13 @@ import (
	"strconv"
)

const VERSION = "v2.7.0-rc3"
const VERSION = "v3.0.0"

var (
	minVersion = int64(0)
	maxVersion = int64((1 << 63) - 1)
	timestampFormat = "20060102150405"
	verbose = false
	minVersion = int64(0)
	maxVersion = int64((1 << 63) - 1)
	timestampFormat = "20060102150405"
	verbose = false
)

// SetVerbose set the goose verbosity mode
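As a tiny illustrative sketch (not part of the diff), the version constant and the `SetVerbose` helper referenced in the hunk above can be exercised like this; the program is hypothetical:

```go
package main

import (
	"fmt"

	"github.com/pressly/goose/v3"
)

func main() {
	// SetVerbose toggles goose's own logging; VERSION is the constant bumped by this commit.
	goose.SetVerbose(true)
	fmt.Println("goose", goose.VERSION)
}
```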
|
|
@ -1,15 +0,0 @@
|
|||
# This is the official list of cloud authors for copyright purposes.
|
||||
# This file is distinct from the CONTRIBUTORS files.
|
||||
# See the latter for an explanation.
|
||||
|
||||
# Names should be added to this file as:
|
||||
# Name or Organization <email address>
|
||||
# The email address is not required for organizations.
|
||||
|
||||
Filippo Valsorda <hi@filippo.io>
|
||||
Google Inc.
|
||||
Ingo Oeser <nightlyone@googlemail.com>
|
||||
Palm Stone Games, Inc.
|
||||
Paweł Knap <pawelknap88@gmail.com>
|
||||
Péter Szilágyi <peterke@gmail.com>
|
||||
Tyler Treat <ttreat31@gmail.com>
|
|
@ -1,40 +0,0 @@
|
|||
# People who have agreed to one of the CLAs and can contribute patches.
|
||||
# The AUTHORS file lists the copyright holders; this file
|
||||
# lists people. For example, Google employees are listed here
|
||||
# but not in AUTHORS, because Google holds the copyright.
|
||||
#
|
||||
# https://developers.google.com/open-source/cla/individual
|
||||
# https://developers.google.com/open-source/cla/corporate
|
||||
#
|
||||
# Names should be added to this file as:
|
||||
# Name <email address>
|
||||
|
||||
# Keep the list alphabetically sorted.
|
||||
|
||||
Alexis Hunt <lexer@google.com>
|
||||
Andreas Litt <andreas.litt@gmail.com>
|
||||
Andrew Gerrand <adg@golang.org>
|
||||
Brad Fitzpatrick <bradfitz@golang.org>
|
||||
Burcu Dogan <jbd@google.com>
|
||||
Dave Day <djd@golang.org>
|
||||
David Sansome <me@davidsansome.com>
|
||||
David Symonds <dsymonds@golang.org>
|
||||
Filippo Valsorda <hi@filippo.io>
|
||||
Glenn Lewis <gmlewis@google.com>
|
||||
Ingo Oeser <nightlyone@googlemail.com>
|
||||
James Hall <james.hall@shopify.com>
|
||||
Johan Euphrosine <proppy@google.com>
|
||||
Jonathan Amsterdam <jba@google.com>
|
||||
Kunpei Sakai <namusyaka@gmail.com>
|
||||
Luna Duclos <luna.duclos@palmstonegames.com>
|
||||
Magnus Hiie <magnus.hiie@gmail.com>
|
||||
Mario Castro <mariocaster@gmail.com>
|
||||
Michael McGreevy <mcgreevy@golang.org>
|
||||
Omar Jarjur <ojarjur@google.com>
|
||||
Paweł Knap <pawelknap88@gmail.com>
|
||||
Péter Szilágyi <peterke@gmail.com>
|
||||
Sarah Adams <shadams@google.com>
|
||||
Thanatat Tamtan <acoshift@gmail.com>
|
||||
Toby Burress <kurin@google.com>
|
||||
Tuo Shan <shantuo@google.com>
|
||||
Tyler Treat <ttreat31@gmail.com>
|
|
@ -1,202 +0,0 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -1,277 +0,0 @@
|
|||
// Copyright 2016 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package civil implements types for civil time, a time-zone-independent
|
||||
// representation of time that follows the rules of the proleptic
|
||||
// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second
|
||||
// minutes.
|
||||
//
|
||||
// Because they lack location information, these types do not represent unique
|
||||
// moments or intervals of time. Use time.Time for that purpose.
|
||||
package civil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// A Date represents a date (year, month, day).
|
||||
//
|
||||
// This type does not include location information, and therefore does not
|
||||
// describe a unique 24-hour timespan.
|
||||
type Date struct {
|
||||
Year int // Year (e.g., 2014).
|
||||
Month time.Month // Month of the year (January = 1, ...).
|
||||
Day int // Day of the month, starting at 1.
|
||||
}
|
||||
|
||||
// DateOf returns the Date in which a time occurs in that time's location.
|
||||
func DateOf(t time.Time) Date {
|
||||
var d Date
|
||||
d.Year, d.Month, d.Day = t.Date()
|
||||
return d
|
||||
}
|
||||
|
||||
// ParseDate parses a string in RFC3339 full-date format and returns the date value it represents.
|
||||
func ParseDate(s string) (Date, error) {
|
||||
t, err := time.Parse("2006-01-02", s)
|
||||
if err != nil {
|
||||
return Date{}, err
|
||||
}
|
||||
return DateOf(t), nil
|
||||
}
|
||||
|
||||
// String returns the date in RFC3339 full-date format.
|
||||
func (d Date) String() string {
|
||||
return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day)
|
||||
}
|
||||
|
||||
// IsValid reports whether the date is valid.
|
||||
func (d Date) IsValid() bool {
|
||||
return DateOf(d.In(time.UTC)) == d
|
||||
}
|
||||
|
||||
// In returns the time corresponding to time 00:00:00 of the date in the location.
|
||||
//
|
||||
// In is always consistent with time.Date, even when time.Date returns a time
|
||||
// on a different day. For example, if loc is America/Indiana/Vincennes, then both
|
||||
// time.Date(1955, time.May, 1, 0, 0, 0, 0, loc)
|
||||
// and
|
||||
// civil.Date{Year: 1955, Month: time.May, Day: 1}.In(loc)
|
||||
// return 23:00:00 on April 30, 1955.
|
||||
//
|
||||
// In panics if loc is nil.
|
||||
func (d Date) In(loc *time.Location) time.Time {
|
||||
return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc)
|
||||
}
|
||||
|
||||
// AddDays returns the date that is n days in the future.
|
||||
// n can also be negative to go into the past.
|
||||
func (d Date) AddDays(n int) Date {
|
||||
return DateOf(d.In(time.UTC).AddDate(0, 0, n))
|
||||
}
|
||||
|
||||
// DaysSince returns the signed number of days between the date and s, not including the end day.
|
||||
// This is the inverse operation to AddDays.
|
||||
func (d Date) DaysSince(s Date) (days int) {
|
||||
// We convert to Unix time so we do not have to worry about leap seconds:
|
||||
// Unix time increases by exactly 86400 seconds per day.
|
||||
deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix()
|
||||
return int(deltaUnix / 86400)
|
||||
}
|
||||
|
||||
// Before reports whether d1 occurs before d2.
|
||||
func (d1 Date) Before(d2 Date) bool {
|
||||
if d1.Year != d2.Year {
|
||||
return d1.Year < d2.Year
|
||||
}
|
||||
if d1.Month != d2.Month {
|
||||
return d1.Month < d2.Month
|
||||
}
|
||||
return d1.Day < d2.Day
|
||||
}
|
||||
|
||||
// After reports whether d1 occurs after d2.
|
||||
func (d1 Date) After(d2 Date) bool {
|
||||
return d2.Before(d1)
|
||||
}
|
||||
|
||||
// MarshalText implements the encoding.TextMarshaler interface.
|
||||
// The output is the result of d.String().
|
||||
func (d Date) MarshalText() ([]byte, error) {
|
||||
return []byte(d.String()), nil
|
||||
}
|
||||
|
||||
// UnmarshalText implements the encoding.TextUnmarshaler interface.
|
||||
// The date is expected to be a string in a format accepted by ParseDate.
|
||||
func (d *Date) UnmarshalText(data []byte) error {
|
||||
var err error
|
||||
*d, err = ParseDate(string(data))
|
||||
return err
|
||||
}
|
||||
|
||||
// A Time represents a time with nanosecond precision.
|
||||
//
|
||||
// This type does not include location information, and therefore does not
|
||||
// describe a unique moment in time.
|
||||
//
|
||||
// This type exists to represent the TIME type in storage-based APIs like BigQuery.
|
||||
// Most operations on Times are unlikely to be meaningful. Prefer the DateTime type.
|
||||
type Time struct {
|
||||
Hour int // The hour of the day in 24-hour format; range [0-23]
|
||||
Minute int // The minute of the hour; range [0-59]
|
||||
Second int // The second of the minute; range [0-59]
|
||||
Nanosecond int // The nanosecond of the second; range [0-999999999]
|
||||
}
|
||||
|
||||
// TimeOf returns the Time representing the time of day in which a time occurs
|
||||
// in that time's location. It ignores the date.
|
||||
func TimeOf(t time.Time) Time {
|
||||
var tm Time
|
||||
tm.Hour, tm.Minute, tm.Second = t.Clock()
|
||||
tm.Nanosecond = t.Nanosecond()
|
||||
return tm
|
||||
}
|
||||
|
||||
// ParseTime parses a string and returns the time value it represents.
|
||||
// ParseTime accepts an extended form of the RFC3339 partial-time format. After
|
||||
// the HH:MM:SS part of the string, an optional fractional part may appear,
|
||||
// consisting of a decimal point followed by one to nine decimal digits.
|
||||
// (RFC3339 admits only one digit after the decimal point).
|
||||
func ParseTime(s string) (Time, error) {
|
||||
t, err := time.Parse("15:04:05.999999999", s)
|
||||
if err != nil {
|
||||
return Time{}, err
|
||||
}
|
||||
return TimeOf(t), nil
|
||||
}
|
||||
|
||||
// String returns the date in the format described in ParseTime. If Nanoseconds
|
||||
// is zero, no fractional part will be generated. Otherwise, the result will
|
||||
// end with a fractional part consisting of a decimal point and nine digits.
|
||||
func (t Time) String() string {
|
||||
s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second)
|
||||
if t.Nanosecond == 0 {
|
||||
return s
|
||||
}
|
||||
return s + fmt.Sprintf(".%09d", t.Nanosecond)
|
||||
}
|
||||
|
||||
// IsValid reports whether the time is valid.
|
||||
func (t Time) IsValid() bool {
|
||||
// Construct a non-zero time.
|
||||
tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC)
|
||||
return TimeOf(tm) == t
|
||||
}
|
||||
|
||||
// MarshalText implements the encoding.TextMarshaler interface.
|
||||
// The output is the result of t.String().
|
||||
func (t Time) MarshalText() ([]byte, error) {
|
||||
return []byte(t.String()), nil
|
||||
}
|
||||
|
||||
// UnmarshalText implements the encoding.TextUnmarshaler interface.
|
||||
// The time is expected to be a string in a format accepted by ParseTime.
|
||||
func (t *Time) UnmarshalText(data []byte) error {
|
||||
var err error
|
||||
*t, err = ParseTime(string(data))
|
||||
return err
|
||||
}
|
||||
|
||||
// A DateTime represents a date and time.
|
||||
//
|
||||
// This type does not include location information, and therefore does not
|
||||
// describe a unique moment in time.
|
||||
type DateTime struct {
|
||||
Date Date
|
||||
Time Time
|
||||
}
|
||||
|
||||
// Note: We deliberately do not embed Date into DateTime, to avoid promoting AddDays and Sub.
|
||||
|
||||
// DateTimeOf returns the DateTime in which a time occurs in that time's location.
|
||||
func DateTimeOf(t time.Time) DateTime {
|
||||
return DateTime{
|
||||
Date: DateOf(t),
|
||||
Time: TimeOf(t),
|
||||
}
|
||||
}
|
||||
|
||||
// ParseDateTime parses a string and returns the DateTime it represents.
|
||||
// ParseDateTime accepts a variant of the RFC3339 date-time format that omits
|
||||
// the time offset but includes an optional fractional time, as described in
|
||||
// ParseTime. Informally, the accepted format is
|
||||
// YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF]
|
||||
// where the 'T' may be a lower-case 't'.
|
||||
func ParseDateTime(s string) (DateTime, error) {
|
||||
t, err := time.Parse("2006-01-02T15:04:05.999999999", s)
|
||||
if err != nil {
|
||||
t, err = time.Parse("2006-01-02t15:04:05.999999999", s)
|
||||
if err != nil {
|
||||
return DateTime{}, err
|
||||
}
|
||||
}
|
||||
return DateTimeOf(t), nil
|
||||
}
|
||||
|
||||
// String returns the date in the format described in ParseDate.
|
||||
func (dt DateTime) String() string {
|
||||
return dt.Date.String() + "T" + dt.Time.String()
|
||||
}
|
||||
|
||||
// IsValid reports whether the datetime is valid.
|
||||
func (dt DateTime) IsValid() bool {
|
||||
return dt.Date.IsValid() && dt.Time.IsValid()
|
||||
}
|
||||
|
||||
// In returns the time corresponding to the DateTime in the given location.
|
||||
//
|
||||
// If the time is missing or ambigous at the location, In returns the same
|
||||
// result as time.Date. For example, if loc is America/Indiana/Vincennes, then
|
||||
// both
|
||||
// time.Date(1955, time.May, 1, 0, 30, 0, 0, loc)
|
||||
// and
|
||||
// civil.DateTime{
|
||||
// civil.Date{Year: 1955, Month: time.May, Day: 1}},
|
||||
// civil.Time{Minute: 30}}.In(loc)
|
||||
// return 23:30:00 on April 30, 1955.
|
||||
//
|
||||
// In panics if loc is nil.
|
||||
func (dt DateTime) In(loc *time.Location) time.Time {
|
||||
return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc)
|
||||
}
|
||||
|
||||
// Before reports whether dt1 occurs before dt2.
|
||||
func (dt1 DateTime) Before(dt2 DateTime) bool {
|
||||
return dt1.In(time.UTC).Before(dt2.In(time.UTC))
|
||||
}
|
||||
|
||||
// After reports whether dt1 occurs after dt2.
|
||||
func (dt1 DateTime) After(dt2 DateTime) bool {
|
||||
return dt2.Before(dt1)
|
||||
}
|
||||
|
||||
// MarshalText implements the encoding.TextMarshaler interface.
|
||||
// The output is the result of dt.String().
|
||||
func (dt DateTime) MarshalText() ([]byte, error) {
|
||||
return []byte(dt.String()), nil
|
||||
}
|
||||
|
||||
// UnmarshalText implements the encoding.TextUnmarshaler interface.
|
||||
// The datetime is expected to be a string in a format accepted by ParseDateTime
|
||||
func (dt *DateTime) UnmarshalText(data []byte) error {
|
||||
var err error
|
||||
*dt, err = ParseDateTime(string(data))
|
||||
return err
|
||||
}
|
|
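For reference only (not part of the diff): the vendored civil package removed above exposes the API sketched below. The example assumes the `github.com/golang-sql/civil` copy of the same package, which the new go.sum pulls in via go-mssqldb; the dates are placeholders.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang-sql/civil" // assumed import path for the same civil-time API
)

func main() {
	d, err := civil.ParseDate("2021-06-01") // RFC 3339 full-date, as documented above
	if err != nil {
		log.Fatal(err)
	}
	later := d.AddDays(30)
	fmt.Println(later, later.After(d), later.DaysSince(d)) // 2021-07-01 true 30
	fmt.Println(civil.DateOf(time.Now()).IsValid())        // true for any real date
}
```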
@ -1,28 +0,0 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.out
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
||||
|
||||
coverage.txt
|
||||
.idea/**
|
|
@ -1,25 +0,0 @@
|
|||
sudo: required
|
||||
language: go
|
||||
go:
|
||||
- 1.12.x
|
||||
- 1.13.x
|
||||
- master
|
||||
go_import_path: github.com/ClickHouse/clickhouse-go
|
||||
services:
|
||||
- docker
|
||||
install:
|
||||
- go get github.com/mattn/goveralls
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
- go get github.com/stretchr/testify/assert
|
||||
- go get github.com/cloudflare/golz4
|
||||
- go get github.com/bkaradzic/go-lz4
|
||||
- go get github.com/pierrec/lz4
|
||||
|
||||
before_install:
|
||||
- docker --version
|
||||
- docker-compose --version
|
||||
- docker-compose up -d
|
||||
script:
|
||||
- ./go.test.sh
|
||||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash)
|
|
@ -1,10 +0,0 @@
|
|||
# Contributing notes
|
||||
|
||||
## Local setup
|
||||
|
||||
The easiest way to run tests is to use Docker Compose:
|
||||
|
||||
```
|
||||
docker-compose up
|
||||
make
|
||||
```
|
|
@ -1,21 +0,0 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2017-2020 Kirill Shvakov
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
|
@ -1,8 +0,0 @@
|
|||
test:
|
||||
go install -race -v
|
||||
go test -i -v
|
||||
go test -race -timeout 30s -v .
|
||||
|
||||
coverage:
|
||||
go test -coverprofile=coverage.out -v .
|
||||
go tool cover -html=coverage.out
|
|
@ -1,190 +0,0 @@
|
|||
# ClickHouse [](https://travis-ci.org/ClickHouse/clickhouse-go) [](https://goreportcard.com/report/github.com/ClickHouse/clickhouse-go) [](https://codecov.io/gh/ClickHouse/clickhouse-go)
|
||||
|
||||
Golang SQL database driver for [Yandex ClickHouse](https://clickhouse.yandex/)
|
||||
|
||||
## Key features
|
||||
|
||||
* Uses native ClickHouse tcp client-server protocol
|
||||
* Compatibility with `database/sql`
|
||||
* Round Robin load-balancing
|
||||
* Bulk write support : `begin->prepare->(in loop exec)->commit`
|
||||
* LZ4 compression support (default to use pure go lz4, switch to use cgo lz4 by turn clz4 build tags on)
|
||||
|
||||
## DSN
|
||||
|
||||
* username/password - auth credentials
|
||||
* database - select the current default database
|
||||
* read_timeout/write_timeout - timeout in second
|
||||
* no_delay - disable/enable the Nagle Algorithm for tcp socket (default is 'true' - disable)
|
||||
* alt_hosts - comma separated list of single address host for load-balancing
|
||||
* connection_open_strategy - random/in_order (default random).
|
||||
* random - choose random server from set
|
||||
* in_order - first live server is choosen in specified order
|
||||
* time_random - choose random(based on current time) server from set. This option differs from `random` in that randomness is based on current time rather than on amount of previous connections.
|
||||
* block_size - maximum rows in block (default is 1000000). If the rows are larger then the data will be split into several blocks to send them to the server. If one block was sent to the server, the data will be persisted on the server disk, we can't rollback the transaction. So always keep in mind that the batch size no larger than the block_size if you want atomic batch insert.
|
||||
* pool_size - maximum amount of preallocated byte chunks used in queries (default is 100). Decrease this if you experience memory problems at the expense of more GC pressure and vice versa.
|
||||
* debug - enable debug output (boolean value)
|
||||
|
||||
SSL/TLS parameters:
|
||||
|
||||
* secure - establish secure connection (default is false)
|
||||
* skip_verify - skip certificate verification (default is false)
|
||||
* tls_config - name of a TLS config with client certificates, registered using `clickhouse.RegisterTLSConfig()`; implies secure to be true, unless explicitly specified
|
||||
|
||||
example:
|
||||
```
|
||||
tcp://host1:9000?username=user&password=qwerty&database=clicks&read_timeout=10&write_timeout=20&alt_hosts=host2:9000,host3:9000
|
||||
```
|
||||
|
||||
## Supported data types
|
||||
|
||||
* UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64
|
||||
* Float32, Float64
|
||||
* String
|
||||
* FixedString(N)
|
||||
* Date
|
||||
* DateTime
|
||||
* IPv4
|
||||
* IPv6
|
||||
* Enum
|
||||
* UUID
|
||||
* Nullable(T)
|
||||
* [Array(T) (one-dimensional)](https://clickhouse.yandex/reference_en.html#Array(T)) [godoc](https://godoc.org/github.com/ClickHouse/clickhouse-go#Array)
|
||||
|
||||
## TODO
|
||||
|
||||
* Support other compression methods(zstd ...)
|
||||
|
||||
## Install

```
go get -u github.com/ClickHouse/clickhouse-go
```

## Example

```go
package main

import (
	"database/sql"
	"fmt"
	"log"
	"time"

	"github.com/ClickHouse/clickhouse-go"
)

func main() {
	connect, err := sql.Open("clickhouse", "tcp://127.0.0.1:9000?debug=true")
	if err != nil {
		log.Fatal(err)
	}
	if err := connect.Ping(); err != nil {
		if exception, ok := err.(*clickhouse.Exception); ok {
			fmt.Printf("[%d] %s \n%s\n", exception.Code, exception.Message, exception.StackTrace)
		} else {
			fmt.Println(err)
		}
		return
	}

	_, err = connect.Exec(`
		CREATE TABLE IF NOT EXISTS example (
			country_code FixedString(2),
			os_id        UInt8,
			browser_id   UInt8,
			categories   Array(Int16),
			action_day   Date,
			action_time  DateTime
		) engine=Memory
	`)

	if err != nil {
		log.Fatal(err)
	}
	var (
		tx, _   = connect.Begin()
		stmt, _ = tx.Prepare("INSERT INTO example (country_code, os_id, browser_id, categories, action_day, action_time) VALUES (?, ?, ?, ?, ?, ?)")
	)
	defer stmt.Close()

	for i := 0; i < 100; i++ {
		if _, err := stmt.Exec(
			"RU",
			10+i,
			100+i,
			clickhouse.Array([]int16{1, 2, 3}),
			time.Now(),
			time.Now(),
		); err != nil {
			log.Fatal(err)
		}
	}

	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}

	rows, err := connect.Query("SELECT country_code, os_id, browser_id, categories, action_day, action_time FROM example")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var (
			country               string
			os, browser           uint8
			categories            []int16
			actionDay, actionTime time.Time
		)
		if err := rows.Scan(&country, &os, &browser, &categories, &actionDay, &actionTime); err != nil {
			log.Fatal(err)
		}
		log.Printf("country: %s, os: %d, browser: %d, categories: %v, action_day: %s, action_time: %s", country, os, browser, categories, actionDay, actionTime)
	}

	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}

	if _, err := connect.Exec("DROP TABLE example"); err != nil {
		log.Fatal(err)
	}
}
```

Use [sqlx](https://github.com/jmoiron/sqlx)

```go
package main

import (
	"log"
	"time"

	"github.com/jmoiron/sqlx"
	_ "github.com/ClickHouse/clickhouse-go"
)

func main() {
	connect, err := sqlx.Open("clickhouse", "tcp://127.0.0.1:9000?debug=true")
	if err != nil {
		log.Fatal(err)
	}
	var items []struct {
		CountryCode string    `db:"country_code"`
		OsID        uint8     `db:"os_id"`
		BrowserID   uint8     `db:"browser_id"`
		Categories  []int16   `db:"categories"`
		ActionTime  time.Time `db:"action_time"`
	}

	if err := connect.Select(&items, "SELECT country_code, os_id, browser_id, categories, action_time FROM example"); err != nil {
		log.Fatal(err)
	}

	for _, item := range items {
		log.Printf("country: %s, os: %d, browser: %d, categories: %v, action_time: %s", item.CountryCode, item.OsID, item.BrowserID, item.Categories, item.ActionTime)
	}
}
```
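The DSN passed to `sql.Open` carries all driver options as URL query parameters; the names used below (`username`, `password`, `database`, `read_timeout`, `write_timeout`, `compress`, `debug`, `alt_hosts`, `connection_open_strategy`) are the ones parsed by the driver's `open()` in the vendored connection code further down in this diff. A minimal sketch, assuming a ClickHouse server listening on the default native port with default credentials:

```go
package main

import (
	"database/sql"
	"log"

	// blank import registers the "clickhouse" driver with database/sql
	_ "github.com/ClickHouse/clickhouse-go"
)

func main() {
	// Placeholder address and credentials; adjust for your server.
	dsn := "tcp://127.0.0.1:9000?username=default&password=&database=default" +
		"&read_timeout=10&write_timeout=20&compress=true&debug=false" +
		"&alt_hosts=127.0.0.2:9000,127.0.0.3:9000&connection_open_strategy=in_order"

	db, err := sql.Open("clickhouse", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Ping fails with a descriptive error if no server is reachable at the address above.
	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```

`alt_hosts` and `connection_open_strategy` only come into play when more than one address is listed; with `in_order` the driver tries the hosts in the order given.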
|
@ -1,21 +0,0 @@
|
|||
package clickhouse
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
func Array(v interface{}) interface{} {
|
||||
return v
|
||||
}
|
||||
|
||||
func ArrayFixedString(len int, v interface{}) interface{} {
|
||||
return v
|
||||
}
|
||||
|
||||
func ArrayDate(v []time.Time) interface{} {
|
||||
return v
|
||||
}
|
||||
|
||||
func ArrayDateTime(v []time.Time) interface{} {
|
||||
return v
|
||||
}
|
|
@ -1,254 +0,0 @@
|
|||
package clickhouse
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/lib/leakypool"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/lib/binary"
|
||||
"github.com/ClickHouse/clickhouse-go/lib/data"
|
||||
"github.com/ClickHouse/clickhouse-go/lib/protocol"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultDatabase when connecting to ClickHouse
|
||||
DefaultDatabase = "default"
|
||||
// DefaultUsername when connecting to ClickHouse
|
||||
DefaultUsername = "default"
|
||||
// DefaultConnTimeout when connecting to ClickHouse
|
||||
DefaultConnTimeout = 5 * time.Second
|
||||
// DefaultReadTimeout when reading query results
|
||||
DefaultReadTimeout = time.Minute
|
||||
// DefaultWriteTimeout when sending queries
|
||||
DefaultWriteTimeout = time.Minute
|
||||
)
|
||||
|
||||
var (
|
||||
unixtime int64
|
||||
logOutput io.Writer = os.Stdout
|
||||
hostname, _ = os.Hostname()
|
||||
poolInit sync.Once
|
||||
)
|
||||
|
||||
func init() {
|
||||
sql.Register("clickhouse", &bootstrap{})
|
||||
go func() {
|
||||
for tick := time.Tick(time.Second); ; {
|
||||
select {
|
||||
case <-tick:
|
||||
atomic.AddInt64(&unixtime, int64(time.Second))
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func now() time.Time {
|
||||
return time.Unix(0, atomic.LoadInt64(&unixtime))
|
||||
}
|
||||
|
||||
type bootstrap struct{}
|
||||
|
||||
func (d *bootstrap) Open(dsn string) (driver.Conn, error) {
|
||||
return Open(dsn)
|
||||
}
|
||||
|
||||
// SetLogOutput allows to change output of the default logger
|
||||
func SetLogOutput(output io.Writer) {
|
||||
logOutput = output
|
||||
}
|
||||
|
||||
// Open the connection
|
||||
func Open(dsn string) (driver.Conn, error) {
|
||||
clickhouse, err := open(dsn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return clickhouse, err
|
||||
}
|
||||
|
||||
func open(dsn string) (*clickhouse, error) {
|
||||
url, err := url.Parse(dsn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var (
|
||||
hosts = []string{url.Host}
|
||||
query = url.Query()
|
||||
secure = false
|
||||
skipVerify = false
|
||||
tlsConfigName = query.Get("tls_config")
|
||||
noDelay = true
|
||||
compress = false
|
||||
database = query.Get("database")
|
||||
username = query.Get("username")
|
||||
password = query.Get("password")
|
||||
blockSize = 1000000
|
||||
connTimeout = DefaultConnTimeout
|
||||
readTimeout = DefaultReadTimeout
|
||||
writeTimeout = DefaultWriteTimeout
|
||||
connOpenStrategy = connOpenRandom
|
||||
poolSize = 100
|
||||
)
|
||||
if len(database) == 0 {
|
||||
database = DefaultDatabase
|
||||
}
|
||||
if len(username) == 0 {
|
||||
username = DefaultUsername
|
||||
}
|
||||
if v, err := strconv.ParseBool(query.Get("no_delay")); err == nil {
|
||||
noDelay = v
|
||||
}
|
||||
tlsConfig := getTLSConfigClone(tlsConfigName)
|
||||
if tlsConfigName != "" && tlsConfig == nil {
|
||||
return nil, fmt.Errorf("invalid tls_config - no config registered under name %s", tlsConfigName)
|
||||
}
|
||||
secure = tlsConfig != nil
|
||||
if v, err := strconv.ParseBool(query.Get("secure")); err == nil {
|
||||
secure = v
|
||||
}
|
||||
if v, err := strconv.ParseBool(query.Get("skip_verify")); err == nil {
|
||||
skipVerify = v
|
||||
}
|
||||
if duration, err := strconv.ParseFloat(query.Get("timeout"), 64); err == nil {
|
||||
connTimeout = time.Duration(duration * float64(time.Second))
|
||||
}
|
||||
if duration, err := strconv.ParseFloat(query.Get("read_timeout"), 64); err == nil {
|
||||
readTimeout = time.Duration(duration * float64(time.Second))
|
||||
}
|
||||
if duration, err := strconv.ParseFloat(query.Get("write_timeout"), 64); err == nil {
|
||||
writeTimeout = time.Duration(duration * float64(time.Second))
|
||||
}
|
||||
if size, err := strconv.ParseInt(query.Get("block_size"), 10, 64); err == nil {
|
||||
blockSize = int(size)
|
||||
}
|
||||
if size, err := strconv.ParseInt(query.Get("pool_size"), 10, 64); err == nil {
|
||||
poolSize = int(size)
|
||||
}
|
||||
poolInit.Do(func() {
|
||||
leakypool.InitBytePool(poolSize)
|
||||
})
|
||||
if altHosts := strings.Split(query.Get("alt_hosts"), ","); len(altHosts) != 0 {
|
||||
for _, host := range altHosts {
|
||||
if len(host) != 0 {
|
||||
hosts = append(hosts, host)
|
||||
}
|
||||
}
|
||||
}
|
||||
switch query.Get("connection_open_strategy") {
|
||||
case "random":
|
||||
connOpenStrategy = connOpenRandom
|
||||
case "in_order":
|
||||
connOpenStrategy = connOpenInOrder
|
||||
case "time_random":
|
||||
connOpenStrategy = connOpenTimeRandom
|
||||
}
|
||||
|
||||
settings, err := makeQuerySettings(query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if v, err := strconv.ParseBool(query.Get("compress")); err == nil {
|
||||
compress = v
|
||||
}
|
||||
|
||||
var (
|
||||
ch = clickhouse{
|
||||
logf: func(string, ...interface{}) {},
|
||||
settings: settings,
|
||||
compress: compress,
|
||||
blockSize: blockSize,
|
||||
ServerInfo: data.ServerInfo{
|
||||
Timezone: time.Local,
|
||||
},
|
||||
}
|
||||
logger = log.New(logOutput, "[clickhouse]", 0)
|
||||
)
|
||||
if debug, err := strconv.ParseBool(url.Query().Get("debug")); err == nil && debug {
|
||||
ch.logf = logger.Printf
|
||||
}
|
||||
ch.logf("host(s)=%s, database=%s, username=%s",
|
||||
strings.Join(hosts, ", "),
|
||||
database,
|
||||
username,
|
||||
)
|
||||
options := connOptions{
|
||||
secure: secure,
|
||||
tlsConfig: tlsConfig,
|
||||
skipVerify: skipVerify,
|
||||
hosts: hosts,
|
||||
connTimeout: connTimeout,
|
||||
readTimeout: readTimeout,
|
||||
writeTimeout: writeTimeout,
|
||||
noDelay: noDelay,
|
||||
openStrategy: connOpenStrategy,
|
||||
logf: ch.logf,
|
||||
}
|
||||
if ch.conn, err = dial(options); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
logger.SetPrefix(fmt.Sprintf("[clickhouse][connect=%d]", ch.conn.ident))
|
||||
ch.buffer = bufio.NewWriter(ch.conn)
|
||||
|
||||
ch.decoder = binary.NewDecoderWithCompress(ch.conn)
|
||||
ch.encoder = binary.NewEncoderWithCompress(ch.buffer)
|
||||
|
||||
if err := ch.hello(database, username, password); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &ch, nil
|
||||
}
|
||||
|
||||
func (ch *clickhouse) hello(database, username, password string) error {
|
||||
ch.logf("[hello] -> %s", ch.ClientInfo)
|
||||
{
|
||||
ch.encoder.Uvarint(protocol.ClientHello)
|
||||
if err := ch.ClientInfo.Write(ch.encoder); err != nil {
|
||||
return err
|
||||
}
|
||||
{
|
||||
ch.encoder.String(database)
|
||||
ch.encoder.String(username)
|
||||
ch.encoder.String(password)
|
||||
}
|
||||
if err := ch.encoder.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
{
|
||||
packet, err := ch.decoder.Uvarint()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch packet {
|
||||
case protocol.ServerException:
|
||||
return ch.exception()
|
||||
case protocol.ServerHello:
|
||||
if err := ch.ServerInfo.Read(ch.decoder); err != nil {
|
||||
return err
|
||||
}
|
||||
case protocol.ServerEndOfStream:
|
||||
ch.logf("[bootstrap] <- end of stream")
|
||||
return nil
|
||||
default:
|
||||
ch.conn.Close()
|
||||
return fmt.Errorf("[hello] unexpected packet [%d] from server", packet)
|
||||
}
|
||||
}
|
||||
ch.logf("[hello] <- %s", ch.ServerInfo)
|
||||
return nil
|
||||
}
|
|
@ -1,320 +0,0 @@
|
|||
package clickhouse
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/lib/binary"
|
||||
"github.com/ClickHouse/clickhouse-go/lib/column"
|
||||
"github.com/ClickHouse/clickhouse-go/lib/data"
|
||||
"github.com/ClickHouse/clickhouse-go/lib/protocol"
|
||||
"github.com/ClickHouse/clickhouse-go/lib/types"
|
||||
)
|
||||
|
||||
type (
|
||||
Date = types.Date
|
||||
DateTime = types.DateTime
|
||||
UUID = types.UUID
|
||||
)
|
||||
|
||||
var (
|
||||
ErrInsertInNotBatchMode = errors.New("insert statement supported only in the batch mode (use begin/commit)")
|
||||
ErrLimitDataRequestInTx = errors.New("data request has already been prepared in transaction")
|
||||
)
|
||||
|
||||
var (
|
||||
splitInsertRe = regexp.MustCompile(`(?i)\sVALUES\s*\(`)
|
||||
)
|
||||
|
||||
type logger func(format string, v ...interface{})
|
||||
|
||||
type clickhouse struct {
|
||||
sync.Mutex
|
||||
data.ServerInfo
|
||||
data.ClientInfo
|
||||
logf logger
|
||||
conn *connect
|
||||
block *data.Block
|
||||
buffer *bufio.Writer
|
||||
decoder *binary.Decoder
|
||||
encoder *binary.Encoder
|
||||
settings *querySettings
|
||||
compress bool
|
||||
blockSize int
|
||||
inTransaction bool
|
||||
}
|
||||
|
||||
func (ch *clickhouse) Prepare(query string) (driver.Stmt, error) {
|
||||
return ch.prepareContext(context.Background(), query)
|
||||
}
|
||||
|
||||
func (ch *clickhouse) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
|
||||
return ch.prepareContext(ctx, query)
|
||||
}
|
||||
|
||||
func (ch *clickhouse) prepareContext(ctx context.Context, query string) (driver.Stmt, error) {
|
||||
ch.logf("[prepare] %s", query)
|
||||
switch {
|
||||
case ch.conn.closed:
|
||||
return nil, driver.ErrBadConn
|
||||
case ch.block != nil:
|
||||
return nil, ErrLimitDataRequestInTx
|
||||
case isInsert(query):
|
||||
if !ch.inTransaction {
|
||||
return nil, ErrInsertInNotBatchMode
|
||||
}
|
||||
return ch.insert(query)
|
||||
}
|
||||
return &stmt{
|
||||
ch: ch,
|
||||
query: query,
|
||||
numInput: numInput(query),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (ch *clickhouse) insert(query string) (_ driver.Stmt, err error) {
|
||||
if err := ch.sendQuery(splitInsertRe.Split(query, -1)[0] + " VALUES "); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ch.block, err = ch.readMeta(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &stmt{
|
||||
ch: ch,
|
||||
isInsert: true,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (ch *clickhouse) Begin() (driver.Tx, error) {
|
||||
return ch.beginTx(context.Background(), txOptions{})
|
||||
}
|
||||
|
||||
func (ch *clickhouse) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
|
||||
return ch.beginTx(ctx, txOptions{
|
||||
Isolation: int(opts.Isolation),
|
||||
ReadOnly: opts.ReadOnly,
|
||||
})
|
||||
}
|
||||
|
||||
type txOptions struct {
|
||||
Isolation int
|
||||
ReadOnly bool
|
||||
}
|
||||
|
||||
func (ch *clickhouse) beginTx(ctx context.Context, opts txOptions) (*clickhouse, error) {
|
||||
ch.logf("[begin] tx=%t, data=%t", ch.inTransaction, ch.block != nil)
|
||||
switch {
|
||||
case ch.inTransaction:
|
||||
return nil, sql.ErrTxDone
|
||||
case ch.conn.closed:
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
if finish := ch.watchCancel(ctx); finish != nil {
|
||||
defer finish()
|
||||
}
|
||||
ch.block = nil
|
||||
ch.inTransaction = true
|
||||
return ch, nil
|
||||
}
|
||||
|
||||
func (ch *clickhouse) Commit() error {
|
||||
ch.logf("[commit] tx=%t, data=%t", ch.inTransaction, ch.block != nil)
|
||||
defer func() {
|
||||
if ch.block != nil {
|
||||
ch.block.Reset()
|
||||
ch.block = nil
|
||||
}
|
||||
ch.inTransaction = false
|
||||
}()
|
||||
switch {
|
||||
case !ch.inTransaction:
|
||||
return sql.ErrTxDone
|
||||
case ch.conn.closed:
|
||||
return driver.ErrBadConn
|
||||
}
|
||||
if ch.block != nil {
|
||||
if err := ch.writeBlock(ch.block); err != nil {
|
||||
return err
|
||||
}
|
||||
// Send empty block as marker of end of data.
|
||||
if err := ch.writeBlock(&data.Block{}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ch.encoder.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
return ch.process()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ch *clickhouse) Rollback() error {
|
||||
ch.logf("[rollback] tx=%t, data=%t", ch.inTransaction, ch.block != nil)
|
||||
if !ch.inTransaction {
|
||||
return sql.ErrTxDone
|
||||
}
|
||||
if ch.block != nil {
|
||||
ch.block.Reset()
|
||||
}
|
||||
ch.block = nil
|
||||
ch.buffer = nil
|
||||
ch.inTransaction = false
|
||||
return ch.conn.Close()
|
||||
}
|
||||
|
||||
func (ch *clickhouse) CheckNamedValue(nv *driver.NamedValue) error {
|
||||
switch nv.Value.(type) {
|
||||
case column.IP, column.UUID:
|
||||
return nil
|
||||
case nil, []byte, int8, int16, int32, int64, uint8, uint16, uint32, uint64, float32, float64, string, time.Time:
|
||||
return nil
|
||||
}
|
||||
switch v := nv.Value.(type) {
|
||||
case
|
||||
[]int, []int8, []int16, []int32, []int64,
|
||||
[]uint, []uint8, []uint16, []uint32, []uint64,
|
||||
[]float32, []float64,
|
||||
[]string:
|
||||
return nil
|
||||
case net.IP, *net.IP:
|
||||
return nil
|
||||
case driver.Valuer:
|
||||
value, err := v.Value()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nv.Value = value
|
||||
default:
|
||||
switch value := reflect.ValueOf(nv.Value); value.Kind() {
|
||||
case reflect.Slice:
|
||||
return nil
|
||||
case reflect.Bool:
|
||||
nv.Value = uint8(0)
|
||||
if value.Bool() {
|
||||
nv.Value = uint8(1)
|
||||
}
|
||||
case reflect.Int8:
|
||||
nv.Value = int8(value.Int())
|
||||
case reflect.Int16:
|
||||
nv.Value = int16(value.Int())
|
||||
case reflect.Int32:
|
||||
nv.Value = int32(value.Int())
|
||||
case reflect.Int64:
|
||||
nv.Value = value.Int()
|
||||
case reflect.Uint8:
|
||||
nv.Value = uint8(value.Uint())
|
||||
case reflect.Uint16:
|
||||
nv.Value = uint16(value.Uint())
|
||||
case reflect.Uint32:
|
||||
nv.Value = uint32(value.Uint())
|
||||
case reflect.Uint64:
|
||||
nv.Value = uint64(value.Uint())
|
||||
case reflect.Float32:
|
||||
nv.Value = float32(value.Float())
|
||||
case reflect.Float64:
|
||||
nv.Value = float64(value.Float())
|
||||
case reflect.String:
|
||||
nv.Value = value.String()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ch *clickhouse) Close() error {
|
||||
ch.block = nil
|
||||
return ch.conn.Close()
|
||||
}
|
||||
|
||||
func (ch *clickhouse) process() error {
|
||||
packet, err := ch.decoder.Uvarint()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for {
|
||||
switch packet {
|
||||
case protocol.ServerPong:
|
||||
ch.logf("[process] <- pong")
|
||||
return nil
|
||||
case protocol.ServerException:
|
||||
ch.logf("[process] <- exception")
|
||||
return ch.exception()
|
||||
case protocol.ServerProgress:
|
||||
progress, err := ch.progress()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ch.logf("[process] <- progress: rows=%d, bytes=%d, total rows=%d",
|
||||
progress.rows,
|
||||
progress.bytes,
|
||||
progress.totalRows,
|
||||
)
|
||||
case protocol.ServerProfileInfo:
|
||||
profileInfo, err := ch.profileInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ch.logf("[process] <- profiling: rows=%d, bytes=%d, blocks=%d", profileInfo.rows, profileInfo.bytes, profileInfo.blocks)
|
||||
case protocol.ServerData:
|
||||
block, err := ch.readBlock()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ch.logf("[process] <- data: packet=%d, columns=%d, rows=%d", packet, block.NumColumns, block.NumRows)
|
||||
case protocol.ServerEndOfStream:
|
||||
ch.logf("[process] <- end of stream")
|
||||
return nil
|
||||
default:
|
||||
ch.conn.Close()
|
||||
return fmt.Errorf("[process] unexpected packet [%d] from server", packet)
|
||||
}
|
||||
if packet, err = ch.decoder.Uvarint(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ch *clickhouse) cancel() error {
|
||||
ch.logf("[cancel request]")
|
||||
// even if we fail to write the cancel, we still need to close
|
||||
err := ch.encoder.Uvarint(protocol.ClientCancel)
|
||||
if err == nil {
|
||||
err = ch.encoder.Flush()
|
||||
}
|
||||
// return the close error if there was one, otherwise return the write error
|
||||
if cerr := ch.conn.Close(); cerr != nil {
|
||||
return cerr
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (ch *clickhouse) watchCancel(ctx context.Context) func() {
|
||||
if done := ctx.Done(); done != nil {
|
||||
finished := make(chan struct{})
|
||||
go func() {
|
||||
select {
|
||||
case <-done:
|
||||
ch.cancel()
|
||||
finished <- struct{}{}
|
||||
ch.logf("[cancel] <- done")
|
||||
case <-finished:
|
||||
ch.logf("[cancel] <- finished")
|
||||
}
|
||||
}()
|
||||
return func() {
|
||||
select {
|
||||
case <-finished:
|
||||
case finished <- struct{}{}:
|
||||
}
|
||||
}
|
||||
}
|
||||
return func() {}
|
||||
}
|
|
@ -1,47 +0,0 @@
|
|||
package clickhouse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Exception struct {
|
||||
Code int32
|
||||
Name string
|
||||
Message string
|
||||
StackTrace string
|
||||
nested error
|
||||
}
|
||||
|
||||
func (e *Exception) Error() string {
|
||||
return fmt.Sprintf("code: %d, message: %s", e.Code, e.Message)
|
||||
}
|
||||
|
||||
func (ch *clickhouse) exception() error {
|
||||
defer ch.conn.Close()
|
||||
var (
|
||||
e Exception
|
||||
err error
|
||||
hasNested bool
|
||||
)
|
||||
if e.Code, err = ch.decoder.Int32(); err != nil {
|
||||
return err
|
||||
}
|
||||
if e.Name, err = ch.decoder.String(); err != nil {
|
||||
return err
|
||||
}
|
||||
if e.Message, err = ch.decoder.String(); err != nil {
|
||||
return err
|
||||
}
|
||||
e.Message = strings.TrimSpace(strings.TrimPrefix(e.Message, e.Name+":"))
|
||||
if e.StackTrace, err = ch.decoder.String(); err != nil {
|
||||
return err
|
||||
}
|
||||
if hasNested, err = ch.decoder.Bool(); err != nil {
|
||||
return err
|
||||
}
|
||||
if hasNested {
|
||||
e.nested = ch.exception()
|
||||
}
|
||||
return &e
|
||||
}
|
|
@ -1,28 +0,0 @@
|
|||
package clickhouse
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql/driver"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/lib/protocol"
|
||||
)
|
||||
|
||||
func (ch *clickhouse) Ping(ctx context.Context) error {
|
||||
return ch.ping(ctx)
|
||||
}
|
||||
|
||||
func (ch *clickhouse) ping(ctx context.Context) error {
|
||||
if ch.conn.closed {
|
||||
return driver.ErrBadConn
|
||||
}
|
||||
ch.logf("-> ping")
|
||||
finish := ch.watchCancel(ctx)
|
||||
defer finish()
|
||||
if err := ch.encoder.Uvarint(protocol.ClientPing); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ch.encoder.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
return ch.process()
|
||||
}
|
|
@ -1,37 +0,0 @@
|
|||
package clickhouse
|
||||
|
||||
type profileInfo struct {
|
||||
rows uint64
|
||||
bytes uint64
|
||||
blocks uint64
|
||||
appliedLimit bool
|
||||
rowsBeforeLimit uint64
|
||||
calculatedRowsBeforeLimit bool
|
||||
}
|
||||
|
||||
func (ch *clickhouse) profileInfo() (*profileInfo, error) {
|
||||
var (
|
||||
p profileInfo
|
||||
err error
|
||||
)
|
||||
if p.rows, err = ch.decoder.Uvarint(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if p.blocks, err = ch.decoder.Uvarint(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if p.bytes, err = ch.decoder.Uvarint(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if p.appliedLimit, err = ch.decoder.Bool(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if p.rowsBeforeLimit, err = ch.decoder.Uvarint(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if p.calculatedRowsBeforeLimit, err = ch.decoder.Bool(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &p, nil
|
||||
}
|
|
@ -1,26 +0,0 @@
|
|||
package clickhouse
|
||||
|
||||
type progress struct {
|
||||
rows uint64
|
||||
bytes uint64
|
||||
totalRows uint64
|
||||
}
|
||||
|
||||
func (ch *clickhouse) progress() (*progress, error) {
|
||||
var (
|
||||
p progress
|
||||
err error
|
||||
)
|
||||
if p.rows, err = ch.decoder.Uvarint(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if p.bytes, err = ch.decoder.Uvarint(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if p.totalRows, err = ch.decoder.Uvarint(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &p, nil
|
||||
}
|
|
@ -1,19 +0,0 @@
|
|||
package clickhouse
|
||||
|
||||
import (
|
||||
"github.com/ClickHouse/clickhouse-go/lib/data"
|
||||
)
|
||||
|
||||
func (ch *clickhouse) readBlock() (*data.Block, error) {
|
||||
if _, err := ch.decoder.String(); err != nil { // temporary table
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ch.decoder.SelectCompress(ch.compress)
|
||||
var block data.Block
|
||||
if err := block.Read(&ch.ServerInfo, ch.decoder); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ch.decoder.SelectCompress(false)
|
||||
return &block, nil
|
||||
}
|
|
@ -1,53 +0,0 @@
|
|||
package clickhouse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/lib/data"
|
||||
"github.com/ClickHouse/clickhouse-go/lib/protocol"
|
||||
)
|
||||
|
||||
func (ch *clickhouse) readMeta() (*data.Block, error) {
|
||||
for {
|
||||
packet, err := ch.decoder.Uvarint()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch packet {
|
||||
case protocol.ServerException:
|
||||
ch.logf("[read meta] <- exception")
|
||||
return nil, ch.exception()
|
||||
case protocol.ServerProgress:
|
||||
progress, err := ch.progress()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ch.logf("[read meta] <- progress: rows=%d, bytes=%d, total rows=%d",
|
||||
progress.rows,
|
||||
progress.bytes,
|
||||
progress.totalRows,
|
||||
)
|
||||
case protocol.ServerProfileInfo:
|
||||
profileInfo, err := ch.profileInfo()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ch.logf("[read meta] <- profiling: rows=%d, bytes=%d, blocks=%d", profileInfo.rows, profileInfo.bytes, profileInfo.blocks)
|
||||
case protocol.ServerData:
|
||||
block, err := ch.readBlock()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ch.logf("[read meta] <- data: packet=%d, columns=%d, rows=%d", packet, block.NumColumns, block.NumRows)
|
||||
return block, nil
|
||||
case protocol.ServerEndOfStream:
|
||||
_, err := ch.readBlock()
|
||||
ch.logf("[process] <- end of stream")
|
||||
return nil, err
|
||||
default:
|
||||
ch.conn.Close()
|
||||
return nil, fmt.Errorf("[read meta] unexpected packet [%d] from server", packet)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,60 +0,0 @@
|
|||
package clickhouse
|
||||
|
||||
import (
|
||||
"github.com/ClickHouse/clickhouse-go/lib/data"
|
||||
"github.com/ClickHouse/clickhouse-go/lib/protocol"
|
||||
)
|
||||
|
||||
func (ch *clickhouse) sendQuery(query string) error {
|
||||
ch.logf("[send query] %s", query)
|
||||
if err := ch.encoder.Uvarint(protocol.ClientQuery); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ch.encoder.String(""); err != nil {
|
||||
return err
|
||||
}
|
||||
{ // client info
|
||||
ch.encoder.Uvarint(1)
|
||||
ch.encoder.String("")
|
||||
ch.encoder.String("") //initial_query_id
|
||||
ch.encoder.String("[::ffff:127.0.0.1]:0")
|
||||
ch.encoder.Uvarint(1) // iface type TCP
|
||||
ch.encoder.String(hostname)
|
||||
ch.encoder.String(hostname)
|
||||
}
|
||||
if err := ch.ClientInfo.Write(ch.encoder); err != nil {
|
||||
return err
|
||||
}
|
||||
if ch.ServerInfo.Revision >= protocol.DBMS_MIN_REVISION_WITH_QUOTA_KEY_IN_CLIENT_INFO {
|
||||
ch.encoder.String("")
|
||||
}
|
||||
|
||||
// the settings are written as list of contiguous name-value pairs, finished with empty name
|
||||
if !ch.settings.IsEmpty() {
|
||||
ch.logf("[query settings] %s", ch.settings.settingsStr)
|
||||
if err := ch.settings.Serialize(ch.encoder); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// empty string is a marker of the end of the settings
|
||||
if err := ch.encoder.String(""); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ch.encoder.Uvarint(protocol.StateComplete); err != nil {
|
||||
return err
|
||||
}
|
||||
compress := protocol.CompressDisable
|
||||
if ch.compress {
|
||||
compress = protocol.CompressEnable
|
||||
}
|
||||
if err := ch.encoder.Uvarint(compress); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ch.encoder.String(query); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ch.writeBlock(&data.Block{}); err != nil {
|
||||
return err
|
||||
}
|
||||
return ch.encoder.Flush()
|
||||
}
|
|
@ -1,40 +0,0 @@
|
|||
package clickhouse
|
||||
|
||||
import (
|
||||
"github.com/ClickHouse/clickhouse-go/lib/data"
|
||||
"github.com/ClickHouse/clickhouse-go/lib/protocol"
|
||||
)
|
||||
|
||||
func (ch *clickhouse) writeBlock(block *data.Block) error {
|
||||
ch.Lock()
|
||||
defer ch.Unlock()
|
||||
if err := ch.encoder.Uvarint(protocol.ClientData); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ch.encoder.String(""); err != nil { // temporary table
|
||||
return err
|
||||
}
|
||||
|
||||
// implement CityHash v 1.0.2 and add LZ4 compression
/*
	From Alexey Milovidov

	As far as I remember, blocks of Native-format data are compressed, while everything else
	(packet numbers and the like) is transmitted without compression.

	The compressed data is organised as a set of compressed frames.
	Each frame has the following layout:
		checksum (16 bytes),
		compression algorithm identifier (1 byte),
		compressed data size (4 bytes, little endian; the size does not include the checksum,
			but does include the other 9 header bytes),
		uncompressed data size (4 bytes, little endian), followed by the compressed data.
	Algorithm identifiers: 0x82 - lz4, 0x90 - zstd.
	The checksum is CityHash128 from CityHash version 1.0.2, computed over the compressed data
	including the 9 header bytes.

	See CompressedReadBufferBase, CompressedWriteBuffer,
	utils/compressor, TCPHandler.
*/
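// Concretely, the frame bytes as filled in by compressWriter.Flush further down in this diff
// (offsets summarized here for orientation; they mirror the description above):
//   bytes  0..15  CityHash128 (v1.0.2) checksum of everything from byte 16 onward
//   byte  16      method byte: 0x82 = LZ4, 0x90 = ZSTD
//   bytes 17..20  compressed size, little endian (includes the 9 header bytes, excludes the checksum)
//   bytes 21..24  uncompressed size, little endian
//   bytes 25..    compressed payload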
|
||||
ch.encoder.SelectCompress(ch.compress)
|
||||
err := block.Write(&ch.ServerInfo, ch.encoder)
|
||||
ch.encoder.SelectCompress(false)
|
||||
return err
|
||||
}
|
|
@ -1,191 +0,0 @@
|
|||
package clickhouse
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
"database/sql/driver"
|
||||
"net"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
var tick int32
|
||||
|
||||
type openStrategy int8
|
||||
|
||||
func (s openStrategy) String() string {
|
||||
switch s {
|
||||
case connOpenInOrder:
|
||||
return "in_order"
|
||||
case connOpenTimeRandom:
|
||||
return "time_random"
|
||||
}
|
||||
return "random"
|
||||
}
|
||||
|
||||
const (
|
||||
connOpenRandom openStrategy = iota + 1
|
||||
connOpenInOrder
|
||||
connOpenTimeRandom
|
||||
)
|
||||
|
||||
type connOptions struct {
|
||||
secure, skipVerify bool
|
||||
tlsConfig *tls.Config
|
||||
hosts []string
|
||||
connTimeout, readTimeout, writeTimeout time.Duration
|
||||
noDelay bool
|
||||
openStrategy openStrategy
|
||||
logf func(string, ...interface{})
|
||||
}
|
||||
|
||||
func dial(options connOptions) (*connect, error) {
|
||||
var (
|
||||
err error
|
||||
abs = func(v int) int {
|
||||
if v < 0 {
|
||||
return -1 * v
|
||||
}
|
||||
return v
|
||||
}
|
||||
conn net.Conn
|
||||
ident = abs(int(atomic.AddInt32(&tick, 1)))
|
||||
)
|
||||
tlsConfig := options.tlsConfig
|
||||
if options.secure {
|
||||
if tlsConfig == nil {
|
||||
tlsConfig = &tls.Config{}
|
||||
}
|
||||
tlsConfig.InsecureSkipVerify = options.skipVerify
|
||||
}
|
||||
checkedHosts := make(map[int]struct{}, len(options.hosts))
|
||||
for i := range options.hosts {
|
||||
var num int
|
||||
switch options.openStrategy {
|
||||
case connOpenInOrder:
|
||||
num = i
|
||||
case connOpenRandom:
|
||||
num = (ident + i) % len(options.hosts)
|
||||
case connOpenTimeRandom:
|
||||
// select host based on milliseconds
|
||||
num = int((time.Now().UnixNano()/1000)%1000) % len(options.hosts)
|
||||
for _, ok := checkedHosts[num]; ok; _, ok = checkedHosts[num] {
|
||||
num = int(time.Now().UnixNano()) % len(options.hosts)
|
||||
}
|
||||
checkedHosts[num] = struct{}{}
|
||||
}
|
||||
switch {
|
||||
case options.secure:
|
||||
conn, err = tls.DialWithDialer(
|
||||
&net.Dialer{
|
||||
Timeout: options.connTimeout,
|
||||
},
|
||||
"tcp",
|
||||
options.hosts[num],
|
||||
tlsConfig,
|
||||
)
|
||||
default:
|
||||
conn, err = net.DialTimeout("tcp", options.hosts[num], options.connTimeout)
|
||||
}
|
||||
if err == nil {
|
||||
options.logf(
|
||||
"[dial] secure=%t, skip_verify=%t, strategy=%s, ident=%d, server=%d -> %s",
|
||||
options.secure,
|
||||
options.skipVerify,
|
||||
options.openStrategy,
|
||||
ident,
|
||||
num,
|
||||
conn.RemoteAddr(),
|
||||
)
|
||||
if tcp, ok := conn.(*net.TCPConn); ok {
|
||||
err = tcp.SetNoDelay(options.noDelay) // Disable or enable the Nagle Algorithm for this tcp socket
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return &connect{
|
||||
Conn: conn,
|
||||
logf: options.logf,
|
||||
ident: ident,
|
||||
buffer: bufio.NewReader(conn),
|
||||
readTimeout: options.readTimeout,
|
||||
writeTimeout: options.writeTimeout,
|
||||
}, nil
|
||||
} else {
|
||||
options.logf(
|
||||
"[dial err] secure=%t, skip_verify=%t, strategy=%s, ident=%d, addr=%s\n%#v",
|
||||
options.secure,
|
||||
options.skipVerify,
|
||||
options.openStrategy,
|
||||
ident,
|
||||
options.hosts[num],
|
||||
err,
|
||||
)
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
type connect struct {
|
||||
net.Conn
|
||||
logf func(string, ...interface{})
|
||||
ident int
|
||||
buffer *bufio.Reader
|
||||
closed bool
|
||||
readTimeout time.Duration
|
||||
writeTimeout time.Duration
|
||||
lastReadDeadlineTime time.Time
|
||||
lastWriteDeadlineTime time.Time
|
||||
}
|
||||
|
||||
func (conn *connect) Read(b []byte) (int, error) {
|
||||
var (
|
||||
n int
|
||||
err error
|
||||
total int
|
||||
dstLen = len(b)
|
||||
)
|
||||
if currentTime := now(); conn.readTimeout != 0 && currentTime.Sub(conn.lastReadDeadlineTime) > (conn.readTimeout>>2) {
|
||||
conn.SetReadDeadline(time.Now().Add(conn.readTimeout))
|
||||
conn.lastReadDeadlineTime = currentTime
|
||||
}
|
||||
for total < dstLen {
|
||||
if n, err = conn.buffer.Read(b[total:]); err != nil {
|
||||
conn.logf("[connect] read error: %v", err)
|
||||
conn.Close()
|
||||
return n, driver.ErrBadConn
|
||||
}
|
||||
total += n
|
||||
}
|
||||
return total, nil
|
||||
}
|
||||
|
||||
func (conn *connect) Write(b []byte) (int, error) {
|
||||
var (
|
||||
n int
|
||||
err error
|
||||
total int
|
||||
srcLen = len(b)
|
||||
)
|
||||
if currentTime := now(); conn.writeTimeout != 0 && currentTime.Sub(conn.lastWriteDeadlineTime) > (conn.writeTimeout>>2) {
|
||||
conn.SetWriteDeadline(time.Now().Add(conn.writeTimeout))
|
||||
conn.lastWriteDeadlineTime = currentTime
|
||||
}
|
||||
for total < srcLen {
|
||||
if n, err = conn.Conn.Write(b[total:]); err != nil {
|
||||
conn.logf("[connect] write error: %v", err)
|
||||
conn.Close()
|
||||
return n, driver.ErrBadConn
|
||||
}
|
||||
total += n
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (conn *connect) Close() error {
|
||||
if !conn.closed {
|
||||
conn.closed = true
|
||||
return conn.Conn.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,9 +0,0 @@
|
|||
---
|
||||
version: '3'
|
||||
services:
|
||||
clickhouse:
|
||||
image: yandex/clickhouse-server
|
||||
ports:
|
||||
- 127.0.0.1:8123:8123
|
||||
- 127.0.0.1:9000:9000
|
||||
- 127.0.0.1:9009:9009
|
|
@ -1,11 +0,0 @@
|
|||
module github.com/ClickHouse/clickhouse-go
|
||||
|
||||
go 1.12
|
||||
|
||||
require (
|
||||
github.com/bkaradzic/go-lz4 v1.0.0
|
||||
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58
|
||||
github.com/jmoiron/sqlx v1.2.0
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible
|
||||
github.com/stretchr/testify v1.3.0
|
||||
)
|
|
@ -1,18 +0,0 @@
|
|||
github.com/bkaradzic/go-lz4 v1.0.0 h1:RXc4wYsyz985CkXXeX04y4VnZFGG8Rd43pRaHsOXAKk=
|
||||
github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4=
|
||||
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg=
|
||||
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80=
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA=
|
||||
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
|
@ -1,12 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
echo "" > coverage.txt
|
||||
|
||||
for d in $(go list ./... | grep -v vendor | grep -v examples); do
|
||||
go test -race -coverprofile=profile.out -covermode=atomic $d
|
||||
if [ -f profile.out ]; then
|
||||
cat profile.out >> coverage.txt
|
||||
rm profile.out
|
||||
fi
|
||||
done
|
|
@ -1,129 +0,0 @@
|
|||
package clickhouse
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func numInput(query string) int {
|
||||
|
||||
var (
|
||||
count int
|
||||
args = make(map[string]struct{})
|
||||
reader = bytes.NewReader([]byte(query))
|
||||
quote, gravis bool
|
||||
keyword bool
|
||||
inBetween bool
|
||||
like = newMatcher("like")
|
||||
limit = newMatcher("limit")
|
||||
between = newMatcher("between")
|
||||
and = newMatcher("and")
|
||||
)
|
||||
for {
|
||||
if char, _, err := reader.ReadRune(); err == nil {
|
||||
switch char {
|
||||
case '\'':
|
||||
if !gravis {
|
||||
quote = !quote
|
||||
}
|
||||
case '`':
|
||||
if !quote {
|
||||
gravis = !gravis
|
||||
}
|
||||
}
|
||||
if quote || gravis {
|
||||
continue
|
||||
}
|
||||
switch {
|
||||
case char == '?' && keyword:
|
||||
count++
|
||||
case char == '@':
|
||||
if param := paramParser(reader); len(param) != 0 {
|
||||
if _, found := args[param]; !found {
|
||||
args[param] = struct{}{}
|
||||
count++
|
||||
}
|
||||
}
|
||||
case
|
||||
char == '=',
|
||||
char == '<',
|
||||
char == '>',
|
||||
char == '(',
|
||||
char == ',',
|
||||
char == '[':
|
||||
keyword = true
|
||||
default:
|
||||
if limit.matchRune(char) || like.matchRune(char) {
|
||||
keyword = true
|
||||
} else if between.matchRune(char) {
|
||||
keyword = true
|
||||
inBetween = true
|
||||
} else if inBetween && and.matchRune(char) {
|
||||
keyword = true
|
||||
inBetween = false
|
||||
} else {
|
||||
keyword = keyword && (char == ' ' || char == '\t' || char == '\n')
|
||||
}
|
||||
}
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
func paramParser(reader *bytes.Reader) string {
|
||||
var name bytes.Buffer
|
||||
for {
|
||||
if char, _, err := reader.ReadRune(); err == nil {
|
||||
if char == '_' || char >= '0' && char <= '9' || 'a' <= char && char <= 'z' || 'A' <= char && char <= 'Z' {
|
||||
name.WriteRune(char)
|
||||
} else {
|
||||
reader.UnreadRune()
|
||||
break
|
||||
}
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
return name.String()
|
||||
}
|
||||
|
||||
var selectRe = regexp.MustCompile(`\s+SELECT\s+`)
|
||||
|
||||
func isInsert(query string) bool {
|
||||
if f := strings.Fields(query); len(f) > 2 {
|
||||
return strings.EqualFold("INSERT", f[0]) && strings.EqualFold("INTO", f[1]) && !selectRe.MatchString(strings.ToUpper(query))
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func quote(v driver.Value) string {
|
||||
switch v := reflect.ValueOf(v); v.Kind() {
|
||||
case reflect.Slice:
|
||||
values := make([]string, 0, v.Len())
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
values = append(values, quote(v.Index(i).Interface()))
|
||||
}
|
||||
return strings.Join(values, ", ")
|
||||
}
|
||||
switch v := v.(type) {
|
||||
case string:
|
||||
return "'" + strings.NewReplacer(`\`, `\\`, `'`, `\'`).Replace(v) + "'"
|
||||
case time.Time:
|
||||
return formatTime(v)
|
||||
}
|
||||
return fmt.Sprint(v)
|
||||
}
|
||||
|
||||
func formatTime(value time.Time) string {
|
||||
if (value.Hour() + value.Minute() + value.Second() + value.Nanosecond()) == 0 {
|
||||
return fmt.Sprintf("toDate(%d)", int(int16(value.Unix()/24/3600)))
|
||||
}
|
||||
return fmt.Sprintf("toDateTime(%d)", int(uint32(value.Unix())))
|
||||
}
|
|
@ -1,107 +0,0 @@
|
|||
// +build !clz4
|
||||
|
||||
package binary
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/lib/lz4"
|
||||
)
|
||||
|
||||
type compressReader struct {
|
||||
reader io.Reader
|
||||
// data uncompressed
|
||||
data []byte
|
||||
// data position
|
||||
pos int
|
||||
// data compressed
|
||||
zdata []byte
|
||||
// lz4 headers
|
||||
header []byte
|
||||
}
|
||||
|
||||
// NewCompressReader wraps the io.Reader
|
||||
func NewCompressReader(r io.Reader) *compressReader {
|
||||
p := &compressReader{
|
||||
reader: r,
|
||||
header: make([]byte, HeaderSize),
|
||||
}
|
||||
p.data = make([]byte, BlockMaxSize, BlockMaxSize)
|
||||
|
||||
zlen := lz4.CompressBound(BlockMaxSize) + HeaderSize
|
||||
p.zdata = make([]byte, zlen, zlen)
|
||||
|
||||
p.pos = len(p.data)
|
||||
return p
|
||||
}
|
||||
|
||||
func (cr *compressReader) Read(buf []byte) (n int, err error) {
|
||||
var bytesRead = 0
|
||||
n = len(buf)
|
||||
|
||||
if cr.pos < len(cr.data) {
|
||||
copyedSize := copy(buf, cr.data[cr.pos:])
|
||||
|
||||
bytesRead += copyedSize
|
||||
cr.pos += copyedSize
|
||||
}
|
||||
|
||||
for bytesRead < n {
|
||||
if err = cr.readCompressedData(); err != nil {
|
||||
return bytesRead, err
|
||||
}
|
||||
copyedSize := copy(buf[bytesRead:], cr.data)
|
||||
|
||||
bytesRead += copyedSize
|
||||
cr.pos = copyedSize
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (cr *compressReader) readCompressedData() (err error) {
|
||||
cr.pos = 0
|
||||
var n int
|
||||
n, err = cr.reader.Read(cr.header)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if n != len(cr.header) {
|
||||
return fmt.Errorf("Lz4 decompression header EOF")
|
||||
}
|
||||
|
||||
compressedSize := int(binary.LittleEndian.Uint32(cr.header[17:])) - 9
|
||||
decompressedSize := int(binary.LittleEndian.Uint32(cr.header[21:]))
|
||||
|
||||
if compressedSize > cap(cr.zdata) {
|
||||
cr.zdata = make([]byte, compressedSize)
|
||||
}
|
||||
if decompressedSize > cap(cr.data) {
|
||||
cr.data = make([]byte, decompressedSize)
|
||||
}
|
||||
|
||||
cr.zdata = cr.zdata[:compressedSize]
|
||||
cr.data = cr.data[:decompressedSize]
|
||||
|
||||
// @TODO checksum
|
||||
if cr.header[16] == LZ4 {
|
||||
n, err = cr.reader.Read(cr.zdata)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if n != len(cr.zdata) {
|
||||
return fmt.Errorf("Decompress read size not match")
|
||||
}
|
||||
|
||||
_, err = lz4.Decode(cr.data, cr.zdata)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("Unknown compression method: 0x%02x ", cr.header[16])
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
vendor/github.com/ClickHouse/clickhouse-go/lib/binary/compress_reader_clz4.go (107 lines, generated, vendored)
|
@ -1,107 +0,0 @@
|
|||
// +build clz4
|
||||
|
||||
package binary
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
lz4 "github.com/cloudflare/golz4"
|
||||
)
|
||||
|
||||
type compressReader struct {
|
||||
reader io.Reader
|
||||
// data uncompressed
|
||||
data []byte
|
||||
// data position
|
||||
pos int
|
||||
// data compressed
|
||||
zdata []byte
|
||||
// lz4 headers
|
||||
header []byte
|
||||
}
|
||||
|
||||
// NewCompressReader wraps the io.Reader
|
||||
func NewCompressReader(r io.Reader) *compressReader {
|
||||
p := &compressReader{
|
||||
reader: r,
|
||||
header: make([]byte, HeaderSize),
|
||||
}
|
||||
p.data = make([]byte, BlockMaxSize, BlockMaxSize)
|
||||
|
||||
zlen := lz4.CompressBound(p.data) + HeaderSize
|
||||
p.zdata = make([]byte, zlen, zlen)
|
||||
|
||||
p.pos = len(p.data)
|
||||
return p
|
||||
}
|
||||
|
||||
func (cr *compressReader) Read(buf []byte) (n int, err error) {
|
||||
var bytesRead = 0
|
||||
n = len(buf)
|
||||
|
||||
if cr.pos < len(cr.data) {
|
||||
copyedSize := copy(buf, cr.data[cr.pos:])
|
||||
|
||||
bytesRead += copyedSize
|
||||
cr.pos += copyedSize
|
||||
}
|
||||
|
||||
for bytesRead < n {
|
||||
if err = cr.readCompressedData(); err != nil {
|
||||
return bytesRead, err
|
||||
}
|
||||
copyedSize := copy(buf[bytesRead:], cr.data)
|
||||
|
||||
bytesRead += copyedSize
|
||||
cr.pos = copyedSize
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (cr *compressReader) readCompressedData() (err error) {
|
||||
cr.pos = 0
|
||||
var n int
|
||||
n, err = cr.reader.Read(cr.header)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if n != len(cr.header) {
|
||||
return fmt.Errorf("Lz4 decompression header EOF")
|
||||
}
|
||||
|
||||
compressedSize := int(binary.LittleEndian.Uint32(cr.header[17:])) - 9
|
||||
decompressedSize := int(binary.LittleEndian.Uint32(cr.header[21:]))
|
||||
|
||||
if compressedSize > cap(cr.zdata) {
|
||||
cr.zdata = make([]byte, compressedSize)
|
||||
}
|
||||
if decompressedSize > cap(cr.data) {
|
||||
cr.data = make([]byte, decompressedSize)
|
||||
}
|
||||
|
||||
cr.zdata = cr.zdata[:compressedSize]
|
||||
cr.data = cr.data[:decompressedSize]
|
||||
|
||||
// @TODO checksum
|
||||
if cr.header[16] == LZ4 {
|
||||
n, err = cr.reader.Read(cr.zdata)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if n != len(cr.zdata) {
|
||||
return fmt.Errorf("Decompress read size not match")
|
||||
}
|
||||
|
||||
err = lz4.Uncompress(cr.zdata, cr.data)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("Unknown compression method: 0x%02x ", cr.header[16])
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -1,21 +0,0 @@
|
|||
package binary
|
||||
|
||||
type CompressionMethodByte byte
|
||||
|
||||
const (
|
||||
NONE CompressionMethodByte = 0x02
|
||||
LZ4 = 0x82
|
||||
ZSTD = 0x90
|
||||
)
|
||||
|
||||
const (
|
||||
// ChecksumSize is 128bits for cityhash102 checksum
|
||||
ChecksumSize = 16
|
||||
// CompressHeader magic + compressed_size + uncompressed_size
|
||||
CompressHeaderSize = 1 + 4 + 4
|
||||
|
||||
// HeaderSize
|
||||
HeaderSize = ChecksumSize + CompressHeaderSize
|
||||
// BlockMaxSize 1MB
|
||||
BlockMaxSize = 1 << 20
|
||||
)
|
|
@ -1,79 +0,0 @@
|
|||
// +build !clz4
|
||||
|
||||
package binary
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/lib/cityhash102"
|
||||
"github.com/ClickHouse/clickhouse-go/lib/lz4"
|
||||
)
|
||||
|
||||
type compressWriter struct {
|
||||
writer io.Writer
|
||||
// data uncompressed
|
||||
data []byte
|
||||
// data position
|
||||
pos int
|
||||
// data compressed
|
||||
zdata []byte
|
||||
}
|
||||
|
||||
// NewCompressWriter wraps the io.Writer
|
||||
func NewCompressWriter(w io.Writer) *compressWriter {
|
||||
p := &compressWriter{writer: w}
|
||||
p.data = make([]byte, BlockMaxSize, BlockMaxSize)
|
||||
|
||||
zlen := lz4.CompressBound(BlockMaxSize) + HeaderSize
|
||||
p.zdata = make([]byte, zlen, zlen)
|
||||
return p
|
||||
}
|
||||
|
||||
func (cw *compressWriter) Write(buf []byte) (int, error) {
|
||||
var n int
|
||||
for len(buf) > 0 {
|
||||
// Accumulate the data to be compressed.
|
||||
m := copy(cw.data[cw.pos:], buf)
|
||||
cw.pos += m
|
||||
buf = buf[m:]
|
||||
|
||||
if cw.pos == len(cw.data) {
|
||||
err := cw.Flush()
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
n += m
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (cw *compressWriter) Flush() (err error) {
|
||||
if cw.pos == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// write the headers
|
||||
compressedSize, err := lz4.Encode(cw.zdata[HeaderSize:], cw.data[:cw.pos])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
compressedSize += CompressHeaderSize
|
||||
// fill the header, compressed_size_32 + uncompressed_size_32
|
||||
cw.zdata[16] = LZ4
|
||||
binary.LittleEndian.PutUint32(cw.zdata[17:], uint32(compressedSize))
|
||||
binary.LittleEndian.PutUint32(cw.zdata[21:], uint32(cw.pos))
|
||||
|
||||
// fill the checksum
|
||||
checkSum := cityhash102.CityHash128(cw.zdata[16:], uint32(compressedSize))
|
||||
binary.LittleEndian.PutUint64(cw.zdata[0:], checkSum.Lower64())
|
||||
binary.LittleEndian.PutUint64(cw.zdata[8:], checkSum.Higher64())
|
||||
|
||||
cw.writer.Write(cw.zdata[:compressedSize+ChecksumSize])
|
||||
if w, ok := cw.writer.(WriteFlusher); ok {
|
||||
err = w.Flush()
|
||||
}
|
||||
cw.pos = 0
|
||||
return
|
||||
}
|
|
@ -1,78 +0,0 @@
|
|||
// +build clz4
|
||||
|
||||
package binary
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io"
|
||||
|
||||
lz4 "github.com/cloudflare/golz4"
|
||||
"github.com/ClickHouse/clickhouse-go/lib/cityhash102"
|
||||
)
|
||||
|
||||
type compressWriter struct {
|
||||
writer io.Writer
|
||||
// data uncompressed
|
||||
data []byte
|
||||
// data position
|
||||
pos int
|
||||
// data compressed
|
||||
zdata []byte
|
||||
}
|
||||
|
||||
// NewCompressWriter wraps the io.Writer
|
||||
func NewCompressWriter(w io.Writer) *compressWriter {
|
||||
p := &compressWriter{writer: w}
|
||||
p.data = make([]byte, BlockMaxSize, BlockMaxSize)
|
||||
|
||||
zlen := lz4.CompressBound(p.data) + HeaderSize
|
||||
p.zdata = make([]byte, zlen, zlen)
|
||||
return p
|
||||
}
|
||||
|
||||
func (cw *compressWriter) Write(buf []byte) (int, error) {
|
||||
var n int
|
||||
for len(buf) > 0 {
|
||||
// Accumulate the data to be compressed.
|
||||
m := copy(cw.data[cw.pos:], buf)
|
||||
cw.pos += m
|
||||
buf = buf[m:]
|
||||
|
||||
if cw.pos == len(cw.data) {
|
||||
err := cw.Flush()
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
n += m
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (cw *compressWriter) Flush() (err error) {
|
||||
if cw.pos == 0 {
|
||||
return
|
||||
}
|
||||
// write the headers
|
||||
compressedSize, err := lz4.Compress(cw.data[:cw.pos], cw.zdata[HeaderSize:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
compressedSize += CompressHeaderSize
|
||||
// fill the header, compressed_size_32 + uncompressed_size_32
|
||||
cw.zdata[16] = LZ4
|
||||
binary.LittleEndian.PutUint32(cw.zdata[17:], uint32(compressedSize))
|
||||
binary.LittleEndian.PutUint32(cw.zdata[21:], uint32(cw.pos))
|
||||
|
||||
// fill the checksum
|
||||
checkSum := cityhash102.CityHash128(cw.zdata[16:], uint32(compressedSize))
|
||||
binary.LittleEndian.PutUint64(cw.zdata[0:], checkSum.Lower64())
|
||||
binary.LittleEndian.PutUint64(cw.zdata[8:], checkSum.Higher64())
|
||||
|
||||
cw.writer.Write(cw.zdata[:compressedSize+ChecksumSize])
|
||||
if w, ok := cw.writer.(WriteFlusher); ok {
|
||||
err = w.Flush()
|
||||
}
|
||||
cw.pos = 0
|
||||
return
|
||||
}
|
|
@ -1,171 +0,0 @@
|
|||
package binary
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"math"
|
||||
)
|
||||
|
||||
func NewDecoder(input io.Reader) *Decoder {
|
||||
return &Decoder{
|
||||
input: input,
|
||||
}
|
||||
}
|
||||
|
||||
func NewDecoderWithCompress(input io.Reader) *Decoder {
|
||||
return &Decoder{
|
||||
input: input,
|
||||
compressInput: NewCompressReader(input),
|
||||
}
|
||||
}
|
||||
|
||||
type Decoder struct {
|
||||
compress bool
|
||||
input io.Reader
|
||||
compressInput io.Reader
|
||||
scratch [binary.MaxVarintLen64]byte
|
||||
}
|
||||
|
||||
func (decoder *Decoder) SelectCompress(compress bool) {
|
||||
decoder.compress = compress
|
||||
}
|
||||
|
||||
func (decoder *Decoder) Get() io.Reader {
|
||||
if decoder.compress && decoder.compressInput != nil {
|
||||
return decoder.compressInput
|
||||
}
|
||||
return decoder.input
|
||||
}
|
||||
|
||||
func (decoder *Decoder) Bool() (bool, error) {
|
||||
v, err := decoder.ReadByte()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return v == 1, nil
|
||||
}
|
||||
|
||||
func (decoder *Decoder) Uvarint() (uint64, error) {
|
||||
return binary.ReadUvarint(decoder)
|
||||
}
|
||||
|
||||
func (decoder *Decoder) Int8() (int8, error) {
|
||||
v, err := decoder.ReadByte()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int8(v), nil
|
||||
}
|
||||
|
||||
func (decoder *Decoder) Int16() (int16, error) {
|
||||
v, err := decoder.UInt16()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int16(v), nil
|
||||
}
|
||||
|
||||
func (decoder *Decoder) Int32() (int32, error) {
|
||||
v, err := decoder.UInt32()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int32(v), nil
|
||||
}
|
||||
|
||||
func (decoder *Decoder) Int64() (int64, error) {
|
||||
v, err := decoder.UInt64()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int64(v), nil
|
||||
}
|
||||
|
||||
func (decoder *Decoder) UInt8() (uint8, error) {
|
||||
v, err := decoder.ReadByte()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint8(v), nil
|
||||
}
|
||||
|
||||
func (decoder *Decoder) UInt16() (uint16, error) {
|
||||
if _, err := decoder.Get().Read(decoder.scratch[:2]); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint16(decoder.scratch[0]) | uint16(decoder.scratch[1])<<8, nil
|
||||
}
|
||||
|
||||
func (decoder *Decoder) UInt32() (uint32, error) {
|
||||
if _, err := decoder.Get().Read(decoder.scratch[:4]); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint32(decoder.scratch[0]) |
|
||||
uint32(decoder.scratch[1])<<8 |
|
||||
uint32(decoder.scratch[2])<<16 |
|
||||
uint32(decoder.scratch[3])<<24, nil
|
||||
}
|
||||
|
||||
func (decoder *Decoder) UInt64() (uint64, error) {
|
||||
if _, err := decoder.Get().Read(decoder.scratch[:8]); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint64(decoder.scratch[0]) |
|
||||
uint64(decoder.scratch[1])<<8 |
|
||||
uint64(decoder.scratch[2])<<16 |
|
||||
uint64(decoder.scratch[3])<<24 |
|
||||
uint64(decoder.scratch[4])<<32 |
|
||||
uint64(decoder.scratch[5])<<40 |
|
||||
uint64(decoder.scratch[6])<<48 |
|
||||
uint64(decoder.scratch[7])<<56, nil
|
||||
}
|
||||
|
||||
func (decoder *Decoder) Float32() (float32, error) {
|
||||
v, err := decoder.UInt32()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return math.Float32frombits(v), nil
|
||||
}
|
||||
|
||||
func (decoder *Decoder) Float64() (float64, error) {
|
||||
v, err := decoder.UInt64()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return math.Float64frombits(v), nil
|
||||
}
|
||||
|
||||
func (decoder *Decoder) Fixed(ln int) ([]byte, error) {
|
||||
if reader, ok := decoder.Get().(FixedReader); ok {
|
||||
return reader.Fixed(ln)
|
||||
}
|
||||
buf := make([]byte, ln)
|
||||
if _, err := decoder.Get().Read(buf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
func (decoder *Decoder) String() (string, error) {
|
||||
strlen, err := decoder.Uvarint()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
str, err := decoder.Fixed(int(strlen))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(str), nil
|
||||
}
|
||||
|
||||
func (decoder *Decoder) ReadByte() (byte, error) {
|
||||
if _, err := decoder.Get().Read(decoder.scratch[:1]); err != nil {
|
||||
return 0x0, err
|
||||
}
|
||||
return decoder.scratch[0], nil
|
||||
}
|
||||
|
||||
type FixedReader interface {
|
||||
Fixed(ln int) ([]byte, error)
|
||||
}
|
|
@ -1,171 +0,0 @@
|
|||
package binary
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"math"
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return &Encoder{
|
||||
output: w,
|
||||
}
|
||||
}
|
||||
|
||||
func NewEncoderWithCompress(w io.Writer) *Encoder {
|
||||
return &Encoder{
|
||||
output: w,
|
||||
compressOutput: NewCompressWriter(w),
|
||||
}
|
||||
}
|
||||
|
||||
type Encoder struct {
|
||||
compress bool
|
||||
output io.Writer
|
||||
compressOutput io.Writer
|
||||
scratch [binary.MaxVarintLen64]byte
|
||||
}
|
||||
|
||||
func (enc *Encoder) SelectCompress(compress bool) {
|
||||
if enc.compressOutput == nil {
|
||||
return
|
||||
}
|
||||
if enc.compress && !compress {
|
||||
enc.Flush()
|
||||
}
|
||||
enc.compress = compress
|
||||
}
|
||||
|
||||
func (enc *Encoder) Get() io.Writer {
|
||||
if enc.compress && enc.compressOutput != nil {
|
||||
return enc.compressOutput
|
||||
}
|
||||
return enc.output
|
||||
}
|
||||
|
||||
func (enc *Encoder) Uvarint(v uint64) error {
|
||||
ln := binary.PutUvarint(enc.scratch[:binary.MaxVarintLen64], v)
|
||||
if _, err := enc.Get().Write(enc.scratch[0:ln]); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (enc *Encoder) Bool(v bool) error {
|
||||
if v {
|
||||
return enc.UInt8(1)
|
||||
}
|
||||
return enc.UInt8(0)
|
||||
}
|
||||
|
||||
func (enc *Encoder) Int8(v int8) error {
|
||||
return enc.UInt8(uint8(v))
|
||||
}
|
||||
|
||||
func (enc *Encoder) Int16(v int16) error {
|
||||
return enc.UInt16(uint16(v))
|
||||
}
|
||||
|
||||
func (enc *Encoder) Int32(v int32) error {
|
||||
return enc.UInt32(uint32(v))
|
||||
}
|
||||
|
||||
func (enc *Encoder) Int64(v int64) error {
|
||||
return enc.UInt64(uint64(v))
|
||||
}
|
||||
|
||||
func (enc *Encoder) UInt8(v uint8) error {
|
||||
enc.scratch[0] = v
|
||||
if _, err := enc.Get().Write(enc.scratch[:1]); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (enc *Encoder) UInt16(v uint16) error {
|
||||
enc.scratch[0] = byte(v)
|
||||
enc.scratch[1] = byte(v >> 8)
|
||||
if _, err := enc.Get().Write(enc.scratch[:2]); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (enc *Encoder) UInt32(v uint32) error {
|
||||
enc.scratch[0] = byte(v)
|
||||
enc.scratch[1] = byte(v >> 8)
|
||||
enc.scratch[2] = byte(v >> 16)
|
||||
enc.scratch[3] = byte(v >> 24)
|
||||
if _, err := enc.Get().Write(enc.scratch[:4]); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (enc *Encoder) UInt64(v uint64) error {
|
||||
enc.scratch[0] = byte(v)
|
||||
enc.scratch[1] = byte(v >> 8)
|
||||
enc.scratch[2] = byte(v >> 16)
|
||||
enc.scratch[3] = byte(v >> 24)
|
||||
enc.scratch[4] = byte(v >> 32)
|
||||
enc.scratch[5] = byte(v >> 40)
|
||||
enc.scratch[6] = byte(v >> 48)
|
||||
enc.scratch[7] = byte(v >> 56)
|
||||
if _, err := enc.Get().Write(enc.scratch[:8]); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (enc *Encoder) Float32(v float32) error {
|
||||
return enc.UInt32(math.Float32bits(v))
|
||||
}
|
||||
|
||||
func (enc *Encoder) Float64(v float64) error {
|
||||
return enc.UInt64(math.Float64bits(v))
|
||||
}
|
||||
|
||||
func (enc *Encoder) String(v string) error {
|
||||
str := Str2Bytes(v)
|
||||
if err := enc.Uvarint(uint64(len(str))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := enc.Get().Write(str); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (enc *Encoder) RawString(str []byte) error {
|
||||
if err := enc.Uvarint(uint64(len(str))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := enc.Get().Write(str); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (enc *Encoder) Write(b []byte) (int, error) {
|
||||
return enc.Get().Write(b)
|
||||
}
|
||||
|
||||
func (enc *Encoder) Flush() error {
|
||||
if w, ok := enc.Get().(WriteFlusher); ok {
|
||||
return w.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type WriteFlusher interface {
|
||||
Flush() error
|
||||
}
|
||||
|
||||
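// Str2Bytes reinterprets the string's backing array as a []byte without copying (via unsafe); the returned slice must not be modified.
|
||||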
func Str2Bytes(str string) []byte {
|
||||
header := (*reflect.SliceHeader)(unsafe.Pointer(&str))
|
||||
header.Len = len(str)
|
||||
header.Cap = header.Len
|
||||
return *(*[]byte)(unsafe.Pointer(header))
|
||||
}
|
|
@ -1,45 +0,0 @@
|
|||
package cityhash102
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"hash"
|
||||
)
|
||||
|
||||
type City64 struct {
|
||||
s []byte
|
||||
}
|
||||
|
||||
var _ hash.Hash64 = (*City64)(nil)
|
||||
var _ hash.Hash = (*City64)(nil)
|
||||
|
||||
func New64() hash.Hash64 {
|
||||
return &City64{}
|
||||
}
|
||||
|
||||
func (this *City64) Sum(b []byte) []byte {
|
||||
b2 := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(b2, this.Sum64())
|
||||
b = append(b, b2...)
|
||||
return b
|
||||
}
|
||||
|
||||
func (this *City64) Sum64() uint64 {
|
||||
return CityHash64(this.s, uint32(len(this.s)))
|
||||
}
|
||||
|
||||
func (this *City64) Reset() {
|
||||
this.s = this.s[0:0]
|
||||
}
|
||||
|
||||
func (this *City64) BlockSize() int {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (this *City64) Write(s []byte) (n int, err error) {
|
||||
this.s = append(this.s, s...)
|
||||
return len(s), nil
|
||||
}
|
||||
|
||||
func (this *City64) Size() int {
|
||||
return 8
|
||||
}
|
|
@ -1,383 +0,0 @@
|
|||
/*
|
||||
* Go implementation of Google city hash (MIT license)
|
||||
* https://code.google.com/p/cityhash/
|
||||
*
|
||||
* MIT License http://www.opensource.org/licenses/mit-license.php
|
||||
*
|
||||
* I don't even want to pretend to understand the details of city hash.
|
||||
* I am only reproducing the logic in Go as faithfully as I can.
|
||||
*
|
||||
*/
|
||||
|
||||
package cityhash102
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
const (
|
||||
k0 uint64 = 0xc3a5c85c97cb3127
|
||||
k1 uint64 = 0xb492b66fbe98f273
|
||||
k2 uint64 = 0x9ae16a3b2f90404f
|
||||
k3 uint64 = 0xc949d7c7509e6557
|
||||
|
||||
kMul uint64 = 0x9ddfea08eb382d69
|
||||
)
|
||||
|
||||
func fetch64(p []byte) uint64 {
|
||||
return binary.LittleEndian.Uint64(p)
|
||||
//return uint64InExpectedOrder(unalignedLoad64(p))
|
||||
}
|
||||
|
||||
func fetch32(p []byte) uint32 {
|
||||
return binary.LittleEndian.Uint32(p)
|
||||
//return uint32InExpectedOrder(unalignedLoad32(p))
|
||||
}
|
||||
|
||||
func rotate64(val uint64, shift uint32) uint64 {
|
||||
if shift != 0 {
|
||||
return ((val >> shift) | (val << (64 - shift)))
|
||||
}
|
||||
|
||||
return val
|
||||
}
|
||||
|
||||
func rotate32(val uint32, shift uint32) uint32 {
|
||||
if shift != 0 {
|
||||
return ((val >> shift) | (val << (32 - shift)))
|
||||
}
|
||||
|
||||
return val
|
||||
}
|
||||
|
||||
func swap64(a, b *uint64) {
|
||||
*a, *b = *b, *a
|
||||
}
|
||||
|
||||
func swap32(a, b *uint32) {
|
||||
*a, *b = *b, *a
|
||||
}
|
||||
|
||||
func permute3(a, b, c *uint32) {
|
||||
swap32(a, b)
|
||||
swap32(a, c)
|
||||
}
|
||||
|
||||
func rotate64ByAtLeast1(val uint64, shift uint32) uint64 {
|
||||
return (val >> shift) | (val << (64 - shift))
|
||||
}
|
||||
|
||||
func shiftMix(val uint64) uint64 {
|
||||
return val ^ (val >> 47)
|
||||
}
|
||||
|
||||
type Uint128 [2]uint64
|
||||
|
||||
func (this *Uint128) setLower64(l uint64) {
|
||||
this[0] = l
|
||||
}
|
||||
|
||||
func (this *Uint128) setHigher64(h uint64) {
|
||||
this[1] = h
|
||||
}
|
||||
|
||||
func (this Uint128) Lower64() uint64 {
|
||||
return this[0]
|
||||
}
|
||||
|
||||
func (this Uint128) Higher64() uint64 {
|
||||
return this[1]
|
||||
}
|
||||
|
||||
func (this Uint128) Bytes() []byte {
|
||||
b := make([]byte, 16)
|
||||
binary.LittleEndian.PutUint64(b, this[0])
|
||||
binary.LittleEndian.PutUint64(b[8:], this[1])
|
||||
return b
|
||||
}
|
||||
|
||||
func hash128to64(x Uint128) uint64 {
|
||||
// Murmur-inspired hashing.
|
||||
var a = (x.Lower64() ^ x.Higher64()) * kMul
|
||||
a ^= (a >> 47)
|
||||
var b = (x.Higher64() ^ a) * kMul
|
||||
b ^= (b >> 47)
|
||||
b *= kMul
|
||||
return b
|
||||
}
|
||||
|
||||
func hashLen16(u, v uint64) uint64 {
|
||||
return hash128to64(Uint128{u, v})
|
||||
}
|
||||
|
||||
func hashLen16_3(u, v, mul uint64) uint64 {
|
||||
// Murmur-inspired hashing.
|
||||
var a = (u ^ v) * mul
|
||||
a ^= (a >> 47)
|
||||
var b = (v ^ a) * mul
|
||||
b ^= (b >> 47)
|
||||
b *= mul
|
||||
return b
|
||||
}
|
||||
|
||||
func hashLen0to16(s []byte, length uint32) uint64 {
|
||||
if length > 8 {
|
||||
var a = fetch64(s)
|
||||
var b = fetch64(s[length-8:])
|
||||
|
||||
return hashLen16(a, rotate64ByAtLeast1(b+uint64(length), length)) ^ b
|
||||
}
|
||||
|
||||
if length >= 4 {
|
||||
var a = fetch32(s)
|
||||
return hashLen16(uint64(length)+(uint64(a)<<3), uint64(fetch32(s[length-4:])))
|
||||
}
|
||||
|
||||
if length > 0 {
|
||||
var a uint8 = uint8(s[0])
|
||||
var b uint8 = uint8(s[length>>1])
|
||||
var c uint8 = uint8(s[length-1])
|
||||
|
||||
var y uint32 = uint32(a) + (uint32(b) << 8)
|
||||
var z uint32 = length + (uint32(c) << 2)
|
||||
|
||||
return shiftMix(uint64(y)*k2^uint64(z)*k3) * k2
|
||||
}
|
||||
|
||||
return k2
|
||||
}
|
||||
|
||||
// This probably works well for 16-byte strings as well, but it may be overkill
|
||||
func hashLen17to32(s []byte, length uint32) uint64 {
|
||||
var a = fetch64(s) * k1
|
||||
var b = fetch64(s[8:])
|
||||
var c = fetch64(s[length-8:]) * k2
|
||||
var d = fetch64(s[length-16:]) * k0
|
||||
|
||||
return hashLen16(rotate64(a-b, 43)+rotate64(c, 30)+d,
|
||||
a+rotate64(b^k3, 20)-c+uint64(length))
|
||||
}
|
||||
|
||||
func weakHashLen32WithSeeds(w, x, y, z, a, b uint64) Uint128 {
|
||||
a += w
|
||||
b = rotate64(b+a+z, 21)
|
||||
var c uint64 = a
|
||||
a += x
|
||||
a += y
|
||||
b += rotate64(a, 44)
|
||||
return Uint128{a + z, b + c}
|
||||
}
|
||||
|
||||
func weakHashLen32WithSeeds_3(s []byte, a, b uint64) Uint128 {
|
||||
return weakHashLen32WithSeeds(fetch64(s), fetch64(s[8:]), fetch64(s[16:]), fetch64(s[24:]), a, b)
|
||||
}
|
||||
|
||||
func hashLen33to64(s []byte, length uint32) uint64 {
|
||||
var z uint64 = fetch64(s[24:])
|
||||
var a uint64 = fetch64(s) + (uint64(length)+fetch64(s[length-16:]))*k0
|
||||
var b uint64 = rotate64(a+z, 52)
|
||||
var c uint64 = rotate64(a, 37)
|
||||
|
||||
a += fetch64(s[8:])
|
||||
c += rotate64(a, 7)
|
||||
a += fetch64(s[16:])
|
||||
|
||||
var vf uint64 = a + z
|
||||
var vs = b + rotate64(a, 31) + c
|
||||
|
||||
a = fetch64(s[16:]) + fetch64(s[length-32:])
|
||||
z = fetch64(s[length-8:])
|
||||
b = rotate64(a+z, 52)
|
||||
c = rotate64(a, 37)
|
||||
a += fetch64(s[length-24:])
|
||||
c += rotate64(a, 7)
|
||||
a += fetch64(s[length-16:])
|
||||
|
||||
wf := a + z
|
||||
ws := b + rotate64(a, 31) + c
|
||||
r := shiftMix((vf+ws)*k2 + (wf+vs)*k0)
|
||||
return shiftMix(r*k0+vs) * k2
|
||||
}
|
||||
|
||||
func CityHash64(s []byte, length uint32) uint64 {
|
||||
if length <= 32 {
|
||||
if length <= 16 {
|
||||
return hashLen0to16(s, length)
|
||||
} else {
|
||||
return hashLen17to32(s, length)
|
||||
}
|
||||
} else if length <= 64 {
|
||||
return hashLen33to64(s, length)
|
||||
}
|
||||
|
||||
var x uint64 = fetch64(s)
|
||||
var y uint64 = fetch64(s[length-16:]) ^ k1
|
||||
var z uint64 = fetch64(s[length-56:]) ^ k0
|
||||
|
||||
var v Uint128 = weakHashLen32WithSeeds_3(s[length-64:], uint64(length), y)
|
||||
var w Uint128 = weakHashLen32WithSeeds_3(s[length-32:], uint64(length)*k1, k0)
|
||||
|
||||
z += shiftMix(v.Higher64()) * k1
|
||||
x = rotate64(z+x, 39) * k1
|
||||
y = rotate64(y, 33) * k1
|
||||
|
||||
length = (length - 1) & ^uint32(63)
|
||||
for {
|
||||
x = rotate64(x+y+v.Lower64()+fetch64(s[16:]), 37) * k1
|
||||
y = rotate64(y+v.Higher64()+fetch64(s[48:]), 42) * k1
|
||||
|
||||
x ^= w.Higher64()
|
||||
y ^= v.Lower64()
|
||||
|
||||
z = rotate64(z^w.Lower64(), 33)
|
||||
v = weakHashLen32WithSeeds_3(s, v.Higher64()*k1, x+w.Lower64())
|
||||
w = weakHashLen32WithSeeds_3(s[32:], z+w.Higher64(), y)
|
||||
|
||||
swap64(&z, &x)
|
||||
s = s[64:]
|
||||
length -= 64
|
||||
|
||||
if length == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return hashLen16(hashLen16(v.Lower64(), w.Lower64())+shiftMix(y)*k1+z, hashLen16(v.Higher64(), w.Higher64())+x)
|
||||
}
|
||||
|
||||
func CityHash64WithSeed(s []byte, length uint32, seed uint64) uint64 {
|
||||
return CityHash64WithSeeds(s, length, k2, seed)
|
||||
}
|
||||
|
||||
func CityHash64WithSeeds(s []byte, length uint32, seed0, seed1 uint64) uint64 {
|
||||
return hashLen16(CityHash64(s, length)-seed0, seed1)
|
||||
}
|
||||
|
||||
func cityMurmur(s []byte, length uint32, seed Uint128) Uint128 {
|
||||
var a uint64 = seed.Lower64()
|
||||
var b uint64 = seed.Higher64()
|
||||
var c uint64 = 0
|
||||
var d uint64 = 0
|
||||
var l int32 = int32(length) - 16
|
||||
|
||||
if l <= 0 { // len <= 16
|
||||
a = shiftMix(a*k1) * k1
|
||||
c = b*k1 + hashLen0to16(s, length)
|
||||
|
||||
if length >= 8 {
|
||||
d = shiftMix(a + fetch64(s))
|
||||
} else {
|
||||
d = shiftMix(a + c)
|
||||
}
|
||||
|
||||
} else { // len > 16
|
||||
c = hashLen16(fetch64(s[length-8:])+k1, a)
|
||||
d = hashLen16(b+uint64(length), c+fetch64(s[length-16:]))
|
||||
a += d
|
||||
|
||||
for {
|
||||
a ^= shiftMix(fetch64(s)*k1) * k1
|
||||
a *= k1
|
||||
b ^= a
|
||||
c ^= shiftMix(fetch64(s[8:])*k1) * k1
|
||||
c *= k1
|
||||
d ^= c
|
||||
s = s[16:]
|
||||
l -= 16
|
||||
|
||||
if l <= 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
a = hashLen16(a, c)
|
||||
b = hashLen16(d, b)
|
||||
return Uint128{a ^ b, hashLen16(b, a)}
|
||||
}
|
||||
|
||||
func CityHash128WithSeed(s []byte, length uint32, seed Uint128) Uint128 {
|
||||
if length < 128 {
|
||||
return cityMurmur(s, length, seed)
|
||||
}
|
||||
|
||||
// We expect length >= 128 to be the common case. Keep 56 bytes of state:
|
||||
// v, w, x, y, and z.
|
||||
var v, w Uint128
|
||||
var x uint64 = seed.Lower64()
|
||||
var y uint64 = seed.Higher64()
|
||||
var z uint64 = uint64(length) * k1
|
||||
|
||||
var pos uint32
|
||||
var t = s
|
||||
|
||||
v.setLower64(rotate64(y^k1, 49)*k1 + fetch64(s))
|
||||
v.setHigher64(rotate64(v.Lower64(), 42)*k1 + fetch64(s[8:]))
|
||||
w.setLower64(rotate64(y+z, 35)*k1 + x)
|
||||
w.setHigher64(rotate64(x+fetch64(s[88:]), 53) * k1)
|
||||
|
||||
// This is the same inner loop as CityHash64(), manually unrolled.
|
||||
for {
|
||||
x = rotate64(x+y+v.Lower64()+fetch64(s[16:]), 37) * k1
|
||||
y = rotate64(y+v.Higher64()+fetch64(s[48:]), 42) * k1
|
||||
|
||||
x ^= w.Higher64()
|
||||
y ^= v.Lower64()
|
||||
z = rotate64(z^w.Lower64(), 33)
|
||||
v = weakHashLen32WithSeeds_3(s, v.Higher64()*k1, x+w.Lower64())
|
||||
w = weakHashLen32WithSeeds_3(s[32:], z+w.Higher64(), y)
|
||||
swap64(&z, &x)
|
||||
s = s[64:]
|
||||
pos += 64
|
||||
|
||||
x = rotate64(x+y+v.Lower64()+fetch64(s[16:]), 37) * k1
|
||||
y = rotate64(y+v.Higher64()+fetch64(s[48:]), 42) * k1
|
||||
x ^= w.Higher64()
|
||||
y ^= v.Lower64()
|
||||
z = rotate64(z^w.Lower64(), 33)
|
||||
v = weakHashLen32WithSeeds_3(s, v.Higher64()*k1, x+w.Lower64())
|
||||
w = weakHashLen32WithSeeds_3(s[32:], z+w.Higher64(), y)
|
||||
swap64(&z, &x)
|
||||
s = s[64:]
|
||||
pos += 64
|
||||
length -= 128
|
||||
|
||||
if length < 128 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
y += rotate64(w.Lower64(), 37)*k0 + z
|
||||
x += rotate64(v.Lower64()+z, 49) * k0
|
||||
|
||||
// If 0 < length < 128, hash up to 4 chunks of 32 bytes each from the end of s.
|
||||
var tailDone uint32
|
||||
for tailDone = 0; tailDone < length; {
|
||||
tailDone += 32
|
||||
y = rotate64(y-x, 42)*k0 + v.Higher64()
|
||||
|
||||
//TODO why not use origin_len ?
|
||||
w.setLower64(w.Lower64() + fetch64(t[pos+length-tailDone+16:]))
|
||||
x = rotate64(x, 49)*k0 + w.Lower64()
|
||||
w.setLower64(w.Lower64() + v.Lower64())
|
||||
v = weakHashLen32WithSeeds_3(t[pos+length-tailDone:], v.Lower64(), v.Higher64())
|
||||
}
|
||||
// At this point our 48 bytes of state should contain more than
|
||||
// enough information for a strong 128-bit hash. We use two
|
||||
// different 48-byte-to-8-byte hashes to get a 16-byte final result.
|
||||
x = hashLen16(x, v.Lower64())
|
||||
y = hashLen16(y, w.Lower64())
|
||||
|
||||
return Uint128{hashLen16(x+v.Higher64(), w.Higher64()) + y,
|
||||
hashLen16(x+w.Higher64(), y+v.Higher64())}
|
||||
}
|
||||
|
||||
func CityHash128(s []byte, length uint32) (result Uint128) {
|
||||
if length >= 16 {
|
||||
result = CityHash128WithSeed(s[16:length], length-16, Uint128{fetch64(s) ^ k3, fetch64(s[8:])})
|
||||
} else if length >= 8 {
|
||||
result = CityHash128WithSeed(nil, 0, Uint128{fetch64(s) ^ (uint64(length) * k0), fetch64(s[length-8:]) ^ k1})
|
||||
} else {
|
||||
result = CityHash128WithSeed(s, length, Uint128{k0, k1})
|
||||
}
|
||||
return
|
||||
}
|
|
@ -1,5 +0,0 @@
|
|||
/** COPY from https://github.com/zentures/cityhash/
|
||||
|
||||
NOTE: The code is modified to be compatible with CityHash128 used in ClickHouse
|
||||
*/
|
||||
package cityhash102
|
|
@ -1,131 +0,0 @@
|
|||
package column
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/lib/binary"
|
||||
)
|
||||
|
||||
type Array struct {
|
||||
base
|
||||
depth int
|
||||
column Column
|
||||
}
|
||||
|
||||
func (array *Array) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
return nil, fmt.Errorf("do not use Read method for Array(T) column")
|
||||
}
|
||||
|
||||
func (array *Array) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
return array.column.Write(encoder, v)
|
||||
}
|
||||
|
||||
func (array *Array) ReadArray(decoder *binary.Decoder, rows int) (_ []interface{}, err error) {
|
||||
var (
|
||||
values = make([]interface{}, rows)
|
||||
offsets = make([]uint64, rows)
|
||||
)
|
||||
for i := 0; i < rows; i++ {
|
||||
offset, err := decoder.UInt64()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
offsets[i] = offset
|
||||
}
|
||||
for n, offset := range offsets {
|
||||
ln := offset
|
||||
if n != 0 {
|
||||
ln = ln - offsets[n-1]
|
||||
}
|
||||
if values[n], err = array.read(decoder, int(ln)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
func (array *Array) read(decoder *binary.Decoder, ln int) (interface{}, error) {
|
||||
slice := reflect.MakeSlice(array.valueOf.Type(), 0, ln)
|
||||
for i := 0; i < ln; i++ {
|
||||
value, err := array.column.Read(decoder)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
slice = reflect.Append(slice, reflect.ValueOf(value))
|
||||
}
|
||||
return slice.Interface(), nil
|
||||
}
|
||||
|
||||
func (array *Array) Depth() int {
|
||||
return array.depth
|
||||
}
|
||||
|
||||
func parseArray(name, chType string, timezone *time.Location) (*Array, error) {
|
||||
if len(chType) < 11 {
|
||||
return nil, fmt.Errorf("invalid Array column type: %s", chType)
|
||||
}
|
||||
var (
|
||||
depth int
|
||||
columnType = chType
|
||||
)
|
||||
|
||||
loop:
|
||||
for _, str := range strings.Split(chType, "Array(") {
|
||||
switch {
|
||||
case len(str) == 0:
|
||||
depth++
|
||||
default:
|
||||
chType = str[:len(str)-depth]
|
||||
break loop
|
||||
}
|
||||
}
|
||||
column, err := Factory(name, chType, timezone)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Array(T): %v", err)
|
||||
}
|
||||
|
||||
var scanType interface{}
|
||||
switch t := column.ScanType(); t {
|
||||
case arrayBaseTypes[int8(0)]:
|
||||
scanType = []int8{}
|
||||
case arrayBaseTypes[int16(0)]:
|
||||
scanType = []int16{}
|
||||
case arrayBaseTypes[int32(0)]:
|
||||
scanType = []int32{}
|
||||
case arrayBaseTypes[int64(0)]:
|
||||
scanType = []int64{}
|
||||
case arrayBaseTypes[uint8(0)]:
|
||||
scanType = []uint8{}
|
||||
case arrayBaseTypes[uint16(0)]:
|
||||
scanType = []uint16{}
|
||||
case arrayBaseTypes[uint32(0)]:
|
||||
scanType = []uint32{}
|
||||
case arrayBaseTypes[uint64(0)]:
|
||||
scanType = []uint64{}
|
||||
case arrayBaseTypes[float32(0)]:
|
||||
scanType = []float32{}
|
||||
case arrayBaseTypes[float64(0)]:
|
||||
scanType = []float64{}
|
||||
case arrayBaseTypes[string("")]:
|
||||
scanType = []string{}
|
||||
case arrayBaseTypes[time.Time{}]:
|
||||
scanType = []time.Time{}
|
||||
case arrayBaseTypes[IPv4{}], arrayBaseTypes[IPv6{}]:
|
||||
scanType = []net.IP{}
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported Array type '%s'", column.ScanType().Name())
|
||||
}
|
||||
return &Array{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: columnType,
|
||||
valueOf: reflect.ValueOf(scanType),
|
||||
},
|
||||
depth: depth,
|
||||
column: column,
|
||||
}, nil
|
||||
}
|
|
@ -1,189 +0,0 @@
|
|||
package column
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/lib/binary"
|
||||
)
|
||||
|
||||
type Column interface {
|
||||
Name() string
|
||||
CHType() string
|
||||
ScanType() reflect.Type
|
||||
Read(*binary.Decoder) (interface{}, error)
|
||||
Write(*binary.Encoder, interface{}) error
|
||||
defaultValue() interface{}
|
||||
Depth() int
|
||||
}
|
||||
|
||||
func Factory(name, chType string, timezone *time.Location) (Column, error) {
|
||||
switch chType {
|
||||
case "Int8":
|
||||
return &Int8{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: columnBaseTypes[int8(0)],
|
||||
},
|
||||
}, nil
|
||||
case "Int16":
|
||||
return &Int16{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: columnBaseTypes[int16(0)],
|
||||
},
|
||||
}, nil
|
||||
case "Int32":
|
||||
return &Int32{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: columnBaseTypes[int32(0)],
|
||||
},
|
||||
}, nil
|
||||
case "Int64":
|
||||
return &Int64{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: columnBaseTypes[int64(0)],
|
||||
},
|
||||
}, nil
|
||||
case "UInt8":
|
||||
return &UInt8{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: columnBaseTypes[uint8(0)],
|
||||
},
|
||||
}, nil
|
||||
case "UInt16":
|
||||
return &UInt16{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: columnBaseTypes[uint16(0)],
|
||||
},
|
||||
}, nil
|
||||
case "UInt32":
|
||||
return &UInt32{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: columnBaseTypes[uint32(0)],
|
||||
},
|
||||
}, nil
|
||||
case "UInt64":
|
||||
return &UInt64{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: columnBaseTypes[uint64(0)],
|
||||
},
|
||||
}, nil
|
||||
case "Float32":
|
||||
return &Float32{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: columnBaseTypes[float32(0)],
|
||||
},
|
||||
}, nil
|
||||
case "Float64":
|
||||
return &Float64{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: columnBaseTypes[float64(0)],
|
||||
},
|
||||
}, nil
|
||||
case "String":
|
||||
return &String{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: columnBaseTypes[string("")],
|
||||
},
|
||||
}, nil
|
||||
case "UUID":
|
||||
return &UUID{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: columnBaseTypes[string("")],
|
||||
},
|
||||
}, nil
|
||||
case "Date":
|
||||
_, offset := time.Unix(0, 0).In(timezone).Zone()
|
||||
return &Date{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: columnBaseTypes[time.Time{}],
|
||||
},
|
||||
Timezone: timezone,
|
||||
offset: int64(offset),
|
||||
}, nil
|
||||
case "IPv4":
|
||||
return &IPv4{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: columnBaseTypes[IPv4{}],
|
||||
},
|
||||
}, nil
|
||||
case "IPv6":
|
||||
return &IPv6{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: columnBaseTypes[IPv6{}],
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
switch {
|
||||
case strings.HasPrefix(chType, "DateTime"):
|
||||
return &DateTime{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: "DateTime",
|
||||
valueOf: columnBaseTypes[time.Time{}],
|
||||
},
|
||||
Timezone: timezone,
|
||||
}, nil
|
||||
case strings.HasPrefix(chType, "Array"):
|
||||
return parseArray(name, chType, timezone)
|
||||
case strings.HasPrefix(chType, "Nullable"):
|
||||
return parseNullable(name, chType, timezone)
|
||||
case strings.HasPrefix(chType, "FixedString"):
|
||||
return parseFixedString(name, chType)
|
||||
case strings.HasPrefix(chType, "Enum8"), strings.HasPrefix(chType, "Enum16"):
|
||||
return parseEnum(name, chType)
|
||||
case strings.HasPrefix(chType, "Decimal"):
|
||||
return parseDecimal(name, chType)
|
||||
case strings.HasPrefix(chType, "SimpleAggregateFunction"):
|
||||
if nestedType, err := getNestedType(chType, "SimpleAggregateFunction"); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return Factory(name, nestedType, timezone)
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("column: unhandled type %v", chType)
|
||||
}
|
||||
|
||||
func getNestedType(chType string, wrapType string) (string, error) {
|
||||
prefixLen := len(wrapType) + 1
|
||||
suffixLen := 1
|
||||
|
||||
if len(chType) > prefixLen+suffixLen {
|
||||
nested := strings.Split(chType[prefixLen:len(chType)-suffixLen], ",")
|
||||
if len(nested) == 2 {
|
||||
return strings.TrimSpace(nested[1]), nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("column: invalid %s type (%s)", wrapType, chType)
|
||||
}
|
|
@ -1,80 +0,0 @@
|
|||
package column
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
type ErrUnexpectedType struct {
|
||||
Column Column
|
||||
T interface{}
|
||||
}
|
||||
|
||||
func (err *ErrUnexpectedType) Error() string {
|
||||
return fmt.Sprintf("%s: unexpected type %T", err.Column, err.T)
|
||||
}
|
||||
|
||||
var columnBaseTypes = map[interface{}]reflect.Value{
|
||||
int8(0): reflect.ValueOf(int8(0)),
|
||||
int16(0): reflect.ValueOf(int16(0)),
|
||||
int32(0): reflect.ValueOf(int32(0)),
|
||||
int64(0): reflect.ValueOf(int64(0)),
|
||||
uint8(0): reflect.ValueOf(uint8(0)),
|
||||
uint16(0): reflect.ValueOf(uint16(0)),
|
||||
uint32(0): reflect.ValueOf(uint32(0)),
|
||||
uint64(0): reflect.ValueOf(uint64(0)),
|
||||
float32(0): reflect.ValueOf(float32(0)),
|
||||
float64(0): reflect.ValueOf(float64(0)),
|
||||
string(""): reflect.ValueOf(string("")),
|
||||
time.Time{}: reflect.ValueOf(time.Time{}),
|
||||
IPv4{}: reflect.ValueOf(net.IP{}),
|
||||
IPv6{}: reflect.ValueOf(net.IP{}),
|
||||
}
|
||||
|
||||
var arrayBaseTypes = map[interface{}]reflect.Type{
|
||||
int8(0): reflect.ValueOf(int8(0)).Type(),
|
||||
int16(0): reflect.ValueOf(int16(0)).Type(),
|
||||
int32(0): reflect.ValueOf(int32(0)).Type(),
|
||||
int64(0): reflect.ValueOf(int64(0)).Type(),
|
||||
uint8(0): reflect.ValueOf(uint8(0)).Type(),
|
||||
uint16(0): reflect.ValueOf(uint16(0)).Type(),
|
||||
uint32(0): reflect.ValueOf(uint32(0)).Type(),
|
||||
uint64(0): reflect.ValueOf(uint64(0)).Type(),
|
||||
float32(0): reflect.ValueOf(float32(0)).Type(),
|
||||
float64(0): reflect.ValueOf(float64(0)).Type(),
|
||||
string(""): reflect.ValueOf(string("")).Type(),
|
||||
time.Time{}: reflect.ValueOf(time.Time{}).Type(),
|
||||
IPv4{}: reflect.ValueOf(net.IP{}).Type(),
|
||||
IPv6{}: reflect.ValueOf(net.IP{}).Type(),
|
||||
}
|
||||
|
||||
type base struct {
|
||||
name, chType string
|
||||
valueOf reflect.Value
|
||||
}
|
||||
|
||||
func (base *base) Name() string {
|
||||
return base.name
|
||||
}
|
||||
|
||||
func (base *base) CHType() string {
|
||||
return base.chType
|
||||
}
|
||||
|
||||
func (base *base) ScanType() reflect.Type {
|
||||
return base.valueOf.Type()
|
||||
}
|
||||
|
||||
func (base *base) defaultValue() interface{} {
|
||||
return base.valueOf.Interface()
|
||||
}
|
||||
|
||||
func (base *base) String() string {
|
||||
return fmt.Sprintf("%s (%s)", base.name, base.chType)
|
||||
}
|
||||
|
||||
func (base *base) Depth() int {
|
||||
return 0
|
||||
}
|
|
@ -1,84 +0,0 @@
|
|||
package column
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/lib/binary"
|
||||
)
|
||||
|
||||
type Date struct {
|
||||
base
|
||||
Timezone *time.Location
|
||||
offset int64
|
||||
}
|
||||
|
||||
func (dt *Date) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
sec, err := decoder.Int16()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return time.Unix(int64(sec)*24*3600-dt.offset, 0).In(dt.Timezone), nil
|
||||
}
|
||||
|
||||
func (dt *Date) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
var timestamp int64
|
||||
switch value := v.(type) {
|
||||
case time.Time:
|
||||
_, offset := value.Zone()
|
||||
timestamp = value.Unix() + int64(offset)
|
||||
case int16:
|
||||
return encoder.Int16(value)
|
||||
case int32:
|
||||
timestamp = int64(value) + dt.offset
|
||||
case uint32:
|
||||
timestamp = int64(value) + dt.offset
|
||||
case uint64:
|
||||
timestamp = int64(value) + dt.offset
|
||||
case int64:
|
||||
timestamp = value + dt.offset
|
||||
case string:
|
||||
var err error
|
||||
timestamp, err = dt.parse(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// this relies on Nullable never sending nil values through
|
||||
case *time.Time:
|
||||
_, offset := value.Zone()
|
||||
timestamp = (*value).Unix() + int64(offset)
|
||||
case *int16:
|
||||
return encoder.Int16(*value)
|
||||
case *int32:
|
||||
timestamp = int64(*value) + dt.offset
|
||||
case *int64:
|
||||
timestamp = *value + dt.offset
|
||||
case *string:
|
||||
var err error
|
||||
timestamp, err = dt.parse(*value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
default:
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: dt,
|
||||
}
|
||||
}
|
||||
|
||||
return encoder.Int16(int16(timestamp / 24 / 3600))
|
||||
}
|
||||
|
||||
func (dt *Date) parse(value string) (int64, error) {
|
||||
tv, err := time.Parse("2006-01-02", value)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return time.Date(
|
||||
time.Time(tv).Year(),
|
||||
time.Time(tv).Month(),
|
||||
time.Time(tv).Day(),
|
||||
0, 0, 0, 0, time.UTC,
|
||||
).Unix(), nil
|
||||
}
|
|
@ -1,87 +0,0 @@
|
|||
package column
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/lib/binary"
|
||||
)
|
||||
|
||||
type DateTime struct {
|
||||
base
|
||||
Timezone *time.Location
|
||||
}
|
||||
|
||||
func (dt *DateTime) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
sec, err := decoder.Int32()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return time.Unix(int64(sec), 0).In(dt.Timezone), nil
|
||||
}
|
||||
|
||||
func (dt *DateTime) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
var timestamp int64
|
||||
switch value := v.(type) {
|
||||
case time.Time:
|
||||
if !value.IsZero() {
|
||||
timestamp = value.Unix()
|
||||
}
|
||||
case int16:
|
||||
timestamp = int64(value)
|
||||
case int32:
|
||||
timestamp = int64(value)
|
||||
case uint32:
|
||||
timestamp = int64(value)
|
||||
case uint64:
|
||||
timestamp = int64(value)
|
||||
case int64:
|
||||
timestamp = value
|
||||
case string:
|
||||
var err error
|
||||
timestamp, err = dt.parse(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case *time.Time:
|
||||
if value != nil && !(*value).IsZero() {
|
||||
timestamp = (*value).Unix()
|
||||
}
|
||||
case *int16:
|
||||
timestamp = int64(*value)
|
||||
case *int32:
|
||||
timestamp = int64(*value)
|
||||
case *int64:
|
||||
timestamp = *value
|
||||
case *string:
|
||||
var err error
|
||||
timestamp, err = dt.parse(*value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
default:
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: dt,
|
||||
}
|
||||
}
|
||||
|
||||
return encoder.Int32(int32(timestamp))
|
||||
}
|
||||
|
||||
func (dt *DateTime) parse(value string) (int64, error) {
|
||||
tv, err := time.Parse("2006-01-02 15:04:05", value)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return time.Date(
|
||||
time.Time(tv).Year(),
|
||||
time.Time(tv).Month(),
|
||||
time.Time(tv).Day(),
|
||||
time.Time(tv).Hour(),
|
||||
time.Time(tv).Minute(),
|
||||
time.Time(tv).Second(),
|
||||
0, time.UTC,
|
||||
).Unix(), nil
|
||||
}
|
|
@ -1,249 +0,0 @@
|
|||
package column
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/lib/binary"
|
||||
)
|
||||
|
||||
// Table of powers of 10 for fast casting from floating types to decimal type
|
||||
// representations.
|
||||
var factors10 = []float64{
|
||||
1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11, 1e12, 1e13,
|
||||
1e14, 1e15, 1e16, 1e17, 1e18,
|
||||
}
|
||||
|
||||
// Decimal represents the ClickHouse Decimal(P, S) type. Since Go has no
|
||||
// native int128 support, 128-bit decimals are not supported either. A Decimal
|
||||
// is represented as an integer; floating-point types are also accepted for
|
||||
// query parameters.
|
||||
type Decimal struct {
|
||||
base
|
||||
nobits int // its domain is {32, 64}
|
||||
precision int
|
||||
scale int
|
||||
}
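
A minimal sketch of the fixed-point conversion described in the comment above: a value for a hypothetical Decimal(9, 4) column (the column parameters and sample value are illustrative assumptions, not taken from this repository) is scaled by 10^scale and truncated to an integer, which is what the float2int32/float2int64 helpers further down do.

package main

import "fmt"

// A few powers of ten, mirroring the factors10 table above.
var pow10 = []float64{1e0, 1e1, 1e2, 1e3, 1e4}

func main() {
	// Hypothetical Decimal(9, 4) column: precision 9 fits in 32 bits and
	// scale 4 means four fractional digits, so the value is stored as the
	// integer value*10^4, truncated.
	const scale = 4
	value := 12.5
	fixed := int32(value * pow10[scale])
	fmt.Println(fixed) // 125000
}
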
|
||||
|
||||
func (d *Decimal) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
switch d.nobits {
|
||||
case 32:
|
||||
return decoder.Int32()
|
||||
case 64:
|
||||
return decoder.Int64()
|
||||
default:
|
||||
return nil, errors.New("unachievable execution path")
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Decimal) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
switch d.nobits {
|
||||
case 32:
|
||||
return d.write32(encoder, v)
|
||||
case 64:
|
||||
return d.write64(encoder, v)
|
||||
default:
|
||||
return errors.New("unachievable execution path")
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Decimal) float2int32(floating float64) int32 {
|
||||
fixed := int32(floating * factors10[d.scale])
|
||||
return fixed
|
||||
}
|
||||
|
||||
func (d *Decimal) float2int64(floating float64) int64 {
|
||||
fixed := int64(floating * factors10[d.scale])
|
||||
return fixed
|
||||
}
|
||||
|
||||
func (d *Decimal) write32(encoder *binary.Encoder, v interface{}) error {
|
||||
switch v := v.(type) {
|
||||
case int8:
|
||||
return encoder.Int32(int32(v))
|
||||
case int16:
|
||||
return encoder.Int32(int32(v))
|
||||
case int32:
|
||||
return encoder.Int32(int32(v))
|
||||
case int64:
|
||||
return errors.New("narrowing type conversion from int64 to int32")
|
||||
|
||||
case uint8:
|
||||
return encoder.Int32(int32(v))
|
||||
case uint16:
|
||||
return encoder.Int32(int32(v))
|
||||
case uint32:
|
||||
return errors.New("narrowing type conversion from uint32 to int32")
|
||||
case uint64:
|
||||
return errors.New("narrowing type conversion from uint64 to int32")
|
||||
|
||||
case float32:
|
||||
fixed := d.float2int32(float64(v))
|
||||
return encoder.Int32(fixed)
|
||||
case float64:
|
||||
fixed := d.float2int32(float64(v))
|
||||
return encoder.Int32(fixed)
|
||||
|
||||
// this relies on Nullable never sending nil values through
|
||||
case *int8:
|
||||
return encoder.Int32(int32(*v))
|
||||
case *int16:
|
||||
return encoder.Int32(int32(*v))
|
||||
case *int32:
|
||||
return encoder.Int32(int32(*v))
|
||||
case *int64:
|
||||
return errors.New("narrowing type conversion from int64 to int32")
|
||||
|
||||
case *uint8:
|
||||
return encoder.Int32(int32(*v))
|
||||
case *uint16:
|
||||
return encoder.Int32(int32(*v))
|
||||
case *uint32:
|
||||
return errors.New("narrowing type conversion from uint32 to int32")
|
||||
case *uint64:
|
||||
return errors.New("narrowing type conversion from uint64 to int32")
|
||||
|
||||
case *float32:
|
||||
fixed := d.float2int32(float64(*v))
|
||||
return encoder.Int32(fixed)
|
||||
case *float64:
|
||||
fixed := d.float2int32(float64(*v))
|
||||
return encoder.Int32(fixed)
|
||||
}
|
||||
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: d,
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Decimal) write64(encoder *binary.Encoder, v interface{}) error {
|
||||
switch v := v.(type) {
|
||||
case int:
|
||||
return encoder.Int64(int64(v))
|
||||
case int8:
|
||||
return encoder.Int64(int64(v))
|
||||
case int16:
|
||||
return encoder.Int64(int64(v))
|
||||
case int32:
|
||||
return encoder.Int64(int64(v))
|
||||
case int64:
|
||||
return encoder.Int64(int64(v))
|
||||
|
||||
case uint8:
|
||||
return encoder.Int64(int64(v))
|
||||
case uint16:
|
||||
return encoder.Int64(int64(v))
|
||||
case uint32:
|
||||
return encoder.Int64(int64(v))
|
||||
case uint64:
|
||||
return errors.New("narrowing type conversion from uint64 to int64")
|
||||
|
||||
case float32:
|
||||
fixed := d.float2int64(float64(v))
|
||||
return encoder.Int64(fixed)
|
||||
case float64:
|
||||
fixed := d.float2int64(float64(v))
|
||||
return encoder.Int64(fixed)
|
||||
|
||||
// this relies on Nullable never sending nil values through
|
||||
case *int:
|
||||
return encoder.Int64(int64(*v))
|
||||
case *int8:
|
||||
return encoder.Int64(int64(*v))
|
||||
case *int16:
|
||||
return encoder.Int64(int64(*v))
|
||||
case *int32:
|
||||
return encoder.Int64(int64(*v))
|
||||
case *int64:
|
||||
return encoder.Int64(int64(*v))
|
||||
|
||||
case *uint8:
|
||||
return encoder.Int64(int64(*v))
|
||||
case *uint16:
|
||||
return encoder.Int64(int64(*v))
|
||||
case *uint32:
|
||||
return encoder.Int64(int64(*v))
|
||||
case *uint64:
|
||||
return errors.New("narrowing type conversion from uint64 to int64")
|
||||
|
||||
case *float32:
|
||||
fixed := d.float2int64(float64(*v))
|
||||
return encoder.Int64(fixed)
|
||||
case *float64:
|
||||
fixed := d.float2int64(float64(*v))
|
||||
return encoder.Int64(fixed)
|
||||
}
|
||||
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: d,
|
||||
}
|
||||
}
|
||||
|
||||
func parseDecimal(name, chType string) (Column, error) {
|
||||
switch {
|
||||
case len(chType) < 12:
|
||||
fallthrough
|
||||
case !strings.HasPrefix(chType, "Decimal"):
|
||||
fallthrough
|
||||
case chType[7] != '(':
|
||||
fallthrough
|
||||
case chType[len(chType)-1] != ')':
|
||||
return nil, fmt.Errorf("invalid Decimal format: '%s'", chType)
|
||||
}
|
||||
|
||||
var params = strings.Split(chType[8:len(chType)-1], ",")
|
||||
|
||||
if len(params) != 2 {
|
||||
return nil, fmt.Errorf("invalid Decimal format: '%s'", chType)
|
||||
}
|
||||
|
||||
params[0] = strings.TrimSpace(params[0])
|
||||
params[1] = strings.TrimSpace(params[1])
|
||||
|
||||
var err error
|
||||
var decimal = &Decimal{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
},
|
||||
}
|
||||
|
||||
if decimal.precision, err = strconv.Atoi(params[0]); err != nil {
|
||||
return nil, fmt.Errorf("'%s' is not Decimal type: %s", chType, err)
|
||||
} else if decimal.precision < 1 {
|
||||
return nil, errors.New("wrong precision of Decimal type")
|
||||
}
|
||||
|
||||
if decimal.scale, err = strconv.Atoi(params[1]); err != nil {
|
||||
return nil, fmt.Errorf("'%s' is not Decimal type: %s", chType, err)
|
||||
} else if decimal.scale < 0 || decimal.scale > decimal.precision {
|
||||
return nil, errors.New("wrong scale of Decimal type")
|
||||
}
|
||||
|
||||
switch {
|
||||
case decimal.precision <= 9:
|
||||
decimal.nobits = 32
|
||||
decimal.valueOf = columnBaseTypes[int32(0)]
|
||||
case decimal.precision <= 18:
|
||||
decimal.nobits = 64
|
||||
decimal.valueOf = columnBaseTypes[int64(0)]
|
||||
case decimal.precision <= 38:
|
||||
return nil, errors.New("Decimal128 is not supported")
|
||||
default:
|
||||
return nil, errors.New("precision of Decimal exceeds max bound")
|
||||
}
|
||||
|
||||
return decimal, nil
|
||||
}
|
||||
|
||||
func (d *Decimal) GetPrecision() int {
|
||||
return d.precision
|
||||
}
|
||||
|
||||
func (d *Decimal) GetScale() int {
|
||||
return d.scale
|
||||
}
|
|
@ -1,140 +0,0 @@
|
|||
package column
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/lib/binary"
|
||||
)
|
||||
|
||||
type Enum struct {
|
||||
iv map[string]interface{}
|
||||
vi map[interface{}]string
|
||||
base
|
||||
baseType interface{}
|
||||
}
|
||||
|
||||
func (enum *Enum) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
var (
|
||||
err error
|
||||
ident interface{}
|
||||
)
|
||||
switch enum.baseType.(type) {
|
||||
case int16:
|
||||
if ident, err = decoder.Int16(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
if ident, err = decoder.Int8(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if ident, found := enum.vi[ident]; found {
|
||||
return ident, nil
|
||||
}
|
||||
return nil, fmt.Errorf("invalid Enum value: %v", ident)
|
||||
}
|
||||
|
||||
func (enum *Enum) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
switch v := v.(type) {
|
||||
case string:
|
||||
ident, found := enum.iv[v]
|
||||
if !found {
|
||||
return fmt.Errorf("invalid Enum ident: %s", v)
|
||||
}
|
||||
switch ident := ident.(type) {
|
||||
case int8:
|
||||
return encoder.Int8(ident)
|
||||
case int16:
|
||||
return encoder.Int16(ident)
|
||||
}
|
||||
case uint8:
|
||||
if _, ok := enum.baseType.(int8); ok {
|
||||
return encoder.Int8(int8(v))
|
||||
}
|
||||
case int8:
|
||||
if _, ok := enum.baseType.(int8); ok {
|
||||
return encoder.Int8(v)
|
||||
}
|
||||
case uint16:
|
||||
if _, ok := enum.baseType.(int16); ok {
|
||||
return encoder.Int16(int16(v))
|
||||
}
|
||||
case int16:
|
||||
if _, ok := enum.baseType.(int16); ok {
|
||||
return encoder.Int16(v)
|
||||
}
|
||||
case int64:
|
||||
switch enum.baseType.(type) {
|
||||
case int8:
|
||||
return encoder.Int8(int8(v))
|
||||
case int16:
|
||||
return encoder.Int16(int16(v))
|
||||
}
|
||||
}
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: enum,
|
||||
}
|
||||
}
|
||||
|
||||
func (enum *Enum) defaultValue() interface{} {
|
||||
return enum.baseType
|
||||
}
|
||||
|
||||
func parseEnum(name, chType string) (*Enum, error) {
|
||||
var (
|
||||
data string
|
||||
isEnum16 bool
|
||||
)
|
||||
if len(chType) < 8 {
|
||||
return nil, fmt.Errorf("invalid Enum format: %s", chType)
|
||||
}
|
||||
switch {
|
||||
case strings.HasPrefix(chType, "Enum8"):
|
||||
data = chType[6:]
|
||||
case strings.HasPrefix(chType, "Enum16"):
|
||||
data = chType[7:]
|
||||
isEnum16 = true
|
||||
default:
|
||||
return nil, fmt.Errorf("'%s' is not Enum type", chType)
|
||||
}
|
||||
enum := Enum{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: columnBaseTypes[string("")],
|
||||
},
|
||||
iv: make(map[string]interface{}),
|
||||
vi: make(map[interface{}]string),
|
||||
}
|
||||
for _, block := range strings.Split(data[:len(data)-1], ",") {
|
||||
parts := strings.Split(block, "=")
|
||||
if len(parts) != 2 {
|
||||
return nil, fmt.Errorf("invalid Enum format: %s", chType)
|
||||
}
|
||||
var (
|
||||
ident = strings.TrimSpace(parts[0])
|
||||
value, err = strconv.ParseInt(strings.TrimSpace(parts[1]), 10, 16)
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid Enum value: %v", chType)
|
||||
}
|
||||
{
|
||||
var (
|
||||
ident = ident[1 : len(ident)-1]
|
||||
value interface{} = int16(value)
|
||||
)
|
||||
if !isEnum16 {
|
||||
value = int8(value.(int16))
|
||||
}
|
||||
if enum.baseType == nil {
|
||||
enum.baseType = value
|
||||
}
|
||||
enum.iv[ident] = value
|
||||
enum.vi[value] = ident
|
||||
}
|
||||
}
|
||||
return &enum, nil
|
||||
}
|
|
@ -1,71 +0,0 @@
|
|||
package column
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/lib/binary"
|
||||
)
|
||||
|
||||
type FixedString struct {
|
||||
base
|
||||
len int
|
||||
scanType reflect.Type
|
||||
}
|
||||
|
||||
func (str *FixedString) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
v, err := decoder.Fixed(str.len)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(v), nil
|
||||
}
|
||||
|
||||
func (str *FixedString) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
var fixedString []byte
|
||||
switch v := v.(type) {
|
||||
case string:
|
||||
fixedString = binary.Str2Bytes(v)
|
||||
case []byte:
|
||||
fixedString = v
|
||||
case encoding.BinaryMarshaler:
|
||||
bytes, err := v.MarshalBinary()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fixedString = bytes
|
||||
default:
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: str,
|
||||
}
|
||||
}
|
||||
switch {
|
||||
case len(fixedString) > str.len:
|
||||
return fmt.Errorf("too large value '%s' (expected %d, got %d)", fixedString, str.len, len(fixedString))
|
||||
case len(fixedString) < str.len:
|
||||
tmp := make([]byte, str.len)
|
||||
copy(tmp, fixedString)
|
||||
fixedString = tmp
|
||||
}
|
||||
if _, err := encoder.Write(fixedString); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseFixedString(name, chType string) (*FixedString, error) {
|
||||
var strLen int
|
||||
if _, err := fmt.Sscanf(chType, "FixedString(%d)", &strLen); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &FixedString{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: columnBaseTypes[string("")],
|
||||
},
|
||||
len: strLen,
|
||||
}, nil
|
||||
}
|
|
@ -1,35 +0,0 @@
|
|||
package column
|
||||
|
||||
import (
|
||||
"github.com/ClickHouse/clickhouse-go/lib/binary"
|
||||
)
|
||||
|
||||
type Float32 struct{ base }
|
||||
|
||||
func (Float32) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
v, err := decoder.Float32()
|
||||
if err != nil {
|
||||
return float32(0), err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (float *Float32) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
switch v := v.(type) {
|
||||
case float32:
|
||||
return encoder.Float32(v)
|
||||
case float64:
|
||||
return encoder.Float32(float32(v))
|
||||
|
||||
// this relies on Nullable never sending nil values through
|
||||
case *float32:
|
||||
return encoder.Float32(*v)
|
||||
case *float64:
|
||||
return encoder.Float32(float32(*v))
|
||||
}
|
||||
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: float,
|
||||
}
|
||||
}
|
|
@ -1,35 +0,0 @@
|
|||
package column
|
||||
|
||||
import (
|
||||
"github.com/ClickHouse/clickhouse-go/lib/binary"
|
||||
)
|
||||
|
||||
type Float64 struct{ base }
|
||||
|
||||
func (Float64) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
v, err := decoder.Float64()
|
||||
if err != nil {
|
||||
return float64(0), err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (float *Float64) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
switch v := v.(type) {
|
||||
case float32:
|
||||
return encoder.Float64(float64(v))
|
||||
case float64:
|
||||
return encoder.Float64(v)
|
||||
|
||||
// this relies on Nullable never sending nil values through
|
||||
case *float32:
|
||||
return encoder.Float64(float64(*v))
|
||||
case *float64:
|
||||
return encoder.Float64(*v)
|
||||
}
|
||||
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: float,
|
||||
}
|
||||
}
|
|
@ -1,39 +0,0 @@
|
|||
package column
|
||||
|
||||
import (
|
||||
"github.com/ClickHouse/clickhouse-go/lib/binary"
|
||||
)
|
||||
|
||||
type Int16 struct{ base }
|
||||
|
||||
func (Int16) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
v, err := decoder.Int16()
|
||||
if err != nil {
|
||||
return int16(0), err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (i *Int16) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
switch v := v.(type) {
|
||||
case int16:
|
||||
return encoder.Int16(v)
|
||||
case int64:
|
||||
return encoder.Int16(int16(v))
|
||||
case int:
|
||||
return encoder.Int16(int16(v))
|
||||
|
||||
// this relies on Nullable never sending nil values through
|
||||
case *int16:
|
||||
return encoder.Int16(*v)
|
||||
case *int64:
|
||||
return encoder.Int16(int16(*v))
|
||||
case *int:
|
||||
return encoder.Int16(int16(*v))
|
||||
}
|
||||
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: i,
|
||||
}
|
||||
}
|
|
@ -1,39 +0,0 @@
|
|||
package column
|
||||
|
||||
import (
|
||||
"github.com/ClickHouse/clickhouse-go/lib/binary"
|
||||
)
|
||||
|
||||
type Int32 struct{ base }
|
||||
|
||||
func (Int32) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
v, err := decoder.Int32()
|
||||
if err != nil {
|
||||
return int32(0), err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (i *Int32) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
switch v := v.(type) {
|
||||
case int32:
|
||||
return encoder.Int32(v)
|
||||
case int64:
|
||||
return encoder.Int32(int32(v))
|
||||
case int:
|
||||
return encoder.Int32(int32(v))
|
||||
|
||||
// this relies on Nullable never sending nil values through
|
||||
case *int32:
|
||||
return encoder.Int32(*v)
|
||||
case *int64:
|
||||
return encoder.Int32(int32(*v))
|
||||
case *int:
|
||||
return encoder.Int32(int32(*v))
|
||||
}
|
||||
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: i,
|
||||
}
|
||||
}
|
|
@ -1,40 +0,0 @@
|
|||
package column
|
||||
|
||||
import (
|
||||
"github.com/ClickHouse/clickhouse-go/lib/binary"
|
||||
)
|
||||
|
||||
type Int64 struct{ base }
|
||||
|
||||
func (Int64) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
v, err := decoder.Int64()
|
||||
if err != nil {
|
||||
return int64(0), err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (i *Int64) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
switch v := v.(type) {
|
||||
case int:
|
||||
return encoder.Int64(int64(v))
|
||||
case int64:
|
||||
return encoder.Int64(v)
|
||||
case []byte:
|
||||
if _, err := encoder.Write(v); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
// this relies on Nullable never sending nil values through
|
||||
case *int:
|
||||
return encoder.Int64(int64(*v))
|
||||
case *int64:
|
||||
return encoder.Int64(*v)
|
||||
}
|
||||
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: i,
|
||||
}
|
||||
}
|
|
@ -1,49 +0,0 @@
|
|||
package column
|
||||
|
||||
import (
|
||||
"github.com/ClickHouse/clickhouse-go/lib/binary"
|
||||
)
|
||||
|
||||
type Int8 struct{ base }
|
||||
|
||||
func (Int8) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
v, err := decoder.Int8()
|
||||
if err != nil {
|
||||
return int8(0), err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (i *Int8) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
switch v := v.(type) {
|
||||
case int8:
|
||||
return encoder.Int8(v)
|
||||
case int64:
|
||||
return encoder.Int8(int8(v))
|
||||
case int:
|
||||
return encoder.Int8(int8(v))
|
||||
case bool:
|
||||
if v {
|
||||
return encoder.Int8(int8(1))
|
||||
}
|
||||
return encoder.Int8(int8(0))
|
||||
|
||||
// this relies on Nullable never sending nil values through
|
||||
case *int8:
|
||||
return encoder.Int8(*v)
|
||||
case *int64:
|
||||
return encoder.Int8(int8(*v))
|
||||
case *int:
|
||||
return encoder.Int8(int8(*v))
|
||||
case *bool:
|
||||
if *v {
|
||||
return encoder.Int8(int8(1))
|
||||
}
|
||||
return encoder.Int8(int8(0))
|
||||
}
|
||||
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: i,
|
||||
}
|
||||
}
|
|
@ -1,84 +0,0 @@
|
|||
/*
|
||||
IP column support for ClickHouse; values are stored as FixedString(16)
|
||||
*/
|
||||
|
||||
package column
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
"net"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
errInvalidScanType = errors.New("Invalid scan types")
|
||||
errInvalidScanValue = errors.New("Invalid scan value")
|
||||
)
|
||||
|
||||
// IP column type
|
||||
type IP net.IP
|
||||
|
||||
// Value implements the driver.Valuer interface (also used for JSON fields).
|
||||
// The address is right-aligned within the 16-byte buffer.
|
||||
func (ip IP) Value() (driver.Value, error) {
|
||||
return ip.MarshalBinary()
|
||||
}
|
||||
|
||||
func (ip IP) MarshalBinary() ([]byte, error) {
|
||||
if len(ip) < 16 {
|
||||
var (
|
||||
buff = make([]byte, 16)
|
||||
j = 0
|
||||
)
|
||||
for i := 16 - len(ip); i < 16; i++ {
|
||||
buff[i] = ip[j]
|
||||
j++
|
||||
}
|
||||
for i := 0; i < 16-len(ip); i++ {
|
||||
buff[i] = '\x00'
|
||||
}
|
||||
if len(ip) == 4 {
|
||||
buff[11] = '\xff'
|
||||
buff[10] = '\xff'
|
||||
}
|
||||
return buff, nil
|
||||
}
|
||||
return []byte(ip), nil
|
||||
}
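
As a rough illustration of the right-aligned FixedString(16) layout produced by MarshalBinary above (the sample address is an assumption for demonstration): a 4-byte IPv4 address lands in the last four bytes, preceded by 0xff 0xff, i.e. the IPv4-mapped IPv6 form.

package main

import (
	"fmt"
	"net"
)

func main() {
	ip := net.ParseIP("192.168.0.1").To4() // 4-byte form
	buf := make([]byte, 16)                // zero-filled FixedString(16)
	copy(buf[16-len(ip):], ip)             // right-align the address
	buf[10], buf[11] = 0xff, 0xff          // mark it as IPv4-mapped
	fmt.Printf("% x\n", buf)               // 00 00 ... ff ff c0 a8 00 01
}
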
|
||||
|
||||
// Scan implements the sql.Scanner interface (also used for JSON fields)
|
||||
func (ip *IP) Scan(value interface{}) (err error) {
|
||||
switch v := value.(type) {
|
||||
case []byte:
|
||||
if len(v) == 4 || len(v) == 16 {
|
||||
*ip = IP(v)
|
||||
} else {
|
||||
err = errInvalidScanValue
|
||||
}
|
||||
case string:
|
||||
if v == "" {
|
||||
err = errInvalidScanValue
|
||||
return
|
||||
}
|
||||
if (len(v) == 4 || len(v) == 16) && !strings.Contains(v, ".") && !strings.Contains(v, ":") {
|
||||
*ip = IP([]byte(v))
|
||||
return
|
||||
}
|
||||
if strings.Contains(v, ":") {
|
||||
*ip = IP(net.ParseIP(v))
|
||||
return
|
||||
}
|
||||
*ip = IP(net.ParseIP(v).To4())
|
||||
case net.IP:
|
||||
*ip = IP(v)
|
||||
default:
|
||||
err = errInvalidScanType
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// String implements the fmt.Stringer interface
|
||||
func (ip IP) String() string {
|
||||
return net.IP(ip).String()
|
||||
}
|
|
@ -1,54 +0,0 @@
|
|||
package column
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/lib/binary"
|
||||
)
|
||||
|
||||
type IPv4 struct {
|
||||
base
|
||||
}
|
||||
|
||||
func (*IPv4) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
v, err := decoder.Fixed(4)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return net.IPv4(v[3], v[2], v[1], v[0]), nil
|
||||
}
|
||||
|
||||
func (ip *IPv4) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
var netIP net.IP
|
||||
switch v.(type) {
|
||||
case string:
|
||||
netIP = net.ParseIP(v.(string))
|
||||
case net.IP:
|
||||
netIP = v.(net.IP)
|
||||
case *net.IP:
|
||||
netIP = *(v.(*net.IP))
|
||||
default:
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: ip,
|
||||
}
|
||||
}
|
||||
|
||||
if netIP == nil {
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: ip,
|
||||
}
|
||||
}
|
||||
ip4 := netIP.To4()
|
||||
if ip4 == nil {
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: ip,
|
||||
}
|
||||
}
|
||||
if _, err := encoder.Write([]byte{ip4[3], ip4[2], ip4[1], ip4[0]}); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,47 +0,0 @@
|
|||
package column
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/lib/binary"
|
||||
)
|
||||
|
||||
type IPv6 struct {
|
||||
base
|
||||
}
|
||||
|
||||
func (*IPv6) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
v, err := decoder.Fixed(16)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return net.IP(v), nil
|
||||
}
|
||||
|
||||
func (ip *IPv6) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
var netIP net.IP
|
||||
switch v.(type) {
|
||||
case string:
|
||||
netIP = net.ParseIP(v.(string))
|
||||
case net.IP:
|
||||
netIP = v.(net.IP)
|
||||
case *net.IP:
|
||||
netIP = *(v.(*net.IP))
|
||||
default:
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: ip,
|
||||
}
|
||||
}
|
||||
|
||||
if netIP == nil {
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: ip,
|
||||
}
|
||||
}
|
||||
if _, err := encoder.Write([]byte(netIP.To16())); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,85 +0,0 @@
|
|||
package column
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/lib/binary"
|
||||
)
|
||||
|
||||
type Nullable struct {
|
||||
base
|
||||
column Column
|
||||
}
|
||||
|
||||
func (null *Nullable) ScanType() reflect.Type {
|
||||
return null.column.ScanType()
|
||||
}
|
||||
|
||||
func (null *Nullable) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
return null.column.Read(decoder)
|
||||
}
|
||||
|
||||
func (null *Nullable) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (null *Nullable) ReadNull(decoder *binary.Decoder, rows int) (_ []interface{}, err error) {
|
||||
var (
|
||||
isNull byte
|
||||
value interface{}
|
||||
nulls = make([]byte, rows)
|
||||
values = make([]interface{}, rows)
|
||||
)
|
||||
for i := 0; i < rows; i++ {
|
||||
if isNull, err = decoder.ReadByte(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nulls[i] = isNull
|
||||
}
|
||||
for i, isNull := range nulls {
|
||||
switch value, err = null.column.Read(decoder); true {
|
||||
case err != nil:
|
||||
return nil, err
|
||||
case isNull == 0:
|
||||
values[i] = value
|
||||
default:
|
||||
values[i] = nil
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
func (null *Nullable) WriteNull(nulls, encoder *binary.Encoder, v interface{}) error {
|
||||
if value := reflect.ValueOf(v); v == nil || (value.Kind() == reflect.Ptr && value.IsNil()) {
|
||||
if _, err := nulls.Write([]byte{1}); err != nil {
|
||||
return err
|
||||
}
|
||||
return null.column.Write(encoder, null.column.defaultValue())
|
||||
}
|
||||
if _, err := nulls.Write([]byte{0}); err != nil {
|
||||
return err
|
||||
}
|
||||
return null.column.Write(encoder, v)
|
||||
}
|
||||
|
||||
func parseNullable(name, chType string, timezone *time.Location) (*Nullable, error) {
|
||||
if len(chType) < 14 {
|
||||
return nil, fmt.Errorf("invalid Nullable column type: %s", chType)
|
||||
}
|
||||
column, err := Factory(name, chType[9:][:len(chType)-10], timezone)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Nullable(T): %v", err)
|
||||
}
|
||||
return &Nullable{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
},
|
||||
column: column,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (null *Nullable) GetColumn() Column {
|
||||
return null.column
|
||||
}
|
|
@ -1,35 +0,0 @@
|
|||
package column
|
||||
|
||||
import (
|
||||
"github.com/ClickHouse/clickhouse-go/lib/binary"
|
||||
)
|
||||
|
||||
type String struct{ base }
|
||||
|
||||
func (String) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
v, err := decoder.String()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (str *String) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
switch v := v.(type) {
|
||||
case string:
|
||||
return encoder.String(v)
|
||||
case []byte:
|
||||
return encoder.RawString(v)
|
||||
|
||||
// this relies on Nullable never sending nil values through
|
||||
case *string:
|
||||
return encoder.String(*v)
|
||||
case *[]byte:
|
||||
return encoder.RawString(*v)
|
||||
}
|
||||
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: str,
|
||||
}
|
||||
}
|
|
@ -1,43 +0,0 @@
|
|||
package column
|
||||
|
||||
import (
|
||||
"github.com/ClickHouse/clickhouse-go/lib/binary"
|
||||
)
|
||||
|
||||
type UInt16 struct{ base }
|
||||
|
||||
func (UInt16) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
v, err := decoder.UInt16()
|
||||
if err != nil {
|
||||
return uint16(0), err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (u *UInt16) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
switch v := v.(type) {
|
||||
case uint16:
|
||||
return encoder.UInt16(v)
|
||||
case int64:
|
||||
return encoder.UInt16(uint16(v))
|
||||
case uint64:
|
||||
return encoder.UInt16(uint16(v))
|
||||
case int:
|
||||
return encoder.UInt16(uint16(v))
|
||||
|
||||
// this relies on Nullable never sending nil values through
|
||||
case *uint16:
|
||||
return encoder.UInt16(*v)
|
||||
case *int64:
|
||||
return encoder.UInt16(uint16(*v))
|
||||
case *uint64:
|
||||
return encoder.UInt16(uint16(*v))
|
||||
case *int:
|
||||
return encoder.UInt16(uint16(*v))
|
||||
}
|
||||
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: u,
|
||||
}
|
||||
}
|
|
@@ -1,43 +0,0 @@
package column

import (
	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

type UInt32 struct{ base }

func (UInt32) Read(decoder *binary.Decoder) (interface{}, error) {
	v, err := decoder.UInt32()
	if err != nil {
		return uint32(0), err
	}
	return v, nil
}

func (u *UInt32) Write(encoder *binary.Encoder, v interface{}) error {
	switch v := v.(type) {
	case uint32:
		return encoder.UInt32(v)
	case uint64:
		return encoder.UInt32(uint32(v))
	case int64:
		return encoder.UInt32(uint32(v))
	case int:
		return encoder.UInt32(uint32(v))

	// this relies on Nullable never sending nil values through
	case *uint64:
		return encoder.UInt32(uint32(*v))
	case *uint32:
		return encoder.UInt32(*v)
	case *int64:
		return encoder.UInt32(uint32(*v))
	case *int:
		return encoder.UInt32(uint32(*v))
	}

	return &ErrUnexpectedType{
		T:      v,
		Column: u,
	}
}

@@ -1,44 +0,0 @@
package column

import (
	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

type UInt64 struct{ base }

func (UInt64) Read(decoder *binary.Decoder) (interface{}, error) {
	v, err := decoder.UInt64()
	if err != nil {
		return uint64(0), err
	}
	return v, nil
}

func (u *UInt64) Write(encoder *binary.Encoder, v interface{}) error {
	switch v := v.(type) {
	case []byte:
		if _, err := encoder.Write(v); err != nil {
			return err
		}
		return nil
	case uint64:
		return encoder.UInt64(v)
	case int64:
		return encoder.UInt64(uint64(v))
	case int:
		return encoder.UInt64(uint64(v))

	// this relies on Nullable never sending nil values through
	case *uint64:
		return encoder.UInt64(*v)
	case *int64:
		return encoder.UInt64(uint64(*v))
	case *int:
		return encoder.UInt64(uint64(*v))
	}

	return &ErrUnexpectedType{
		T:      v,
		Column: u,
	}
}

@@ -1,47 +0,0 @@
package column

import (
	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

type UInt8 struct{ base }

func (UInt8) Read(decoder *binary.Decoder) (interface{}, error) {
	v, err := decoder.UInt8()
	if err != nil {
		return uint8(0), err
	}
	return v, nil
}

func (u *UInt8) Write(encoder *binary.Encoder, v interface{}) error {
	switch v := v.(type) {
	case bool:
		return encoder.Bool(v)
	case uint8:
		return encoder.UInt8(v)
	case int64:
		return encoder.UInt8(uint8(v))
	case uint64:
		return encoder.UInt8(uint8(v))
	case int:
		return encoder.UInt8(uint8(v))

	// this relies on Nullable never sending nil values through
	case *bool:
		return encoder.Bool(*v)
	case *uint8:
		return encoder.UInt8(*v)
	case *int64:
		return encoder.UInt8(uint8(*v))
	case *uint64:
		return encoder.UInt8(uint8(*v))
	case *int:
		return encoder.UInt8(uint8(*v))
	}

	return &ErrUnexpectedType{
		T:      v,
		Column: u,
	}
}

@@ -1,139 +0,0 @@
package column

import (
	"encoding/hex"
	"errors"
	"fmt"
	"reflect"

	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

const (
	UUIDLen  = 16
	NullUUID = "00000000-0000-0000-0000-000000000000"
)

var ErrInvalidUUIDFormat = errors.New("invalid UUID format")

type UUID struct {
	base
	scanType reflect.Type
}

func (*UUID) Read(decoder *binary.Decoder) (interface{}, error) {
	src, err := decoder.Fixed(UUIDLen)
	if err != nil {
		return "", err
	}

	src = swap(src)

	var uuid [36]byte
	{
		hex.Encode(uuid[:], src[:4])
		uuid[8] = '-'
		hex.Encode(uuid[9:13], src[4:6])
		uuid[13] = '-'
		hex.Encode(uuid[14:18], src[6:8])
		uuid[18] = '-'
		hex.Encode(uuid[19:23], src[8:10])
		uuid[23] = '-'
		hex.Encode(uuid[24:], src[10:])
	}
	return string(uuid[:]), nil
}

func (u *UUID) Write(encoder *binary.Encoder, v interface{}) (err error) {
	var uuid []byte
	switch v := v.(type) {
	case string:
		if uuid, err = uuid2bytes(v); err != nil {
			return err
		}
	case []byte:
		if len(v) != UUIDLen {
			return fmt.Errorf("invalid raw UUID len '%s' (expected %d, got %d)", v, UUIDLen, len(v))
		}
		uuid = make([]byte, 16)
		copy(uuid, v)
	default:
		return &ErrUnexpectedType{
			T:      v,
			Column: u,
		}
	}

	uuid = swap(uuid)

	if _, err := encoder.Write(uuid); err != nil {
		return err
	}
	return nil
}

// swap reverses the byte order of each 8-byte half of src in place.
func swap(src []byte) []byte {
	_ = src[15]
	src[0], src[7] = src[7], src[0]
	src[1], src[6] = src[6], src[1]
	src[2], src[5] = src[5], src[2]
	src[3], src[4] = src[4], src[3]
	src[8], src[15] = src[15], src[8]
	src[9], src[14] = src[14], src[9]
	src[10], src[13] = src[13], src[10]
	src[11], src[12] = src[12], src[11]
	return src
}

func uuid2bytes(str string) ([]byte, error) {
	var uuid [16]byte
	strLength := len(str)
	if strLength == 0 {
		str = NullUUID
	} else if strLength != 36 {
		return nil, ErrInvalidUUIDFormat
	}
	if str[8] != '-' || str[13] != '-' || str[18] != '-' || str[23] != '-' {
		return nil, ErrInvalidUUIDFormat
	}
	for i, x := range [16]int{
		0, 2, 4, 6,
		9, 11, 14, 16,
		19, 21, 24, 26,
		28, 30, 32, 34,
	} {
		if v, ok := xtob(str[x], str[x+1]); !ok {
			return nil, ErrInvalidUUIDFormat
		} else {
			uuid[i] = v
		}
	}
	return uuid[:], nil
}

// xvalues returns the value of a byte as a hexadecimal digit or 255.
var xvalues = [256]byte{
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
}

// xtob converts hex characters x1 and x2 into a byte.
func xtob(x1, x2 byte) (byte, bool) {
	b1 := xvalues[x1]
	b2 := xvalues[x2]
	return (b1 << 4) | b2, b1 != 255 && b2 != 255
}

@@ -1,297 +0,0 @@
|
|||
package data
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/lib/binary"
|
||||
"github.com/ClickHouse/clickhouse-go/lib/column"
|
||||
wb "github.com/ClickHouse/clickhouse-go/lib/writebuffer"
|
||||
)
|
||||
|
||||
type offset [][]int
|
||||
|
||||
type Block struct {
|
||||
Values [][]interface{}
|
||||
Columns []column.Column
|
||||
NumRows uint64
|
||||
NumColumns uint64
|
||||
offsets []offset
|
||||
buffers []*buffer
|
||||
info blockInfo
|
||||
}
|
||||
|
||||
func (block *Block) Copy() *Block {
|
||||
return &Block{
|
||||
Columns: block.Columns,
|
||||
NumColumns: block.NumColumns,
|
||||
info: block.info,
|
||||
}
|
||||
}
|
||||
|
||||
func (block *Block) ColumnNames() []string {
|
||||
names := make([]string, 0, len(block.Columns))
|
||||
for _, column := range block.Columns {
|
||||
names = append(names, column.Name())
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
func (block *Block) Read(serverInfo *ServerInfo, decoder *binary.Decoder) (err error) {
|
||||
if err = block.info.read(decoder); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if block.NumColumns, err = decoder.Uvarint(); err != nil {
|
||||
return err
|
||||
}
|
||||
if block.NumRows, err = decoder.Uvarint(); err != nil {
|
||||
return err
|
||||
}
|
||||
block.Values = make([][]interface{}, block.NumColumns)
|
||||
if block.NumRows > 10 {
|
||||
for i := 0; i < int(block.NumColumns); i++ {
|
||||
block.Values[i] = make([]interface{}, 0, block.NumRows)
|
||||
}
|
||||
}
|
||||
for i := 0; i < int(block.NumColumns); i++ {
|
||||
var (
|
||||
value interface{}
|
||||
columnName string
|
||||
columnType string
|
||||
)
|
||||
if columnName, err = decoder.String(); err != nil {
|
||||
return err
|
||||
}
|
||||
if columnType, err = decoder.String(); err != nil {
|
||||
return err
|
||||
}
|
||||
c, err := column.Factory(columnName, columnType, serverInfo.Timezone)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
block.Columns = append(block.Columns, c)
|
||||
switch column := c.(type) {
|
||||
case *column.Array:
|
||||
if block.Values[i], err = column.ReadArray(decoder, int(block.NumRows)); err != nil {
|
||||
return err
|
||||
}
|
||||
case *column.Nullable:
|
||||
if block.Values[i], err = column.ReadNull(decoder, int(block.NumRows)); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
for row := 0; row < int(block.NumRows); row++ {
|
||||
if value, err = column.Read(decoder); err != nil {
|
||||
return err
|
||||
}
|
||||
block.Values[i] = append(block.Values[i], value)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (block *Block) writeArray(column column.Column, value Value, num, level int) error {
|
||||
if level > column.Depth() {
|
||||
return column.Write(block.buffers[num].Column, value.Interface())
|
||||
}
|
||||
switch {
|
||||
case value.Kind() == reflect.Slice:
|
||||
if len(block.offsets[num]) < level {
|
||||
block.offsets[num] = append(block.offsets[num], []int{value.Len()})
|
||||
} else {
|
||||
block.offsets[num][level-1] = append(
|
||||
block.offsets[num][level-1],
|
||||
block.offsets[num][level-1][len(block.offsets[num][level-1])-1]+value.Len(),
|
||||
)
|
||||
}
|
||||
for i := 0; i < value.Len(); i++ {
|
||||
if err := block.writeArray(column, value.Index(i), num, level+1); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
default:
|
||||
if err := column.Write(block.buffers[num].Column, value.Interface()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (block *Block) AppendRow(args []driver.Value) error {
|
||||
if len(block.Columns) != len(args) {
|
||||
return fmt.Errorf("block: expected %d arguments (columns: %s), got %d", len(block.Columns), strings.Join(block.ColumnNames(), ", "), len(args))
|
||||
}
|
||||
block.Reserve()
|
||||
{
|
||||
block.NumRows++
|
||||
}
|
||||
for num, c := range block.Columns {
|
||||
switch column := c.(type) {
|
||||
case *column.Array:
|
||||
value := reflect.ValueOf(args[num])
|
||||
if value.Kind() != reflect.Slice {
|
||||
return fmt.Errorf("unsupported Array(T) type [%T]", value.Interface())
|
||||
}
|
||||
if err := block.writeArray(c, newValue(value), num, 1); err != nil {
|
||||
return err
|
||||
}
|
||||
case *column.Nullable:
|
||||
if err := column.WriteNull(block.buffers[num].Offset, block.buffers[num].Column, args[num]); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
if err := column.Write(block.buffers[num].Column, args[num]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (block *Block) Reserve() {
|
||||
if len(block.buffers) == 0 {
|
||||
block.buffers = make([]*buffer, len(block.Columns))
|
||||
block.offsets = make([]offset, len(block.Columns))
|
||||
for i := 0; i < len(block.Columns); i++ {
|
||||
var (
|
||||
offsetBuffer = wb.New(wb.InitialSize)
|
||||
columnBuffer = wb.New(wb.InitialSize)
|
||||
)
|
||||
block.buffers[i] = &buffer{
|
||||
Offset: binary.NewEncoder(offsetBuffer),
|
||||
Column: binary.NewEncoder(columnBuffer),
|
||||
offsetBuffer: offsetBuffer,
|
||||
columnBuffer: columnBuffer,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (block *Block) Reset() {
|
||||
block.NumRows = 0
|
||||
block.NumColumns = 0
|
||||
for _, buffer := range block.buffers {
|
||||
buffer.reset()
|
||||
}
|
||||
{
|
||||
block.offsets = nil
|
||||
block.buffers = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (block *Block) Write(serverInfo *ServerInfo, encoder *binary.Encoder) error {
|
||||
if err := block.info.write(encoder); err != nil {
|
||||
return err
|
||||
}
|
||||
encoder.Uvarint(block.NumColumns)
|
||||
encoder.Uvarint(block.NumRows)
|
||||
defer func() {
|
||||
block.NumRows = 0
|
||||
for i := range block.offsets {
|
||||
block.offsets[i] = offset{}
|
||||
}
|
||||
}()
|
||||
for i, column := range block.Columns {
|
||||
encoder.String(column.Name())
|
||||
encoder.String(column.CHType())
|
||||
if len(block.buffers) == len(block.Columns) {
|
||||
for _, offsets := range block.offsets[i] {
|
||||
for _, offset := range offsets {
|
||||
if err := encoder.UInt64(uint64(offset)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if _, err := block.buffers[i].WriteTo(encoder); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type blockInfo struct {
|
||||
num1 uint64
|
||||
isOverflows bool
|
||||
num2 uint64
|
||||
bucketNum int32
|
||||
num3 uint64
|
||||
}
|
||||
|
||||
func (info *blockInfo) read(decoder *binary.Decoder) error {
|
||||
var err error
|
||||
if info.num1, err = decoder.Uvarint(); err != nil {
|
||||
return err
|
||||
}
|
||||
if info.isOverflows, err = decoder.Bool(); err != nil {
|
||||
return err
|
||||
}
|
||||
if info.num2, err = decoder.Uvarint(); err != nil {
|
||||
return err
|
||||
}
|
||||
if info.bucketNum, err = decoder.Int32(); err != nil {
|
||||
return err
|
||||
}
|
||||
if info.num3, err = decoder.Uvarint(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (info *blockInfo) write(encoder *binary.Encoder) error {
|
||||
if err := encoder.Uvarint(1); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := encoder.Bool(info.isOverflows); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := encoder.Uvarint(2); err != nil {
|
||||
return err
|
||||
}
|
||||
if info.bucketNum == 0 {
|
||||
info.bucketNum = -1
|
||||
}
|
||||
if err := encoder.Int32(info.bucketNum); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := encoder.Uvarint(0); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type buffer struct {
|
||||
Offset *binary.Encoder
|
||||
Column *binary.Encoder
|
||||
offsetBuffer *wb.WriteBuffer
|
||||
columnBuffer *wb.WriteBuffer
|
||||
}
|
||||
|
||||
func (buf *buffer) WriteTo(w io.Writer) (int64, error) {
|
||||
var size int64
|
||||
{
|
||||
ln, err := buf.offsetBuffer.WriteTo(w)
|
||||
if err != nil {
|
||||
return size, err
|
||||
}
|
||||
size += ln
|
||||
}
|
||||
{
|
||||
ln, err := buf.columnBuffer.WriteTo(w)
|
||||
if err != nil {
|
||||
return size, err
|
||||
}
|
||||
size += ln
|
||||
}
|
||||
return size, nil
|
||||
}
|
||||
|
||||
func (buf *buffer) reset() {
|
||||
buf.offsetBuffer.Reset()
|
||||
buf.columnBuffer.Reset()
|
||||
}
|
|
@@ -1,106 +0,0 @@
package data

import (
	"fmt"
	"net"
	"reflect"
	"time"

	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

func (block *Block) WriteDate(c int, v time.Time) error {
	_, offset := v.Zone()
	nday := (v.Unix() + int64(offset)) / 24 / 3600
	return block.buffers[c].Column.UInt16(uint16(nday))
}

func (block *Block) WriteDateTime(c int, v time.Time) error {
	return block.buffers[c].Column.UInt32(uint32(v.Unix()))
}

func (block *Block) WriteBool(c int, v bool) error {
	if v {
		return block.buffers[c].Column.UInt8(1)
	}
	return block.buffers[c].Column.UInt8(0)
}

func (block *Block) WriteInt8(c int, v int8) error {
	return block.buffers[c].Column.Int8(v)
}

func (block *Block) WriteInt16(c int, v int16) error {
	return block.buffers[c].Column.Int16(v)
}

func (block *Block) WriteInt32(c int, v int32) error {
	return block.buffers[c].Column.Int32(v)
}

func (block *Block) WriteInt64(c int, v int64) error {
	return block.buffers[c].Column.Int64(v)
}

func (block *Block) WriteUInt8(c int, v uint8) error {
	return block.buffers[c].Column.UInt8(v)
}

func (block *Block) WriteUInt16(c int, v uint16) error {
	return block.buffers[c].Column.UInt16(v)
}

func (block *Block) WriteUInt32(c int, v uint32) error {
	return block.buffers[c].Column.UInt32(v)
}

func (block *Block) WriteUInt64(c int, v uint64) error {
	return block.buffers[c].Column.UInt64(v)
}

func (block *Block) WriteFloat32(c int, v float32) error {
	return block.buffers[c].Column.Float32(v)
}

func (block *Block) WriteFloat64(c int, v float64) error {
	return block.buffers[c].Column.Float64(v)
}

func (block *Block) WriteBytes(c int, v []byte) error {
	if err := block.buffers[c].Column.Uvarint(uint64(len(v))); err != nil {
		return err
	}
	if _, err := block.buffers[c].Column.Write(v); err != nil {
		return err
	}
	return nil
}

func (block *Block) WriteString(c int, v string) error {
	if err := block.buffers[c].Column.Uvarint(uint64(len(v))); err != nil {
		return err
	}
	if _, err := block.buffers[c].Column.Write(binary.Str2Bytes(v)); err != nil {
		return err
	}
	return nil
}

func (block *Block) WriteFixedString(c int, v []byte) error {
	return block.Columns[c].Write(block.buffers[c].Column, v)
}

func (block *Block) WriteIP(c int, v net.IP) error {
	return block.Columns[c].Write(block.buffers[c].Column, v)
}

func (block *Block) WriteArray(c int, v interface{}) error {
	return block.WriteArrayWithValue(c, newValue(reflect.ValueOf(v)))
}

func (block *Block) WriteArrayWithValue(c int, value Value) error {
	if value.Kind() != reflect.Slice {
		return fmt.Errorf("unsupported Array(T) type [%T]", value.Interface())
	}
	return block.writeArray(block.Columns[c], value, c, 1)
}

@@ -1,29 +0,0 @@
package data

import (
	"fmt"

	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

const ClientName = "Golang SQLDriver"

const (
	ClickHouseRevision         = 54213
	ClickHouseDBMSVersionMajor = 1
	ClickHouseDBMSVersionMinor = 1
)

type ClientInfo struct{}

func (ClientInfo) Write(encoder *binary.Encoder) error {
	encoder.String(ClientName)
	encoder.Uvarint(ClickHouseDBMSVersionMajor)
	encoder.Uvarint(ClickHouseDBMSVersionMinor)
	encoder.Uvarint(ClickHouseRevision)
	return nil
}

func (ClientInfo) String() string {
	return fmt.Sprintf("%s %d.%d.%d", ClientName, ClickHouseDBMSVersionMajor, ClickHouseDBMSVersionMinor, ClickHouseRevision)
}

@@ -1,47 +0,0 @@
package data

import (
	"fmt"
	//"io"
	"time"

	"github.com/ClickHouse/clickhouse-go/lib/binary"
	"github.com/ClickHouse/clickhouse-go/lib/protocol"
)

type ServerInfo struct {
	Name         string
	Revision     uint64
	MinorVersion uint64
	MajorVersion uint64
	Timezone     *time.Location
}

func (srv *ServerInfo) Read(decoder *binary.Decoder) (err error) {
	if srv.Name, err = decoder.String(); err != nil {
		return fmt.Errorf("could not read server name: %v", err)
	}
	if srv.MajorVersion, err = decoder.Uvarint(); err != nil {
		return fmt.Errorf("could not read server major version: %v", err)
	}
	if srv.MinorVersion, err = decoder.Uvarint(); err != nil {
		return fmt.Errorf("could not read server minor version: %v", err)
	}
	if srv.Revision, err = decoder.Uvarint(); err != nil {
		return fmt.Errorf("could not read server revision: %v", err)
	}
	if srv.Revision >= protocol.DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE {
		timezone, err := decoder.String()
		if err != nil {
			return fmt.Errorf("could not read server timezone: %v", err)
		}
		if srv.Timezone, err = time.LoadLocation(timezone); err != nil {
			return fmt.Errorf("could not load time location: %v", err)
		}
	}
	return nil
}

func (srv ServerInfo) String() string {
	return fmt.Sprintf("%s %d.%d.%d (%s)", srv.Name, srv.MajorVersion, srv.MinorVersion, srv.Revision, srv.Timezone)
}

@@ -1,33 +0,0 @@
package data

import "reflect"

// Value is a writable value.
type Value interface {
	// Kind returns value's Kind.
	Kind() reflect.Kind

	// Len returns value's length.
	// It panics if value's Kind is not Array, Chan, Map, Slice, or String.
	Len() int

	// Index returns value's i'th element.
	// It panics if value's Kind is not Array, Slice, or String or i is out of range.
	Index(i int) Value

	// Interface returns value's current value as an interface{}.
	Interface() interface{}
}

// value is a wrapper that wraps reflect.Value to comply with Value interface.
type value struct {
	reflect.Value
}

func newValue(v reflect.Value) Value {
	return value{Value: v}
}

func (v value) Index(i int) Value {
	return newValue(v.Value.Index(i))
}

@@ -1,23 +0,0 @@
package leakypool

var pool chan []byte

func InitBytePool(size int) {
	pool = make(chan []byte, size)
}

func GetBytes(size, capacity int) (b []byte) {
	select {
	case b = <-pool:
	default:
		b = make([]byte, size, capacity)
	}
	return
}

func PutBytes(b []byte) {
	select {
	case pool <- b:
	default:
	}
}

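A minimal sketch of how a caller might use this leaky pool (a hypothetical program, not part of the deleted file; it assumes only the import path shown in the hunk above):

package main

import "github.com/ClickHouse/clickhouse-go/lib/leakypool"

func main() {
	// Size the recycle channel once at startup; at most 16 buffers are kept around.
	leakypool.InitBytePool(16)

	// Take a buffer: a recycled one if the pool has any, a fresh allocation otherwise.
	buf := leakypool.GetBytes(0, 1024)
	buf = append(buf, "payload"...)

	// Hand it back truncated so a later GetBytes can reuse the backing array;
	// if the pool is already full, the buffer is simply dropped for the GC.
	leakypool.PutBytes(buf[:0])
}
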
@@ -1,23 +0,0 @@
Copyright 2011-2012 Branimir Karadzic. All rights reserved.
Copyright 2013 Damian Gryski. All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.

@@ -1,7 +0,0 @@
// Copyright 2011-2012 Branimir Karadzic. All rights reserved.
// Copyright 2013 Damian Gryski. All rights reserved.

// @LINK: https://github.com/bkaradzic/go-lz4
// @NOTE: The code has been modified for higher performance and lower memory usage

package lz4

@@ -1,23 +0,0 @@
// +build gofuzz

package lz4

import "encoding/binary"

func Fuzz(data []byte) int {

	if len(data) < 4 {
		return 0
	}

	ln := binary.LittleEndian.Uint32(data)
	if ln > (1 << 21) {
		return 0
	}

	if _, err := Decode(nil, data); err != nil {
		return 0
	}

	return 1
}

@@ -1,179 +0,0 @@
|
|||
/*
|
||||
* Copyright 2011-2012 Branimir Karadzic. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice, this
|
||||
* list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
* SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
* THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package lz4
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrCorrupt indicates the input was corrupt
|
||||
ErrCorrupt = errors.New("corrupt input")
|
||||
)
|
||||
|
||||
const (
|
||||
mlBits = 4
|
||||
mlMask = (1 << mlBits) - 1
|
||||
runBits = 8 - mlBits
|
||||
runMask = (1 << runBits) - 1
|
||||
)
|
||||
|
||||
type decoder struct {
|
||||
src []byte
|
||||
dst []byte
|
||||
spos uint32
|
||||
dpos uint32
|
||||
ref uint32
|
||||
}
|
||||
|
||||
func (d *decoder) readByte() (uint8, error) {
|
||||
if int(d.spos) == len(d.src) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
b := d.src[d.spos]
|
||||
d.spos++
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (d *decoder) getLen() (uint32, error) {
|
||||
|
||||
length := uint32(0)
|
||||
ln, err := d.readByte()
|
||||
if err != nil {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
for ln == 255 {
|
||||
length += 255
|
||||
ln, err = d.readByte()
|
||||
if err != nil {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
}
|
||||
length += uint32(ln)
|
||||
|
||||
return length, nil
|
||||
}
|
||||
|
||||
func (d *decoder) cp(length, decr uint32) {
|
||||
|
||||
if int(d.ref+length) < int(d.dpos) {
|
||||
copy(d.dst[d.dpos:], d.dst[d.ref:d.ref+length])
|
||||
} else {
|
||||
for ii := uint32(0); ii < length; ii++ {
|
||||
d.dst[d.dpos+ii] = d.dst[d.ref+ii]
|
||||
}
|
||||
}
|
||||
d.dpos += length
|
||||
d.ref += length - decr
|
||||
}
|
||||
|
||||
func (d *decoder) finish(err error) error {
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Decode returns the decoded form of src. The returned slice may be a
|
||||
// subslice of dst if it was large enough to hold the entire decoded block.
|
||||
func Decode(dst, src []byte) (int, error) {
|
||||
d := decoder{src: src, dst: dst, spos: 0}
|
||||
|
||||
decr := []uint32{0, 3, 2, 3}
|
||||
|
||||
for {
|
||||
code, err := d.readByte()
|
||||
if err != nil {
|
||||
return len(d.dst), d.finish(err)
|
||||
}
|
||||
|
||||
length := uint32(code >> mlBits)
|
||||
if length == runMask {
|
||||
ln, err := d.getLen()
|
||||
if err != nil {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
length += ln
|
||||
}
|
||||
|
||||
if int(d.spos+length) > len(d.src) || int(d.dpos+length) > len(d.dst) {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
|
||||
for ii := uint32(0); ii < length; ii++ {
|
||||
d.dst[d.dpos+ii] = d.src[d.spos+ii]
|
||||
}
|
||||
|
||||
d.spos += length
|
||||
d.dpos += length
|
||||
|
||||
if int(d.spos) == len(d.src) {
|
||||
return len(d.dst), nil
|
||||
}
|
||||
|
||||
if int(d.spos+2) >= len(d.src) {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
|
||||
back := uint32(d.src[d.spos]) | uint32(d.src[d.spos+1])<<8
|
||||
|
||||
if back > d.dpos {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
|
||||
d.spos += 2
|
||||
d.ref = d.dpos - back
|
||||
|
||||
length = uint32(code & mlMask)
|
||||
if length == mlMask {
|
||||
ln, err := d.getLen()
|
||||
if err != nil {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
length += ln
|
||||
}
|
||||
|
||||
literal := d.dpos - d.ref
|
||||
|
||||
if literal < 4 {
|
||||
if int(d.dpos+4) > len(d.dst) {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
|
||||
d.cp(4, decr[literal])
|
||||
} else {
|
||||
length += 4
|
||||
}
|
||||
|
||||
if int(d.dpos+length) > len(d.dst) {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
|
||||
d.cp(length, 0)
|
||||
}
|
||||
}
|
|
@@ -1,203 +0,0 @@
|
|||
/*
|
||||
* Copyright 2011-2012 Branimir Karadzic. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice, this
|
||||
* list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
* SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
* THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package lz4
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
minMatch = 4
|
||||
hashLog = 16
|
||||
hashTableSize = 1 << hashLog
|
||||
hashShift = (minMatch * 8) - hashLog
|
||||
incompressible uint32 = 128
|
||||
uninitHash = 0x88888888
|
||||
|
||||
mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes.
|
||||
// MaxInputSize is the largest buffer than can be compressed in a single block
|
||||
MaxInputSize = 0x7E000000
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrTooLarge indicates the input buffer was too large
|
||||
ErrTooLarge = errors.New("input too large")
|
||||
ErrEncodeTooSmall = errors.New("encode buffer too small")
|
||||
|
||||
hashPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return make([]uint32, hashTableSize)
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
type encoder struct {
|
||||
src []byte
|
||||
dst []byte
|
||||
hashTable []uint32
|
||||
pos uint32
|
||||
anchor uint32
|
||||
dpos uint32
|
||||
}
|
||||
|
||||
// CompressBound returns the maximum length of a lz4 block
|
||||
func CompressBound(isize int) int {
|
||||
if isize > MaxInputSize {
|
||||
return 0
|
||||
}
|
||||
return isize + ((isize) / 255) + 16
|
||||
}
|
||||
|
||||
func (e *encoder) writeLiterals(length, mlLen, pos uint32) {
|
||||
|
||||
ln := length
|
||||
|
||||
var code byte
|
||||
if ln > runMask-1 {
|
||||
code = runMask
|
||||
} else {
|
||||
code = byte(ln)
|
||||
}
|
||||
|
||||
if mlLen > mlMask-1 {
|
||||
e.dst[e.dpos] = (code << mlBits) + byte(mlMask)
|
||||
} else {
|
||||
e.dst[e.dpos] = (code << mlBits) + byte(mlLen)
|
||||
}
|
||||
e.dpos++
|
||||
|
||||
if code == runMask {
|
||||
ln -= runMask
|
||||
for ; ln > 254; ln -= 255 {
|
||||
e.dst[e.dpos] = 255
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
e.dst[e.dpos] = byte(ln)
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
for ii := uint32(0); ii < length; ii++ {
|
||||
e.dst[e.dpos+ii] = e.src[pos+ii]
|
||||
}
|
||||
|
||||
e.dpos += length
|
||||
}
|
||||
|
||||
// Encode returns the encoded form of src. The returned array may be a
|
||||
// sub-slice of dst if it was large enough to hold the entire output.
|
||||
func Encode(dst, src []byte) (compressedSize int, error error) {
|
||||
if len(src) >= MaxInputSize {
|
||||
return 0, ErrTooLarge
|
||||
}
|
||||
|
||||
if n := CompressBound(len(src)); len(dst) < n {
|
||||
return 0, ErrEncodeTooSmall
|
||||
}
|
||||
|
||||
hashTable := hashPool.Get().([]uint32)
|
||||
for i := range hashTable {
|
||||
hashTable[i] = 0
|
||||
}
|
||||
e := encoder{src: src, dst: dst, hashTable: hashTable}
|
||||
defer func() {
|
||||
hashPool.Put(hashTable)
|
||||
}()
|
||||
// binary.LittleEndian.PutUint32(dst, uint32(len(src)))
|
||||
// e.dpos = 0
|
||||
|
||||
var (
|
||||
step uint32 = 1
|
||||
limit = incompressible
|
||||
)
|
||||
|
||||
for {
|
||||
if int(e.pos)+12 >= len(e.src) {
|
||||
e.writeLiterals(uint32(len(e.src))-e.anchor, 0, e.anchor)
|
||||
return int(e.dpos), nil
|
||||
}
|
||||
|
||||
sequence := uint32(e.src[e.pos+3])<<24 | uint32(e.src[e.pos+2])<<16 | uint32(e.src[e.pos+1])<<8 | uint32(e.src[e.pos+0])
|
||||
|
||||
hash := (sequence * 2654435761) >> hashShift
|
||||
ref := e.hashTable[hash] + uninitHash
|
||||
e.hashTable[hash] = e.pos - uninitHash
|
||||
|
||||
if ((e.pos-ref)>>16) != 0 || uint32(e.src[ref+3])<<24|uint32(e.src[ref+2])<<16|uint32(e.src[ref+1])<<8|uint32(e.src[ref+0]) != sequence {
|
||||
if e.pos-e.anchor > limit {
|
||||
limit <<= 1
|
||||
step += 1 + (step >> 2)
|
||||
}
|
||||
e.pos += step
|
||||
continue
|
||||
}
|
||||
|
||||
if step > 1 {
|
||||
e.hashTable[hash] = ref - uninitHash
|
||||
e.pos -= step - 1
|
||||
step = 1
|
||||
continue
|
||||
}
|
||||
limit = incompressible
|
||||
|
||||
ln := e.pos - e.anchor
|
||||
back := e.pos - ref
|
||||
|
||||
anchor := e.anchor
|
||||
|
||||
e.pos += minMatch
|
||||
ref += minMatch
|
||||
e.anchor = e.pos
|
||||
|
||||
for int(e.pos) < len(e.src)-5 && e.src[e.pos] == e.src[ref] {
|
||||
e.pos++
|
||||
ref++
|
||||
}
|
||||
|
||||
mlLen := e.pos - e.anchor
|
||||
|
||||
e.writeLiterals(ln, mlLen, anchor)
|
||||
e.dst[e.dpos] = uint8(back)
|
||||
e.dst[e.dpos+1] = uint8(back >> 8)
|
||||
e.dpos += 2
|
||||
|
||||
if mlLen > mlMask-1 {
|
||||
mlLen -= mlMask
|
||||
for mlLen > 254 {
|
||||
mlLen -= 255
|
||||
|
||||
e.dst[e.dpos] = 255
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
e.dst[e.dpos] = byte(mlLen)
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
e.anchor = e.pos
|
||||
}
|
||||
}
|
|
@@ -1,4 +0,0 @@
# ClickHouse Native protocol

# Handshake

@@ -1,35 +0,0 @@
package protocol

const (
	DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE          = 54058
	DBMS_MIN_REVISION_WITH_QUOTA_KEY_IN_CLIENT_INFO = 54060
)

const (
	ClientHello  = 0
	ClientQuery  = 1
	ClientData   = 2
	ClientCancel = 3
	ClientPing   = 4
)

const (
	CompressEnable  uint64 = 1
	CompressDisable uint64 = 0
)

const (
	StateComplete = 2
)

const (
	ServerHello       = 0
	ServerData        = 1
	ServerException   = 2
	ServerProgress    = 3
	ServerPong        = 4
	ServerEndOfStream = 5
	ServerProfileInfo = 6
	ServerTotals      = 7
	ServerExtremes    = 8
)

@@ -1,48 +0,0 @@
// Timezoneless date/datetime types

package types

import (
	"database/sql/driver"
	"time"
)

// Truncate timezone
//
// clickhouse.Date(time.Date(2017, 1, 1, 0, 0, 0, 0, time.Local)) -> time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC)
type Date time.Time

func (date Date) Value() (driver.Value, error) {
	return date.convert(), nil
}

func (date Date) convert() time.Time {
	return time.Date(time.Time(date).Year(), time.Time(date).Month(), time.Time(date).Day(), 0, 0, 0, 0, time.UTC)
}

// Truncate timezone
//
// clickhouse.DateTime(time.Date(2017, 1, 1, 0, 0, 0, 0, time.Local)) -> time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC)
type DateTime time.Time

func (datetime DateTime) Value() (driver.Value, error) {
	return datetime.convert(), nil
}

func (datetime DateTime) convert() time.Time {
	return time.Date(
		time.Time(datetime).Year(),
		time.Time(datetime).Month(),
		time.Time(datetime).Day(),
		time.Time(datetime).Hour(),
		time.Time(datetime).Minute(),
		time.Time(datetime).Second(),
		1,
		time.UTC,
	)
}

var (
	_ driver.Valuer = Date{}
	_ driver.Valuer = DateTime{}
)

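For context, a hedged sketch of the conversion these types perform (a hypothetical caller, not part of the deleted file; it assumes only the lib/types import path shown above):

package main

import (
	"fmt"
	"time"

	"github.com/ClickHouse/clickhouse-go/lib/types"
)

func main() {
	local := time.Date(2017, 1, 1, 15, 30, 0, 0, time.Local)

	// Date keeps only the calendar day and rebuilds it at midnight UTC.
	d, _ := types.Date(local).Value()

	// DateTime keeps the wall-clock fields but re-labels them as UTC
	// (convert above also sets the nanosecond field to 1).
	dt, _ := types.DateTime(local).Value()

	fmt.Println(d, dt)
}
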
@@ -1,99 +0,0 @@
|
|||
package types
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
var InvalidUUIDFormatError = errors.New("invalid UUID format")
|
||||
|
||||
// this type will be deprecated because the ClickHouse server (>=1.1.54276) has a built-in type UUID
|
||||
type UUID string
|
||||
|
||||
func (str UUID) Value() (driver.Value, error) {
|
||||
return uuid2bytes(string(str))
|
||||
}
|
||||
|
||||
func (str UUID) MarshalBinary() ([]byte, error) {
|
||||
return uuid2bytes(string(str))
|
||||
}
|
||||
|
||||
func (str *UUID) Scan(v interface{}) error {
|
||||
var src []byte
|
||||
switch v := v.(type) {
|
||||
case string:
|
||||
src = []byte(v)
|
||||
case []byte:
|
||||
src = v
|
||||
}
|
||||
|
||||
if len(src) != 16 {
|
||||
return fmt.Errorf("invalid UUID length: %d", len(src))
|
||||
}
|
||||
|
||||
var uuid [36]byte
|
||||
{
|
||||
hex.Encode(uuid[:], src[:4])
|
||||
uuid[8] = '-'
|
||||
hex.Encode(uuid[9:13], src[4:6])
|
||||
uuid[13] = '-'
|
||||
hex.Encode(uuid[14:18], src[6:8])
|
||||
uuid[18] = '-'
|
||||
hex.Encode(uuid[19:23], src[8:10])
|
||||
uuid[23] = '-'
|
||||
hex.Encode(uuid[24:], src[10:])
|
||||
}
|
||||
*str = UUID(uuid[:])
|
||||
return nil
|
||||
}
|
||||
|
||||
func uuid2bytes(str string) ([]byte, error) {
|
||||
var uuid [16]byte
|
||||
if str[8] != '-' || str[13] != '-' || str[18] != '-' || str[23] != '-' {
|
||||
return nil, InvalidUUIDFormatError
|
||||
}
|
||||
for i, x := range [16]int{
|
||||
0, 2, 4, 6,
|
||||
9, 11, 14, 16,
|
||||
19, 21, 24, 26,
|
||||
28, 30, 32, 34,
|
||||
} {
|
||||
if v, ok := xtob(str[x], str[x+1]); !ok {
|
||||
return nil, InvalidUUIDFormatError
|
||||
} else {
|
||||
uuid[i] = v
|
||||
}
|
||||
}
|
||||
return uuid[:], nil
|
||||
}
|
||||
|
||||
// xvalues returns the value of a byte as a hexadecimal digit or 255.
|
||||
var xvalues = [256]byte{
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
|
||||
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
}
|
||||
|
||||
// xtob converts hex characters x1 and x2 into a byte.
|
||||
func xtob(x1, x2 byte) (byte, bool) {
|
||||
b1 := xvalues[x1]
|
||||
b2 := xvalues[x2]
|
||||
return (b1 << 4) | b2, b1 != 255 && b2 != 255
|
||||
}
|
||||
|
||||
var _ driver.Valuer = UUID("")
|
|
@@ -1,113 +0,0 @@
package writebuffer

import (
	"io"

	"github.com/ClickHouse/clickhouse-go/lib/leakypool"
)

const InitialSize = 256 * 1024

func New(initSize int) *WriteBuffer {
	wb := &WriteBuffer{}
	wb.addChunk(0, initSize)
	return wb
}

type WriteBuffer struct {
	chunks [][]byte
}

func (wb *WriteBuffer) Write(data []byte) (int, error) {
	var (
		chunkIdx = len(wb.chunks) - 1
		dataSize = len(data)
	)
	for {
		freeSize := cap(wb.chunks[chunkIdx]) - len(wb.chunks[chunkIdx])
		if freeSize >= len(data) {
			wb.chunks[chunkIdx] = append(wb.chunks[chunkIdx], data...)
			return dataSize, nil
		}
		wb.chunks[chunkIdx] = append(wb.chunks[chunkIdx], data[:freeSize]...)
		data = data[freeSize:]
		wb.addChunk(0, wb.calcCap(len(data)))
		chunkIdx++
	}
}

func (wb *WriteBuffer) WriteTo(w io.Writer) (int64, error) {
	var size int64
	for _, chunk := range wb.chunks {
		ln, err := w.Write(chunk)
		if err != nil {
			wb.Reset()
			return 0, err
		}
		size += int64(ln)
	}
	wb.Reset()
	return size, nil
}

func (wb *WriteBuffer) Bytes() []byte {
	if len(wb.chunks) == 1 {
		return wb.chunks[0]
	}
	bytes := make([]byte, 0, wb.len())
	for _, chunk := range wb.chunks {
		bytes = append(bytes, chunk...)
	}
	return bytes
}

func (wb *WriteBuffer) addChunk(size, capacity int) {
	chunk := leakypool.GetBytes(size, capacity)
	if cap(chunk) >= size {
		chunk = chunk[:size]
	}
	wb.chunks = append(wb.chunks, chunk)
}

func (wb *WriteBuffer) len() int {
	var v int
	for _, chunk := range wb.chunks {
		v += len(chunk)
	}
	return v
}

func (wb *WriteBuffer) calcCap(dataSize int) int {
	dataSize = max(dataSize, 64)
	if len(wb.chunks) == 0 {
		return dataSize
	}
	// Always double the size of the last chunk
	return max(dataSize, cap(wb.chunks[len(wb.chunks)-1])*2)
}

func (wb *WriteBuffer) Reset() {
	if len(wb.chunks) == 0 {
		return
	}
	// Recycle all chunks except the last one
	chunkSizeThreshold := cap(wb.chunks[0])
	for _, chunk := range wb.chunks[:len(wb.chunks)-1] {
		// Drain chunks smaller than the initial size
		if cap(chunk) >= chunkSizeThreshold {
			leakypool.PutBytes(chunk[:0])
		} else {
			chunkSizeThreshold = cap(chunk)
		}
	}
	// Keep the largest chunk
	wb.chunks[0] = wb.chunks[len(wb.chunks)-1][:0]
	wb.chunks = wb.chunks[:1]
}

func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}

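A minimal usage sketch for the chunked write buffer above (a hypothetical caller, not part of the deleted file; it assumes only the two import paths used in this repository):

package main

import (
	"bytes"
	"fmt"

	"github.com/ClickHouse/clickhouse-go/lib/leakypool"
	"github.com/ClickHouse/clickhouse-go/lib/writebuffer"
)

func main() {
	// Chunks are taken from the leaky byte pool; initializing it lets Reset recycle them.
	leakypool.InitBytePool(8)

	wb := writebuffer.New(writebuffer.InitialSize)
	wb.Write([]byte("hello "))
	wb.Write([]byte("clickhouse"))

	// WriteTo drains every chunk into the destination and resets the buffer.
	var out bytes.Buffer
	n, _ := wb.WriteTo(&out)
	fmt.Println(n, out.String())
}
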
@@ -1,283 +0,0 @@
|
|||
package clickhouse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/lib/binary"
|
||||
)
|
||||
|
||||
type querySettingType int
|
||||
|
||||
// all possible query setting's data type
|
||||
const (
|
||||
uintQS querySettingType = iota + 1
|
||||
intQS
|
||||
boolQS
|
||||
timeQS
|
||||
)
|
||||
|
||||
// description of single query setting
|
||||
type querySettingInfo struct {
|
||||
name string
|
||||
qsType querySettingType
|
||||
}
|
||||
|
||||
// all possible query settings
|
||||
var querySettingList = []querySettingInfo{
|
||||
{"min_compress_block_size", uintQS},
|
||||
{"max_compress_block_size", uintQS},
|
||||
{"max_block_size", uintQS},
|
||||
{"max_insert_block_size", uintQS},
|
||||
{"min_insert_block_size_rows", uintQS},
|
||||
{"min_insert_block_size_bytes", uintQS},
|
||||
{"max_read_buffer_size", uintQS},
|
||||
{"max_distributed_connections", uintQS},
|
||||
{"max_query_size", uintQS},
|
||||
{"interactive_delay", uintQS},
|
||||
{"poll_interval", uintQS},
|
||||
{"distributed_connections_pool_size", uintQS},
|
||||
{"connections_with_failover_max_tries", uintQS},
|
||||
{"background_pool_size", uintQS},
|
||||
{"background_schedule_pool_size", uintQS},
|
||||
{"replication_alter_partitions_sync", uintQS},
|
||||
{"replication_alter_columns_timeout", uintQS},
|
||||
{"min_count_to_compile", uintQS},
|
||||
{"min_count_to_compile_expression", uintQS},
|
||||
{"group_by_two_level_threshold", uintQS},
|
||||
{"group_by_two_level_threshold_bytes", uintQS},
|
||||
{"aggregation_memory_efficient_merge_threads", uintQS},
|
||||
{"max_parallel_replicas", uintQS},
|
||||
{"parallel_replicas_count", uintQS},
|
||||
{"parallel_replica_offset", uintQS},
|
||||
{"merge_tree_min_rows_for_concurrent_read", uintQS},
|
||||
{"merge_tree_min_bytes_for_concurrent_read", uintQS},
|
||||
{"merge_tree_min_rows_for_seek", uintQS},
|
||||
{"merge_tree_min_bytes_for_seek", uintQS},
|
||||
{"merge_tree_coarse_index_granularity", uintQS},
|
||||
{"merge_tree_max_rows_to_use_cache", uintQS},
|
||||
{"merge_tree_max_bytes_to_use_cache", uintQS},
|
||||
{"mysql_max_rows_to_insert", uintQS},
|
||||
{"optimize_min_equality_disjunction_chain_length", uintQS},
|
||||
{"min_bytes_to_use_direct_io", uintQS},
|
||||
{"mark_cache_min_lifetime", uintQS},
|
||||
{"priority", uintQS},
|
||||
{"log_queries_cut_to_length", uintQS},
|
||||
{"max_concurrent_queries_for_user", uintQS},
|
||||
{"insert_quorum", uintQS},
|
||||
{"select_sequential_consistency", uintQS},
|
||||
{"table_function_remote_max_addresses", uintQS},
|
||||
{"read_backoff_max_throughput", uintQS},
|
||||
{"read_backoff_min_events", uintQS},
|
||||
{"output_format_pretty_max_rows", uintQS},
|
||||
{"output_format_pretty_max_column_pad_width", uintQS},
|
||||
{"output_format_parquet_row_group_size", uintQS},
|
||||
{"http_headers_progress_interval_ms", uintQS},
|
||||
{"input_format_allow_errors_num", uintQS},
|
||||
{"preferred_block_size_bytes", uintQS},
|
||||
{"max_replica_delay_for_distributed_queries", uintQS},
|
||||
{"preferred_max_column_in_block_size_bytes", uintQS},
|
||||
{"insert_distributed_timeout", uintQS},
|
||||
{"odbc_max_field_size", uintQS},
|
||||
{"max_rows_to_read", uintQS},
|
||||
{"max_bytes_to_read", uintQS},
|
||||
{"max_rows_to_group_by", uintQS},
|
||||
{"max_bytes_before_external_group_by", uintQS},
|
||||
{"max_rows_to_sort", uintQS},
|
||||
{"max_bytes_to_sort", uintQS},
|
||||
{"max_bytes_before_external_sort", uintQS},
|
||||
{"max_bytes_before_remerge_sort", uintQS},
|
||||
{"max_result_rows", uintQS},
|
||||
{"max_result_bytes", uintQS},
|
||||
{"min_execution_speed", uintQS},
|
||||
{"max_execution_speed", uintQS},
|
||||
{"min_execution_speed_bytes", uintQS},
|
||||
{"max_execution_speed_bytes", uintQS},
|
||||
{"max_columns_to_read", uintQS},
|
||||
{"max_temporary_columns", uintQS},
|
||||
{"max_temporary_non_const_columns", uintQS},
|
||||
{"max_subquery_depth", uintQS},
|
||||
{"max_pipeline_depth", uintQS},
|
||||
{"max_ast_depth", uintQS},
|
||||
{"max_ast_elements", uintQS},
|
||||
{"max_expanded_ast_elements", uintQS},
|
||||
{"readonly", uintQS},
|
||||
{"max_rows_in_set", uintQS},
|
||||
{"max_bytes_in_set", uintQS},
|
||||
{"max_rows_in_join", uintQS},
|
||||
{"max_bytes_in_join", uintQS},
|
||||
{"max_rows_to_transfer", uintQS},
|
||||
{"max_bytes_to_transfer", uintQS},
|
||||
{"max_rows_in_distinct", uintQS},
|
||||
{"max_bytes_in_distinct", uintQS},
|
||||
{"max_memory_usage", uintQS},
|
||||
{"max_memory_usage_for_user", uintQS},
|
||||
{"max_memory_usage_for_all_queries", uintQS},
|
||||
{"max_network_bandwidth", uintQS},
|
||||
{"max_network_bytes", uintQS},
|
||||
{"max_network_bandwidth_for_user", uintQS},
|
||||
{"max_network_bandwidth_for_all_users", uintQS},
|
||||
{"low_cardinality_max_dictionary_size", uintQS},
|
||||
{"max_fetch_partition_retries_count", uintQS},
|
||||
{"http_max_multipart_form_data_size", uintQS},
|
||||
{"max_partitions_per_insert_block", uintQS},
|
||||
{"max_threads", uintQS},
|
||||
|
||||
{"network_zstd_compression_level", intQS},
|
||||
{"http_zlib_compression_level", intQS},
|
||||
{"distributed_ddl_task_timeout", intQS},
|
||||
|
||||
{"extremes", boolQS},
|
||||
{"use_uncompressed_cache", boolQS},
|
||||
{"replace_running_query", boolQS},
|
||||
{"distributed_directory_monitor_batch_inserts", boolQS},
|
||||
{"optimize_move_to_prewhere", boolQS},
|
||||
{"compile", boolQS},
|
||||
{"allow_suspicious_low_cardinality_types", boolQS},
|
||||
{"compile_expressions", boolQS},
|
||||
{"distributed_aggregation_memory_efficient", boolQS},
|
||||
{"skip_unavailable_shards", boolQS},
|
||||
{"distributed_group_by_no_merge", boolQS},
|
||||
{"optimize_skip_unused_shards", boolQS},
|
||||
{"merge_tree_uniform_read_distribution", boolQS},
|
||||
{"force_index_by_date", boolQS},
|
||||
{"force_primary_key", boolQS},
|
||||
{"log_queries", boolQS},
|
||||
{"insert_deduplicate", boolQS},
|
||||
{"enable_http_compression", boolQS},
|
||||
{"http_native_compression_disable_checksumming_on_decompress", boolQS},
|
||||
{"output_format_write_statistics", boolQS},
|
||||
{"add_http_cors_header", boolQS},
|
||||
{"input_format_skip_unknown_fields", boolQS},
|
||||
{"input_format_with_names_use_header", boolQS},
|
||||
{"input_format_import_nested_json", boolQS},
|
||||
{"input_format_defaults_for_omitted_fields", boolQS},
|
||||
{"input_format_values_interpret_expressions", boolQS},
|
||||
{"output_format_json_quote_64bit_integers", boolQS},
|
||||
{"output_format_json_quote_denormals", boolQS},
|
||||
{"output_format_json_escape_forward_slashes", boolQS},
|
||||
{"output_format_pretty_color", boolQS},
|
||||
{"use_client_time_zone", boolQS},
|
||||
{"send_progress_in_http_headers", boolQS},
|
||||
{"fsync_metadata", boolQS},
|
||||
{"join_use_nulls", boolQS},
|
||||
{"fallback_to_stale_replicas_for_distributed_queries", boolQS},
|
||||
{"insert_distributed_sync", boolQS},
|
||||
{"insert_allow_materialized_columns", boolQS},
|
||||
{"optimize_throw_if_noop", boolQS},
|
||||
{"use_index_for_in_with_subqueries", boolQS},
|
||||
{"empty_result_for_aggregation_by_empty_set", boolQS},
|
||||
{"allow_distributed_ddl", boolQS},
|
||||
{"join_any_take_last_row", boolQS},
|
||||
{"format_csv_allow_single_quotes", boolQS},
|
||||
{"format_csv_allow_double_quotes", boolQS},
|
||||
{"log_profile_events", boolQS},
|
||||
{"log_query_settings", boolQS},
|
||||
{"log_query_threads", boolQS},
|
||||
{"enable_optimize_predicate_expression", boolQS},
|
||||
{"low_cardinality_use_single_dictionary_for_part", boolQS},
|
||||
{"decimal_check_overflow", boolQS},
|
||||
{"prefer_localhost_replica", boolQS},
|
||||
//{"asterisk_left_columns_only", boolQS},
|
||||
{"calculate_text_stack_trace", boolQS},
|
||||
{"allow_ddl", boolQS},
|
||||
{"parallel_view_processing", boolQS},
|
||||
{"enable_debug_queries", boolQS},
|
||||
{"enable_unaligned_array_join", boolQS},
|
||||
{"low_cardinality_allow_in_native_format", boolQS},
|
||||
{"allow_experimental_multiple_joins_emulation", boolQS},
|
||||
{"allow_experimental_cross_to_join_conversion", boolQS},
|
||||
{"cancel_http_readonly_queries_on_client_close", boolQS},
|
||||
{"external_table_functions_use_nulls", boolQS},
|
||||
{"allow_experimental_data_skipping_indices", boolQS},
|
||||
{"allow_hyperscan", boolQS},
|
||||
{"allow_simdjson", boolQS},
|
||||
|
||||
{"connect_timeout", timeQS},
|
||||
{"connect_timeout_with_failover_ms", timeQS},
|
||||
{"receive_timeout", timeQS},
|
||||
{"send_timeout", timeQS},
|
||||
{"tcp_keep_alive_timeout", timeQS},
|
||||
{"queue_max_wait_ms", timeQS},
|
||||
{"distributed_directory_monitor_sleep_time_ms", timeQS},
|
||||
{"insert_quorum_timeout", timeQS},
|
||||
{"read_backoff_min_latency_ms", timeQS},
|
||||
{"read_backoff_min_interval_between_events_ms", timeQS},
|
||||
{"stream_flush_interval_ms", timeQS},
|
||||
{"stream_poll_timeout_ms", timeQS},
|
||||
{"http_connection_timeout", timeQS},
|
||||
{"http_send_timeout", timeQS},
|
||||
{"http_receive_timeout", timeQS},
|
||||
{"max_execution_time", timeQS},
|
||||
{"timeout_before_checking_execution_speed", timeQS},
|
||||
}
|
||||
|
||||
type querySettingValueEncoder func(enc *binary.Encoder) error
|
||||
|
||||
type querySettings struct {
|
||||
settings map[string]querySettingValueEncoder
|
||||
settingsStr string // used for debug output
|
||||
}
|
||||
|
||||
func makeQuerySettings(query url.Values) (*querySettings, error) {
|
||||
qs := &querySettings{
|
||||
settings: make(map[string]querySettingValueEncoder),
|
||||
settingsStr: "",
|
||||
}
|
||||
|
||||
for _, info := range querySettingList {
|
||||
valueStr := query.Get(info.name)
|
||||
if valueStr == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
switch info.qsType {
|
||||
case uintQS, intQS, timeQS:
|
||||
value, err := strconv.ParseUint(valueStr, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qs.settings[info.name] = func(enc *binary.Encoder) error { return enc.Uvarint(value) }
|
||||
|
||||
case boolQS:
|
||||
valueBool, err := strconv.ParseBool(valueStr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
value := uint64(0)
|
||||
if valueBool {
|
||||
value = 1
|
||||
}
|
||||
qs.settings[info.name] = func(enc *binary.Encoder) error { return enc.Uvarint(value) }
|
||||
|
||||
default:
|
||||
err := fmt.Errorf("query setting %s has unsupported data type", info.name)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if qs.settingsStr != "" {
|
||||
qs.settingsStr += "&"
|
||||
}
|
||||
qs.settingsStr += info.name + "=" + valueStr
|
||||
}
|
||||
|
||||
return qs, nil
|
||||
}
|
||||
|
||||
func (qs *querySettings) IsEmpty() bool {
|
||||
return len(qs.settings) == 0
|
||||
}
|
||||
|
||||
func (qs *querySettings) Serialize(enc *binary.Encoder) error {
|
||||
for name, fn := range qs.settings {
|
||||
if err := enc.String(name); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := fn(enc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@@ -1,8 +0,0 @@
package clickhouse

import "errors"

type result struct{}

func (*result) LastInsertId() (int64, error) { return 0, errors.New("LastInsertId is not supported") }
func (*result) RowsAffected() (int64, error) { return 0, errors.New("RowsAffected is not supported") }

@@ -1,182 +0,0 @@
|
|||
package clickhouse
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/lib/column"
|
||||
"github.com/ClickHouse/clickhouse-go/lib/data"
|
||||
"github.com/ClickHouse/clickhouse-go/lib/protocol"
|
||||
)
|
||||
|
||||
type rows struct {
|
||||
ch *clickhouse
|
||||
err error
|
||||
mutex sync.RWMutex
|
||||
finish func()
|
||||
offset int
|
||||
block *data.Block
|
||||
totals *data.Block
|
||||
extremes *data.Block
|
||||
stream chan *data.Block
|
||||
columns []string
|
||||
blockColumns []column.Column
|
||||
}
|
||||
|
||||
func (rows *rows) Columns() []string {
|
||||
return rows.columns
|
||||
}
|
||||
|
||||
func (rows *rows) ColumnTypeScanType(idx int) reflect.Type {
|
||||
return rows.blockColumns[idx].ScanType()
|
||||
}
|
||||
|
||||
func (rows *rows) ColumnTypeDatabaseTypeName(idx int) string {
|
||||
return rows.blockColumns[idx].CHType()
|
||||
}
|
||||
|
||||
func (rows *rows) Next(dest []driver.Value) error {
|
||||
if rows.block == nil || int(rows.block.NumRows) <= rows.offset {
|
||||
switch block, ok := <-rows.stream; true {
|
||||
case !ok:
|
||||
if err := rows.error(); err != nil {
|
||||
return err
|
||||
}
|
||||
return io.EOF
|
||||
default:
|
||||
rows.block = block
|
||||
rows.offset = 0
|
||||
}
|
||||
}
|
||||
for i := range dest {
|
||||
dest[i] = rows.block.Values[i][rows.offset]
|
||||
}
|
||||
rows.offset++
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rows *rows) HasNextResultSet() bool {
|
||||
return rows.totals != nil || rows.extremes != nil
|
||||
}
|
||||
|
||||
func (rows *rows) NextResultSet() error {
|
||||
switch {
|
||||
case rows.totals != nil:
|
||||
rows.block = rows.totals
|
||||
rows.offset = 0
|
||||
rows.totals = nil
|
||||
case rows.extremes != nil:
|
||||
rows.block = rows.extremes
|
||||
rows.offset = 0
|
||||
rows.extremes = nil
|
||||
default:
|
||||
return io.EOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rows *rows) receiveData() error {
|
||||
defer close(rows.stream)
|
||||
var (
|
||||
err error
|
||||
packet uint64
|
||||
progress *progress
|
||||
profileInfo *profileInfo
|
||||
)
|
||||
for {
|
||||
if packet, err = rows.ch.decoder.Uvarint(); err != nil {
|
||||
return rows.setError(err)
|
||||
}
|
||||
switch packet {
|
||||
case protocol.ServerException:
|
||||
rows.ch.logf("[rows] <- exception")
|
||||
return rows.setError(rows.ch.exception())
|
||||
case protocol.ServerProgress:
|
||||
if progress, err = rows.ch.progress(); err != nil {
|
||||
return rows.setError(err)
|
||||
}
|
||||
rows.ch.logf("[rows] <- progress: rows=%d, bytes=%d, total rows=%d",
|
||||
progress.rows,
|
||||
progress.bytes,
|
||||
progress.totalRows,
|
||||
)
|
||||
case protocol.ServerProfileInfo:
|
||||
if profileInfo, err = rows.ch.profileInfo(); err != nil {
|
||||
return rows.setError(err)
|
||||
}
|
||||
rows.ch.logf("[rows] <- profiling: rows=%d, bytes=%d, blocks=%d", profileInfo.rows, profileInfo.bytes, profileInfo.blocks)
|
||||
case protocol.ServerData, protocol.ServerTotals, protocol.ServerExtremes:
|
||||
var (
|
||||
block *data.Block
|
||||
begin = time.Now()
|
||||
)
|
||||
if block, err = rows.ch.readBlock(); err != nil {
|
||||
return rows.setError(err)
|
||||
}
|
||||
rows.ch.logf("[rows] <- data: packet=%d, columns=%d, rows=%d, elapsed=%s", packet, block.NumColumns, block.NumRows, time.Since(begin))
|
||||
if block.NumRows == 0 {
|
||||
continue
|
||||
}
|
||||
switch packet {
|
||||
case protocol.ServerData:
|
||||
rows.stream <- block
|
||||
case protocol.ServerTotals:
|
||||
rows.totals = block
|
||||
case protocol.ServerExtremes:
|
||||
rows.extremes = block
|
||||
}
|
||||
case protocol.ServerEndOfStream:
|
||||
rows.ch.logf("[rows] <- end of stream")
|
||||
return nil
|
||||
default:
|
||||
rows.ch.conn.Close()
|
||||
rows.ch.logf("[rows] unexpected packet [%d]", packet)
|
||||
return rows.setError(fmt.Errorf("[rows] unexpected packet [%d] from server", packet))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (rows *rows) Close() error {
|
||||
rows.ch.logf("[rows] close")
|
||||
rows.columns = nil
|
||||
for range rows.stream {
|
||||
}
|
||||
rows.finish()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rows *rows) error() error {
|
||||
rows.mutex.RLock()
|
||||
defer rows.mutex.RUnlock()
|
||||
return rows.err
|
||||
}
|
||||
|
||||
func (rows *rows) setError(err error) error {
|
||||
rows.mutex.Lock()
|
||||
rows.err = err
|
||||
rows.mutex.Unlock()
|
||||
return err
|
||||
}
|
||||
|
||||
func (rows *rows) ColumnTypeNullable(idx int) (nullable, ok bool) {
|
||||
_, ok = rows.blockColumns[idx].(*column.Nullable)
|
||||
return ok, true
|
||||
}
|
||||
|
||||
func (rows *rows) ColumnTypePrecisionScale(idx int) (precision, scale int64, ok bool) {
|
||||
decimalVal, ok := rows.blockColumns[idx].(*column.Decimal)
|
||||
if !ok {
|
||||
if nullable, nullOk := rows.blockColumns[idx].(*column.Nullable); nullOk {
|
||||
decimalVal, ok = nullable.GetColumn().(*column.Decimal)
|
||||
}
|
||||
}
|
||||
if ok {
|
||||
return int64(decimalVal.GetPrecision()), int64(decimalVal.GetScale()), ok
|
||||
|
||||
}
|
||||
return 0, 0, false
|
||||
}
|
|

@@ -1,185 +0,0 @@
package clickhouse

import (
	"bytes"
	"context"
	"database/sql/driver"
	"unicode"

	"github.com/ClickHouse/clickhouse-go/lib/data"
)

type stmt struct {
	ch       *clickhouse
	query    string
	counter  int
	numInput int
	isInsert bool
}

var emptyResult = &result{}

func (stmt *stmt) NumInput() int {
	switch {
	case stmt.ch.block != nil:
		return len(stmt.ch.block.Columns)
	case stmt.numInput < 0:
		return 0
	}
	return stmt.numInput
}

func (stmt *stmt) Exec(args []driver.Value) (driver.Result, error) {
	return stmt.execContext(context.Background(), args)
}

func (stmt *stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
	dargs := make([]driver.Value, len(args))
	for i, nv := range args {
		dargs[i] = nv.Value
	}
	return stmt.execContext(ctx, dargs)
}

func (stmt *stmt) execContext(ctx context.Context, args []driver.Value) (driver.Result, error) {
	if stmt.isInsert {
		stmt.counter++
		if err := stmt.ch.block.AppendRow(args); err != nil {
			return nil, err
		}
		if (stmt.counter % stmt.ch.blockSize) == 0 {
			stmt.ch.logf("[exec] flush block")
			if err := stmt.ch.writeBlock(stmt.ch.block); err != nil {
				return nil, err
			}
			if err := stmt.ch.encoder.Flush(); err != nil {
				return nil, err
			}
		}
		return emptyResult, nil
	}
	if err := stmt.ch.sendQuery(stmt.bind(convertOldArgs(args))); err != nil {
		return nil, err
	}
	if err := stmt.ch.process(); err != nil {
		return nil, err
	}
	return emptyResult, nil
}

func (stmt *stmt) Query(args []driver.Value) (driver.Rows, error) {
	return stmt.queryContext(context.Background(), convertOldArgs(args))
}

func (stmt *stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
	return stmt.queryContext(ctx, args)
}

func (stmt *stmt) queryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
	finish := stmt.ch.watchCancel(ctx)
	if err := stmt.ch.sendQuery(stmt.bind(args)); err != nil {
		finish()
		return nil, err
	}
	meta, err := stmt.ch.readMeta()
	if err != nil {
		finish()
		return nil, err
	}
	rows := rows{
		ch:           stmt.ch,
		finish:       finish,
		stream:       make(chan *data.Block, 50),
		columns:      meta.ColumnNames(),
		blockColumns: meta.Columns,
	}
	go rows.receiveData()
	return &rows, nil
}

func (stmt *stmt) Close() error {
	stmt.ch.logf("[stmt] close")
	return nil
}

func (stmt *stmt) bind(args []driver.NamedValue) string {
	var (
		buf       bytes.Buffer
		index     int
		keyword   bool
		inBetween bool
		like      = newMatcher("like")
		limit     = newMatcher("limit")
		between   = newMatcher("between")
		and       = newMatcher("and")
	)
	switch {
	case stmt.NumInput() != 0:
		reader := bytes.NewReader([]byte(stmt.query))
		for {
			if char, _, err := reader.ReadRune(); err == nil {
				switch char {
				case '@':
					if param := paramParser(reader); len(param) != 0 {
						for _, v := range args {
							if len(v.Name) != 0 && v.Name == param {
								buf.WriteString(quote(v.Value))
							}
						}
					}
				case '?':
					if keyword && index < len(args) && len(args[index].Name) == 0 {
						buf.WriteString(quote(args[index].Value))
						index++
					} else {
						buf.WriteRune(char)
					}
				default:
					switch {
					case
						char == '=',
						char == '<',
						char == '>',
						char == '(',
						char == ',',
						char == '+',
						char == '-',
						char == '*',
						char == '/',
						char == '[':
						keyword = true
					default:
						if limit.matchRune(char) || like.matchRune(char) {
							keyword = true
						} else if between.matchRune(char) {
							keyword = true
							inBetween = true
						} else if inBetween && and.matchRune(char) {
							keyword = true
							inBetween = false
						} else {
							keyword = keyword && unicode.IsSpace(char)
						}
					}
					buf.WriteRune(char)
				}
			} else {
				break
			}
		}
	default:
		buf.WriteString(stmt.query)
	}
	return buf.String()
}

func convertOldArgs(args []driver.Value) []driver.NamedValue {
	dargs := make([]driver.NamedValue, len(args))
	for i, v := range args {
		dargs[i] = driver.NamedValue{
			Ordinal: i + 1,
			Value:   v,
		}
	}
	return dargs
}

@@ -1,45 +0,0 @@
package clickhouse

import (
	"crypto/tls"
	"sync"
)

// Based on the original implementation in the project go-sql-driver/mysql:
// https://github.com/go-sql-driver/mysql/blob/master/utils.go

var (
	tlsConfigLock     sync.RWMutex
	tlsConfigRegistry map[string]*tls.Config
)

// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open.
func RegisterTLSConfig(key string, config *tls.Config) error {
	tlsConfigLock.Lock()
	if tlsConfigRegistry == nil {
		tlsConfigRegistry = make(map[string]*tls.Config)
	}

	tlsConfigRegistry[key] = config
	tlsConfigLock.Unlock()
	return nil
}

// DeregisterTLSConfig removes the tls.Config associated with key.
func DeregisterTLSConfig(key string) {
	tlsConfigLock.Lock()
	if tlsConfigRegistry != nil {
		delete(tlsConfigRegistry, key)
	}
	tlsConfigLock.Unlock()
}

func getTLSConfigClone(key string) (config *tls.Config) {
	tlsConfigLock.RLock()
	if v, ok := tlsConfigRegistry[key]; ok {
		config = v.Clone()
	}
	tlsConfigLock.RUnlock()
	return
}

@@ -1,31 +0,0 @@
package clickhouse

import (
	"strings"
	"unicode"
)

// wordMatcher is a simple automata to match a single word (case insensitive)
type wordMatcher struct {
	word     []rune
	position uint8
}

// newMatcher returns matcher for word needle
func newMatcher(needle string) *wordMatcher {
	return &wordMatcher{word: []rune(strings.ToUpper(needle)),
		position: 0}
}

func (m *wordMatcher) matchRune(r rune) bool {
	if m.word[m.position] == unicode.ToUpper(r) {
		if m.position == uint8(len(m.word)-1) {
			m.position = 0
			return true
		}
		m.position++
	} else {
		m.position = 0
	}
	return false
}

@@ -1,54 +0,0 @@
package clickhouse

import (
	"database/sql"
	"database/sql/driver"
	"time"

	"github.com/ClickHouse/clickhouse-go/lib/data"
)

// Interface for Clickhouse driver
type Clickhouse interface {
	Block() (*data.Block, error)
	Prepare(query string) (driver.Stmt, error)
	Begin() (driver.Tx, error)
	Commit() error
	Rollback() error
	Close() error
	WriteBlock(block *data.Block) error
}

// Interface for Block allowing writes to individual columns
type ColumnWriter interface {
	WriteDate(c int, v time.Time) error
	WriteDateTime(c int, v time.Time) error
	WriteUInt8(c int, v uint8) error
	WriteUInt16(c int, v uint16) error
	WriteUInt32(c int, v uint32) error
	WriteUInt64(c int, v uint64) error
	WriteFloat32(c int, v float32) error
	WriteFloat64(c int, v float64) error
	WriteBytes(c int, v []byte) error
	WriteArray(c int, v interface{}) error
	WriteString(c int, v string) error
	WriteFixedString(c int, v []byte) error
}

func OpenDirect(dsn string) (Clickhouse, error) {
	return open(dsn)
}

func (ch *clickhouse) Block() (*data.Block, error) {
	if ch.block == nil {
		return nil, sql.ErrTxDone
	}
	return ch.block, nil
}

func (ch *clickhouse) WriteBlock(block *data.Block) error {
	if block == nil {
		return sql.ErrTxDone
	}
	return ch.writeBlock(block)
}

@@ -1,22 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe

@@ -1,27 +0,0 @@
Copyright (c) 2013 CloudFlare, Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

 * Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.

 * Redistributions in binary form must reproduce the above copyright notice, this
   list of conditions and the following disclaimer in the documentation and/or
   other materials provided with the distribution.

 * Neither the name of the CloudFlare, Inc. nor the names of its
   contributors may be used to endorse or promote products derived from
   this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@@ -1,14 +0,0 @@
GCFLAGS :=
LDFLAGS :=

.PHONY: install
install:
	@go install -v .

.PHONY: test
test:
	@go test -gcflags='$(GCFLAGS)' -ldflags='$(LDFLAGS)' .

.PHONY: bench
bench:
	@go test -gcflags='$(GCFLAGS)' -ldflags='$(LDFLAGS)' -bench .

@@ -1,4 +0,0 @@
golz4
=====

Golang interface to LZ4 compression

@@ -1,4 +0,0 @@
// Package lz4 implements compression using lz4.c and lz4hc.c
//
// Copyright (c) 2013 CloudFlare, Inc.
package lz4

Some files were not shown because too many files have changed in this diff.