Skip to content

Commit 069f2c6

Browse files
authored
feat: add dockertest instance bootstrapper (#54)
* feat: add dockertest instance bootstrapper * fix: go mod tidy
1 parent 10d42d9 commit 069f2c6

File tree

13 files changed

+1307
-30
lines changed

13 files changed

+1307
-30
lines changed

.gitignore

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,4 +17,6 @@
1717
.idea
1818
.vscode
1919
expt/
20-
temp.env
20+
temp.env
21+
.DS_Store
22+
temp.env

db/db_test.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,8 +11,8 @@ import (
1111

1212
"github.com/jmoiron/sqlx"
1313
"github.com/odpf/salt/db"
14-
"github.com/ory/dockertest"
15-
"github.com/ory/dockertest/docker"
14+
"github.com/ory/dockertest/v3"
15+
"github.com/ory/dockertest/v3/docker"
1616
"github.com/stretchr/testify/assert"
1717
)
1818

dockertest/README.md

Lines changed: 76 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,76 @@
1+
# dockertest
2+
3+
This package is an abstraction over several dockerized data stores, using `ory/dockertest` to bootstrap a specific dockerized instance.
4+
5+
Example postgres
6+
7+
```go
8+
// create postgres instance
9+
pgDocker, err := dockertest.CreatePostgres(
10+
dockertest.PostgresWithDetail(
11+
pgUser, pgPass, pgDBName,
12+
),
13+
)
14+
15+
// get connection string
16+
connString := pgDocker.GetExternalConnString()
17+
18+
// purge docker
19+
if err := pgDocker.GetPool().Purge(pgDocker.GetResouce()); err != nil {
20+
return fmt.Errorf("could not purge resource: %w", err)
21+
}
22+
```
23+
24+
Example spice db
25+
26+
- bootstrap SpiceDB with Postgres and wire them together internally via a network bridge
27+
28+
```go
29+
// create custom pool
30+
pool, err := dockertest.NewPool("")
31+
if err != nil {
32+
return nil, err
33+
}
34+
35+
// create a bridge network for testing
36+
network, err = pool.Client.CreateNetwork(docker.CreateNetworkOptions{
37+
Name: fmt.Sprintf("bridge-%s", uuid.New().String()),
38+
})
39+
if err != nil {
40+
return nil, err
41+
}
42+
43+
44+
// create postgres instance
45+
pgDocker, err := dockertest.CreatePostgres(
46+
dockertest.PostgresWithDockerPool(pool),
47+
dockertest.PostgresWithDockerNetwork(network),
48+
dockertest.PostgresWithDetail(
49+
pgUser, pgPass, pgDBName,
50+
),
51+
)
52+
53+
// get connection string
54+
connString := pgDocker.GetInternalConnString()
55+
56+
// create spice db instance
57+
spiceDocker, err := dockertest.CreateSpiceDB(connString,
58+
dockertest.SpiceDBWithDockerPool(pool),
59+
dockertest.SpiceDBWithDockerNetwork(network),
60+
)
61+
62+
if err := dockertest.MigrateSpiceDB(connString,
63+
dockertest.MigrateSpiceDBWithDockerPool(pool),
64+
dockertest.MigrateSpiceDBWithDockerNetwork(network),
65+
); err != nil {
66+
return err
67+
}
68+
69+
// purge docker resources
70+
if err := pool.Purge(spiceDocker.GetResouce()); err != nil {
71+
return fmt.Errorf("could not purge resource: %w", err)
72+
}
73+
if err := pool.Purge(pgDocker.GetResouce()); err != nil {
74+
return fmt.Errorf("could not purge resource: %w", err)
75+
}
76+
```
Lines changed: 121 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,121 @@
1+
# Configuration for running Cortex in single-process mode.
2+
# This configuration should not be used in production.
3+
# It is only for getting started and development.
4+
5+
# Disable the requirement that every request to Cortex has a
6+
# X-Scope-OrgID header. `fake` will be substituted in instead.
7+
auth_enabled: false
8+
9+
server:
10+
http_listen_port: 9009
11+
12+
# Configure the server to allow messages up to 100MB.
13+
grpc_server_max_recv_msg_size: 104857600
14+
grpc_server_max_send_msg_size: 104857600
15+
grpc_server_max_concurrent_streams: 1000
16+
17+
distributor:
18+
shard_by_all_labels: true
19+
pool:
20+
health_check_ingesters: true
21+
22+
ingester_client:
23+
grpc_client_config:
24+
# Configure the client to allow messages up to 100MB.
25+
max_recv_msg_size: 104857600
26+
max_send_msg_size: 104857600
27+
grpc_compression: gzip
28+
29+
ingester:
30+
# We want our ingesters to flush chunks at the same time to optimise
31+
# deduplication opportunities.
32+
spread_flushes: true
33+
chunk_age_jitter: 0
34+
35+
walconfig:
36+
wal_enabled: true
37+
recover_from_wal: true
38+
wal_dir: /tmp/cortex/wal
39+
40+
lifecycler:
41+
# The address to advertise for this ingester. Will be autodiscovered by
42+
# looking up address on eth0 or en0; can be specified if this fails.
43+
# address: 127.0.0.1
44+
45+
# We want to start immediately and flush on shutdown.
46+
join_after: 0
47+
min_ready_duration: 0s
48+
final_sleep: 0s
49+
num_tokens: 512
50+
tokens_file_path: /tmp/cortex/wal/tokens
51+
52+
# Use an in memory ring store, so we don't need to launch a Consul.
53+
ring:
54+
kvstore:
55+
store: inmemory
56+
replication_factor: 1
57+
58+
# Use local storage - BoltDB for the index, and the filesystem
59+
# for the chunks.
60+
schema:
61+
configs:
62+
- from: 2019-07-29
63+
store: boltdb
64+
object_store: filesystem
65+
schema: v10
66+
index:
67+
prefix: index_
68+
period: 1w
69+
70+
storage:
71+
boltdb:
72+
directory: /tmp/cortex/index
73+
74+
filesystem:
75+
directory: /tmp/cortex/chunks
76+
77+
delete_store:
78+
store: boltdb
79+
80+
purger:
81+
object_store_type: filesystem
82+
83+
frontend_worker:
84+
# Configure the frontend worker in the querier to match worker count
85+
# to max_concurrent on the queriers.
86+
match_max_concurrent: true
87+
88+
# Configure the ruler to scan the /tmp/cortex/rules directory for prometheus
89+
# rules: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#recording-rules
90+
ruler:
91+
enable_api: true
92+
enable_sharding: false
93+
# alertmanager_url: http://cortex-am:9009/api/prom/alertmanager/
94+
rule_path: /tmp/cortex/rules
95+
storage:
96+
type: s3
97+
s3:
98+
# endpoint: http://minio1:9000
99+
bucketnames: cortex
100+
secret_access_key: minio123
101+
access_key_id: minio
102+
s3forcepathstyle: true
103+
104+
alertmanager:
105+
enable_api: true
106+
sharding_enabled: false
107+
data_dir: data/
108+
external_url: /api/prom/alertmanager
109+
storage:
110+
type: s3
111+
s3:
112+
# endpoint: http://minio1:9000
113+
bucketnames: cortex
114+
secret_access_key: minio123
115+
access_key_id: minio
116+
s3forcepathstyle: true
117+
118+
alertmanager_storage:
119+
backend: local
120+
local:
121+
path: tmp/cortex/alertmanager

0 commit comments

Comments
 (0)