# Environment variable overrides
# PGSync
# path to the application schema config
# SCHEMA='/path/to/schema.json'
# number of records to fetch from db at a time
# QUERY_CHUNK_SIZE=10000
# db polling interval (consider reducing this duration to increase throughput)
# POLL_TIMEOUT=0.1
# replication slot cleanup interval (in secs)
# REPLICATION_SLOT_CLEANUP_INTERVAL=180
# checkpoint file path
# CHECKPOINT_PATH=./
# block size for parallel sync
# BLOCK_SIZE=2048*10
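# note: 2048*10 works out to 20480 rows per block; if your env parser
# does not evaluate arithmetic expressions, set the literal instead,
# e.g. BLOCK_SIZE=20480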
# QUERY_LITERAL_BINDS=False
# number of threads to spawn for polling the db
# NTHREADS_POLLDB=1
# batch size for LOGICAL_SLOT_CHANGES, to minimize tmp file disk usage
# LOGICAL_SLOT_CHUNK_SIZE=5000
# USE_ASYNC=False
# JOIN_QUERIES=True
# STREAM_RESULTS=True
# Elasticsearch
# ELASTICSEARCH_SCHEME=http
# ELASTICSEARCH_HOST=localhost
# ELASTICSEARCH_PORT=9200
# ELASTICSEARCH_USER=nobody
# ELASTICSEARCH_PASSWORD=PLEASE_REPLACE_ME
# increase this if you are getting read request timeouts
# ELASTICSEARCH_TIMEOUT=10
# number of documents to index at a time
# ELASTICSEARCH_CHUNK_SIZE=2000
# the maximum size of the request in bytes (default: 100MB)
# ELASTICSEARCH_MAX_CHUNK_BYTES=104857600
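# sanity check: 100MB = 100 * 1024 * 1024 = 104857600 bytes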
# the size of the threadpool to use for the bulk requests
# ELASTICSEARCH_THREAD_COUNT=4
# the size of the task queue between the main thread
# (producing chunks to send) and the processing threads.
# ELASTICSEARCH_QUEUE_SIZE=4
# turn on SSL
# ELASTICSEARCH_USE_SSL=False
# don't show warnings about SSL certificate verification
# ELASTICSEARCH_SSL_SHOW_WARN=False
# provide a path to CA certs on disk
# ELASTICSEARCH_CA_CERTS=/path/to/ca.cert
# PEM formatted SSL client certificate
# ELASTICSEARCH_CLIENT_CERT=/path/to/cert.pem
# PEM formatted SSL client key
# ELASTICSEARCH_CLIENT_KEY=/path/to/ssl.key
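# a minimal sketch of a TLS-enabled Elasticsearch config using the
# settings above (the cert path is a placeholder, not a default):
# ELASTICSEARCH_SCHEME=https
# ELASTICSEARCH_USE_SSL=True
# ELASTICSEARCH_CA_CERTS=/etc/ssl/certs/es-ca.cert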
# ELASTICSEARCH_AWS_REGION=eu-west-1
# ELASTICSEARCH_AWS_HOSTED=True
# ELASTICSEARCH_STREAMING_BULK=False
# maximum number of times a document will be retried when ``429`` is received,
# set to 0 (default) for no retries on ``429``
# ELASTICSEARCH_MAX_RETRIES=0
# number of seconds we should wait before the first retry.
# Any subsequent retries will be powers of ``initial_backoff * 2**retry_number``
# ELASTICSEARCH_INITIAL_BACKOFF=2
# maximum number of seconds a retry will wait
# ELASTICSEARCH_MAX_BACKOFF=600
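# worked example with the values above: waits are 2 * 2**0 = 2s,
# 2 * 2**1 = 4s, 2 * 2**2 = 8s, and so on, capped at
# ELASTICSEARCH_MAX_BACKOFF (600s)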
# if ``False``, don't propagate exceptions from calls to elasticsearch ``bulk``
# ELASTICSEARCH_RAISE_ON_EXCEPTION=True
# ELASTICSEARCH_RAISE_ON_ERROR=True
# ELASTICSEARCH_API_KEY_ID=PLEASE_REPLACE_ME
# ELASTICSEARCH_API_KEY=PLEASE_REPLACE_ME
# Postgres
# PG_HOST=localhost
# PG_USER=i-am-root
# PG_PORT=5432
# PG_PASSWORD=PLEASE_REPLACE_ME
# PG_SSLMODE=require
# PG_SSLROOTCERT=/path/to/ca.cert
# Redis
# REDIS_HOST=localhost
# REDIS_PORT=6379
# REDIS_DB=0
# REDIS_AUTH=PLEASE_REPLACE_ME
# number of items to read from Redis at a time
# REDIS_READ_CHUNK_SIZE=1000
# number of items to write to Redis at a time
# REDIS_WRITE_CHUNK_SIZE=1000
# redis socket connection timeout
# REDIS_SOCKET_TIMEOUT=5
# REDIS_POLL_INTERVAL=0.01
# REDIS_SCHEME=redis
# Logging
# CRITICAL - 50
# ERROR - 40
# WARNING - 30
# INFO - 20
# DEBUG - 10
CONSOLE_LOGGING_HANDLER_MIN_LEVEL=DEBUG
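# per-logger overrides below are comma-separated logger=LEVEL pairs;
# other logger names (e.g. urllib3=ERROR) should also work, assuming
# they match the Python logger name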
CUSTOM_LOGGING=elasticsearch=WARNING,pgsync=INFO
LOG_INTERVAL=0.5
# New Relic
# NEW_RELIC_ENVIRONMENT=development
# NEW_RELIC_APP_NAME=PGSync
# NEW_RELIC_LOG_LEVEL=critical
# NEW_RELIC_LICENSE_KEY=*********