sentry迁移至k8s(20.8.0)

因业务需要,需搭建sentry前端监控,并且能承受2000qps的错误信息上报

之前已经在k8s里简单搭建过一套sentry(helm),但是版本是2年前的9.1.2,太老了,所以要搭建新版本sentry(20.8.0(同事之前使用官方docker-compose启动的版本,因为前端已经接了一点,就没用最新版本))

迁移大致规划:

image-20210510145031594

迁移大致经历:

先将官方docker-compose启动,再将容器迁移至k8s,所有数据都用数据卷迁移过去

开始以为简单将sentry官方提供的docker-compose中所有容器改为pod就好,然而当全部迁移完后前端上传错误接口出现CORS跨域问题(官方搭建并没有此问题),后经多次尝试后将web,relay,api,nginx这四个容器放到一个pod就解决了,在中途还遇到relay程序无法多副本注册等问题

项目:

https://github.com/getsentry/onpremise 官方docker-compose部署文档

优化:

sentry.conf.py

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
# Silence urllib3 TLS warnings (urllib3 is already imported by
# sentry.conf.server, which this config file extends).
urllib3.disable_warnings()

# Kafka client options shared by Sentry producers/consumers.
DEFAULT_KAFKA_OPTIONS = {
    "bootstrap.servers": "kafka:9092",
    # "bootstrap.servers": "kafka.kafka.svc.nbugs.local:9092",
    # Keep in sync with the broker's KAFKA_MESSAGE_MAX_BYTES (50 MB).
    "message.max.bytes": 50000000,
    "socket.timeout.ms": 100000,
}


##############
# Web Server #
##############

SENTRY_WEB_HOST = "0.0.0.0"
SENTRY_WEB_PORT = 9000
SENTRY_WEB_OPTIONS = {
    "http": "%s:%s" % (SENTRY_WEB_HOST, SENTRY_WEB_PORT),
    "protocol": "uwsgi",
    # This is needed in order to prevent https://git.io/fj7Lw
    "uwsgi-socket": None,
    "so-keepalive": True,
    # Keep this between 15s-75s as that's what Relay supports
    # "http-keepalive": 15,
    "http-keepalive": 30,
    "http-chunked-input": True,
    # the number of web workers
    "workers": 3,
    "threads": 4,
    "memory-report": False,
    # Worker-recycling thresholds raised far above the upstream defaults so
    # uwsgi workers are effectively never cycled under the expected load.
    # "max-requests": 100000,
    "max-requests": 10000000,
    # "max-requests-delta": 500,
    "max-requests-delta": 50000,
    # "max-worker-lifetime": 86400,
    "max-worker-lifetime": 864000000,
    # Duplicate options from sentry default just so we don't get
    # bit by sentry changing a default value that we depend on.
    "thunder-lock": True,
    "log-x-forwarded-for": False,
    "buffer-size": 32768,
    "limit-post": 209715200,
    "disable-logging": True,
    "reload-on-rss": 600,
    "ignore-sigpipe": True,
    "ignore-write-errors": True,
    "disable-write-exception": True,
}

#### Custom overrides
SENTRY_RELAY_OPEN_REGISTRATION = True  # allow Relay instances to register remotely
# Was 'CST': an ambiguous abbreviation, not an IANA time-zone name, and pytz
# raises UnknownTimeZoneError for it. Use the canonical zone name instead.
SENTRY_DEFAULT_TIME_ZONE = 'Asia/Shanghai'
# TIME_ZONE = 'Asia/Shanghai'


# Generous fetch/processing timeouts and payload limits for high-volume ingestion.
SENTRY_SOURCE_FETCH_SOCKET_TIMEOUT = 20
SENTRY_SOURCE_FETCH_TIMEOUT = 200
SYMBOLICATOR_POLL_TIMEOUT = 10
SYMBOLICATOR_PROCESS_EVENT_HARD_TIMEOUT = 1200
SYMBOLICATOR_PROCESS_EVENT_WARN_TIMEOUT = 240
DATA_UPLOAD_MAX_NUMBER_FIELDS = 10000
SENTRY_DEFAULT_MAX_EVENTS_PER_MINUTE = "100%"
SENTRY_MAX_AVATAR_SIZE = 50000000
SENTRY_MAX_DICTIONARY_ITEMS = 100
SENTRY_MAX_HTTP_BODY_SIZE = 1638400
SENTRY_MAX_MESSAGE_LENGTH = 819200
SENTRY_MAX_VARIABLE_SIZE = 51200

代码修改:

relay:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
cat /docker-entrypoint.sh 
#!/usr/bin/env bash
# Relay container entrypoint: patches the relay config with this pod's IP,
# optionally enables core dumps, then execs the relay binary.
set -e
# (disabled) previously rewrote the credentials id with the pod hostname
#sed -i "s/111111111111/$HOSTNAME/" /work/.relay/credentials.json
# Substitute the MY_POD_IP placeholder in config.yml — registration-related
sed -i "s/MY_POD_IP/$MY_POD_IP/" .relay/config.yml # registration-related
# Enable core dumps. Requires privileged mode.
if [[ "${RELAY_ENABLE_COREDUMPS:-}" == "1" ]]; then
mkdir -p /var/dumps
chmod a+rwx /var/dumps
echo '/var/dumps/core.%h.%e.%t' > /proc/sys/kernel/core_pattern
ulimit -c unlimited
fi

# For compatibility with older images
if [ "$1" == "bash" ]; then
set -- bash
elif [ "$(id -u)" == "0" ]; then
# running as root: drop privileges to the 'relay' user via gosu
set -- gosu relay /bin/relay "$@"
else
set -- /bin/relay "$@"
fi

exec "$@"

config.yml
relay:
  # MY_POD_IP is substituted at startup by docker-entrypoint.sh.
  upstream: "http://MY_POD_IP:9000/"
  host: 0.0.0.0
  port: 3000
logging:
  level: DEBUG
processing:
  enabled: true
  kafka_config:
    - {name: "bootstrap.servers", value: "kafka:9092"}
    # Was 5000000000 (5 GB) — that exceeds the broker's
    # KAFKA_MESSAGE_MAX_BYTES (50000000 = 50 MB); keep them consistent.
    - {name: "message.max.bytes", value: 50000000}
  redis: "redis://redis:6379"


cat credentials.json
{"secret_key":"T54-TgzbtoLO02iCxysVNno5hORLARnRbYCjuvueWHg","public_key":"GegncsiiQXC2VEdo9mFX695CQ-T0-mCMfpXunN2kOaQ","id":"a1dc0a56-3342-4f73-874d-111111111112"}

worker

1
2
3
4
errormapping.py
REACT_MAPPING_URL = (
"https://raw.githubusercontent.com/facebook/" "react/main/scripts/error-codes/codes.json"
)

post-process-forwarder 这个服务是个单点

分析docker-compose文件:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
version: '3.4'
x-restart-policy: &restart_policy #这是定义一个模板让下面services使用
restart: unless-stopped
x-sentry-defaults: &sentry_defaults
<< : *restart_policy
build:
context: ./sentry
args:
- SENTRY_IMAGE
image: sentry-onpremise-local
depends_on:
- redis
- postgres
- memcached
- smtp
- snuba-api
- snuba-consumer
- snuba-outcomes-consumer
- snuba-sessions-consumer
- snuba-transactions-consumer
- snuba-replacer
- symbolicator
- kafka
environment:
SENTRY_CONF: '/etc/sentry'
SNUBA: 'http://snuba-api:1218'
volumes:
- 'sentry-data:/data'
- './sentry:/etc/sentry'
x-snuba-defaults: &snuba_defaults
<< : *restart_policy
depends_on:
- redis
- clickhouse
- kafka
image: '$SNUBA_IMAGE'
environment:
SNUBA_SETTINGS: docker
CLICKHOUSE_HOST: clickhouse
DEFAULT_BROKERS: 'kafka:9092'
REDIS_HOST: redis
UWSGI_MAX_REQUESTS: '10000'
UWSGI_DISABLE_LOGGING: 'true'
services:
smtp:
<< : *restart_policy
image: tianon/exim4
volumes:
- 'sentry-smtp:/var/spool/exim4'
- 'sentry-smtp-log:/var/log/exim4'
memcached:
<< : *restart_policy
image: 'memcached:1.5-alpine'
redis:
<< : *restart_policy
image: 'redis:5.0-alpine'
volumes:
- 'sentry-redis:/data'
postgres:
<< : *restart_policy
image: 'postgres:9.6'
environment:
POSTGRES_HOST_AUTH_METHOD: 'trust'
volumes:
- 'sentry-postgres:/var/lib/postgresql/data'
zookeeper:
<< : *restart_policy
image: 'confluentinc/cp-zookeeper:5.5.0'
environment:
ZOOKEEPER_CLIENT_PORT: '2181'
CONFLUENT_SUPPORT_METRICS_ENABLE: 'false'
ZOOKEEPER_LOG4J_ROOT_LOGLEVEL: 'WARN'
ZOOKEEPER_TOOLS_LOG4J_LOGLEVEL: 'WARN'
volumes:
- 'sentry-zookeeper:/var/lib/zookeeper/data'
- 'sentry-zookeeper-log:/var/lib/zookeeper/log'
- 'sentry-secrets:/etc/zookeeper/secrets'
kafka:
<< : *restart_policy
depends_on:
- zookeeper
image: 'confluentinc/cp-kafka:5.5.0'
environment:
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka:9092'
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: '1'
KAFKA_MESSAGE_MAX_BYTES: '50000000' #50MB or bust
KAFKA_MAX_REQUEST_SIZE: '50000000' #50MB on requests apparently too
CONFLUENT_SUPPORT_METRICS_ENABLE: 'false'
KAFKA_LOG4J_LOGGERS: 'kafka.cluster=WARN,kafka.controller=WARN,kafka.coordinator=WARN,kafka.log=WARN,kafka.server=WARN,kafka.zookeeper=WARN,state.change.logger=WARN'
KAFKA_LOG4J_ROOT_LOGLEVEL: 'WARN'
KAFKA_TOOLS_LOG4J_LOGLEVEL: 'WARN'
volumes:
- 'sentry-kafka:/var/lib/kafka/data'
- 'sentry-kafka-log:/var/lib/kafka/log'
- 'sentry-secrets:/etc/kafka/secrets'
clickhouse:
<< : *restart_policy
image: 'yandex/clickhouse-server:19.17'
ulimits:
nofile:
soft: 262144
hard: 262144
volumes:
- 'sentry-clickhouse:/var/lib/clickhouse'
- 'sentry-clickhouse-log:/var/log/clickhouse-server'
snuba-api:
<< : *snuba_defaults
# Kafka consumer responsible for feeding events into Clickhouse
snuba-consumer:
<< : *snuba_defaults
command: consumer --storage events --auto-offset-reset=latest --max-batch-time-ms 750
# Kafka consumer responsible for feeding outcomes into Clickhouse
# Use --auto-offset-reset=earliest to recover up to 7 days of TSDB data
# since we did not do a proper migration
snuba-outcomes-consumer:
<< : *snuba_defaults
command: consumer --storage outcomes_raw --auto-offset-reset=earliest --max-batch-time-ms 750
# Kafka consumer responsible for feeding session data into Clickhouse
snuba-sessions-consumer:
<< : *snuba_defaults
command: consumer --storage sessions_raw --auto-offset-reset=latest --max-batch-time-ms 750
# Kafka consumer responsible for feeding transactions data into Clickhouse
snuba-transactions-consumer:
<< : *snuba_defaults
command: consumer --storage transactions --consumer-group transactions_group --auto-offset-reset=latest --max-batch-time-ms 750
snuba-replacer:
<< : *snuba_defaults
command: replacer --storage events --auto-offset-reset=latest --max-batch-size 3
snuba-cleanup:
<< : *snuba_defaults
image: snuba-cleanup-onpremise-local
build:
context: ./cron
args:
BASE_IMAGE: '$SNUBA_IMAGE'
command: '"*/5 * * * * gosu snuba snuba cleanup --dry-run False"'
symbolicator:
<< : *restart_policy
image: '$SYMBOLICATOR_IMAGE'
volumes:
- 'sentry-symbolicator:/data'
- type: bind
read_only: true
source: ./symbolicator
target: /etc/symbolicator
command: run -c /etc/symbolicator/config.yml
symbolicator-cleanup:
<< : *restart_policy
image: symbolicator-cleanup-onpremise-local
build:
context: ./cron
args:
BASE_IMAGE: '$SYMBOLICATOR_IMAGE'
command: '"55 23 * * * gosu symbolicator symbolicator cleanup"'
volumes:
- 'sentry-symbolicator:/data'
web:
<< : *sentry_defaults
cron:
<< : *sentry_defaults
command: run cron
worker:
<< : *sentry_defaults
command: run worker
ingest-consumer:
<< : *sentry_defaults
command: run ingest-consumer --all-consumer-types
post-process-forwarder:
<< : *sentry_defaults
# Increase `--commit-batch-size 1` below to deal with high-load environments.
command: run post-process-forwarder --commit-batch-size 1
sentry-cleanup:
<< : *sentry_defaults
image: sentry-cleanup-onpremise-local
build:
context: ./cron
args:
BASE_IMAGE: 'sentry-onpremise-local'
command: '"0 0 * * * gosu sentry sentry cleanup --days $SENTRY_EVENT_RETENTION_DAYS"'
nginx:
<< : *restart_policy
ports:
- '9000:80/tcp'
image: 'nginx:1.16'
volumes:
- type: bind
read_only: true
source: ./nginx
target: /etc/nginx
depends_on:
- web
- relay
relay:
<< : *restart_policy
image: '$RELAY_IMAGE'
volumes:
- type: bind
read_only: true
source: ./relay
target: /work/.relay
depends_on:
- kafka
- redis
volumes:
sentry-data:
external: true
sentry-postgres:
external: true
sentry-redis:
external: true
sentry-zookeeper:
external: true
sentry-kafka:
external: true
sentry-clickhouse:
external: true
sentry-symbolicator:
external: true
sentry-secrets:
sentry-smtp:
sentry-zookeeper-log:
sentry-kafka-log:
sentry-smtp-log:
sentry-clickhouse-log:

最终yaml文件

kafka

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
apiVersion: apps/v1
kind: Deployment
metadata:
name: nbugs-kafka
namespace: sentry
labels:
app: nbugs-kafka
spec:
selector:
matchLabels:
app: nbugs-kafka
replicas: 1
template:
metadata:
labels:
app: nbugs-kafka
spec:
containers:
- name: kafka
image: "confluentinc/cp-kafka:5.5.0"
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9092
volumeMounts:
- mountPath: /var/lib/kafka/data
name: data
- mountPath: /var/lib/kafka/log
name: log
- mountPath: /etc/kafka/secrets
name: secrets
env:
- name: KAFKA_ZOOKEEPER_CONNECT
value: 'zk-0.zk-hs.canal:2181,zk-1.zk-hs.canal:2181,zk-2.zk-hs.canal:2181'
- name: KAFKA_ADVERTISED_LISTENERS
value: 'PLAINTEXT://kafka.kafka.svc.nbugs.local:9092'
- name: KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR
value: '1'
- name: KAFKA_MESSAGE_MAX_BYTES
value: '50000000'
- name: KAFKA_MAX_REQUEST_SIZE
value: '50000000'
- name: CONFLUENT_SUPPORT_METRICS_ENABLE
value: 'false'
- name: KAFKA_LOG4J_LOGGERS
value: >-
kafka.cluster=INFO,kafka.controller=INFO,kafka.coordinator=INFO,kafka.log=INFO,kafka.server=INFO,kafka.zookeeper=INFO,state.change.logger=INFO
- name: KAFKA_LOG4J_ROOT_LOGLEVEL
value: INFO
- name: KAFKA_TOOLS_LOG4J_LOGLEVEL
value: INFO
resources:
limits:
cpu: 1000m
memory: 1024Mi
requests:
cpu: 100m
memory: 1024Mi
volumes:
- name: data
persistentVolumeClaim:
claimName: kafka-data
- name: log
persistentVolumeClaim:
claimName: kafka-log
- name: secrets
persistentVolumeClaim:
claimName: kafka-secrets


---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: kafka-data
namespace: sentry
spec:
storageClassName: ali-nas-subpath
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: kafka-log
namespace: sentry
spec:
storageClassName: ali-nas-subpath
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: kafka-secrets
namespace: sentry
spec:
storageClassName: ali-nas-subpath
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Gi
---
apiVersion: v1
kind: Service
metadata:
name: kafka
namespace: sentry
spec:
ports:
- name: tcp
port: 9092
protocol: TCP
targetPort: 9092
selector:
app: nbugs-kafka
sessionAffinity: None
type: ClusterIP


---#topic
# Raise partition counts of the Sentry/Snuba topics to 10 so multiple
# consumer replicas can share the load (--alter can only INCREASE partitions).
./bin/kafka-topics.sh --zookeeper 172.16.176.250:2181/kafka --alter --partitions 10 --topic cdc
# NOTE(review): kafka-topics --alter cannot change a topic's replication
# factor; the --replication-factor flag below is not honored — use
# kafka-reassign-partitions.sh to change replication. TODO confirm intent.
./bin/kafka-topics.sh --zookeeper 172.16.176.250:2181/kafka --alter --replication-factor 1 --partitions 50 --topic __consumer_offsets
./bin/kafka-topics.sh --zookeeper 172.16.176.250:2181/kafka --alter --partitions 10 --topic errors-replacements
./bin/kafka-topics.sh --zookeeper 172.16.176.250:2181/kafka --alter --partitions 10 --topic event-replacements
./bin/kafka-topics.sh --zookeeper 172.16.176.250:2181/kafka --alter --partitions 10 --topic events
./bin/kafka-topics.sh --zookeeper 172.16.176.250:2181/kafka --alter --partitions 10 --topic ingest-attachments
./bin/kafka-topics.sh --zookeeper 172.16.176.250:2181/kafka --alter --partitions 10 --topic ingest-events
./bin/kafka-topics.sh --zookeeper 172.16.176.250:2181/kafka --alter --partitions 10 --topic ingest-sessions
./bin/kafka-topics.sh --zookeeper 172.16.176.250:2181/kafka --alter --partitions 10 --topic ingest-transactions
./bin/kafka-topics.sh --zookeeper 172.16.176.250:2181/kafka --alter --partitions 10 --topic outcomes
./bin/kafka-topics.sh --zookeeper 172.16.176.250:2181/kafka --alter --partitions 10 --topic snuba-commit-log

memcached

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
apiVersion: apps/v1
kind: Deployment
metadata:
name: nbugs-memcached
namespace: sentry
labels:
app: nbugs-memcached
spec:
selector:
matchLabels:
app: nbugs-memcached
replicas: 1
template:
metadata:
labels:
app: nbugs-memcached
spec:
containers:
- name: nbugs-memcached
image: "memcached:1.5-alpine"
imagePullPolicy: IfNotPresent
ports:
- containerPort: 11211
resources:
limits:
cpu: 1000m
memory: 300Mi
requests:
cpu: 100m
memory: 300Mi

---
apiVersion: v1
kind: Service
metadata:
name: memcached
namespace: sentry
spec:
ports:
- name: tcp
port: 11211
protocol: TCP
targetPort: 11211
clusterIP: None
selector:
app: nbugs-memcached
sessionAffinity: None
type: ClusterIP

redis

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
apiVersion: apps/v1
kind: Deployment
metadata:
name: nbugs-redis
namespace: sentry
labels:
app: nbugs-redis
spec:
selector:
matchLabels:
app: nbugs-redis
replicas: 1
template:
metadata:
labels:
app: nbugs-redis
spec:
containers:
- name: nbugs-redis
image: "redis:5.0-alpine"
imagePullPolicy: IfNotPresent
ports:
- containerPort: 6379
volumeMounts:
- mountPath: /data
name: data
resources:
limits:
cpu: 1000m
memory: 300Mi
requests:
cpu: 100m
memory: 300Mi
volumes:
- name: data
persistentVolumeClaim:
claimName: redis-data


---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: redis-data
namespace: sentry
spec:
storageClassName: ali-nas-subpath
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Gi


---
apiVersion: v1
kind: Service
metadata:
name: redis
namespace: sentry
spec:
ports:
- name: tcp
port: 6379
protocol: TCP
targetPort: 6379
selector:
app: nbugs-redis
clusterIP: None
sessionAffinity: None
type: ClusterIP

clickhouse

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
apiVersion: apps/v1
kind: Deployment
metadata:
name: clickhouse
namespace: sentry
labels:
app: clickhouse
spec:
selector:
matchLabels:
app: clickhouse
replicas: 1
template:
metadata:
labels:
app: clickhouse
spec:
containers:
- name: clickhouse
image: "yandex/clickhouse-server:19.17"
imagePullPolicy: IfNotPresent
command:
- /entrypoint.sh
ports:
- containerPort: 8123
- containerPort: 9000
- containerPort: 9009
volumeMounts:
- mountPath: /var/lib/clickhouse
name: data
- mountPath: /var/log/clickhouse-server
name: log
resources:
limits:
cpu: 1000m
memory: 512Mi
requests:
cpu: 100m
memory: 512Mi
volumes:
- name: data
persistentVolumeClaim:
claimName: clickhouse-data
- name: log
persistentVolumeClaim:
claimName: clickhouse-log


---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: clickhouse-data
namespace: sentry
spec:
storageClassName: ali-nas-subpath
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Gi

---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: clickhouse-log
namespace: sentry
spec:
storageClassName: ali-nas-subpath
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Gi
---
apiVersion: v1
kind: Service
metadata:
name: clickhouse
namespace: sentry
spec:
ports:
- name: p1
port: 8123
protocol: TCP
targetPort: 8123
- name: p2
port: 9000
protocol: TCP
targetPort: 9000
- name: p3
port: 9009
protocol: TCP
targetPort: 9009
clusterIP: None
selector:
app: clickhouse
sessionAffinity: None
type: ClusterIP

cron

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
apiVersion: apps/v1
kind: Deployment
metadata:
name: cron
namespace: sentry
labels:
app: cron
spec:
selector:
matchLabels:
app: cron
replicas: 1
template:
metadata:
labels:
app: cron
spec:
containers:
- name: cron
image: "registry.cn-hangzhou.aliyuncs.com/nbugs-share/sentry:sentry-onpremise-local"
imagePullPolicy: IfNotPresent
command:
- "/bin/sh"
args:
- "-c"
- exec /docker-entrypoint.sh $0 $@
- run
- cron
ports:
- containerPort: 9000
env:
- name: SENTRY_CONF
value: '/etc/sentry'
- name: SNUBA
value: 'http://snuba-api:1218'
volumeMounts:
- mountPath: /data
name: data
- mountPath: /etc/sentry
name: config
resources:
limits:
cpu: 100m
memory: 300Mi
requests:
cpu: 100m
memory: 300Mi
volumes:
- name: data
persistentVolumeClaim:
claimName: sentry-data
- name: config
persistentVolumeClaim:
claimName: sentry-conf

---
apiVersion: v1
kind: Service
metadata:
name: cron
namespace: sentry
spec:
ports:
- name: tcp
port: 9000
protocol: TCP
targetPort: 9000
selector:
app: cron
type: ClusterIP

ingest-consumer

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
apiVersion: apps/v1
kind: Deployment
metadata:
name: ingest-consumer
namespace: sentry
labels:
app: ingest-consumer
spec:
selector:
matchLabels:
app: ingest-consumer
replicas: 1
template:
metadata:
labels:
app: ingest-consumer
spec:
containers:
- name: ingest-consumer
image: "registry.cn-hangzhou.aliyuncs.com/nbugs-share/sentry:sentry-onpremise-local"
imagePullPolicy: IfNotPresent
command:
- "/bin/sh"
args:
- "-c"
- exec /docker-entrypoint.sh $0 $@
- run
- ingest-consumer
- --all-consumer-types
ports:
- containerPort: 9000
env:
- name: SENTRY_CONF
value: '/etc/sentry'
- name: SNUBA
value: 'http://snuba-api:1218'
volumeMounts:
- mountPath: /data
name: data
- mountPath: /etc/sentry
name: config
resources:
limits:
cpu: 500m
memory: 300Mi
requests:
cpu: 500m
memory: 300Mi
volumes:
- name: data
persistentVolumeClaim:
claimName: sentry-data
- name: config
persistentVolumeClaim:
claimName: sentry-conf

---
apiVersion: v1
kind: Service
metadata:
name: ingest-consumer
namespace: sentry
spec:
ports:
- name: tcp
port: 9000
protocol: TCP
targetPort: 9000
selector:
app: ingest-consumer
type: ClusterIP

post-process-forwarder

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
apiVersion: apps/v1
kind: Deployment
metadata:
name: post-process-forwarder
namespace: sentry
labels:
app: post-process-forwarder
spec:
selector:
matchLabels:
app: post-process-forwarder
replicas: 1
template:
metadata:
labels:
app: post-process-forwarder
spec:
containers:
- name: post-process-forwarder
image: "registry.cn-hangzhou.aliyuncs.com/nbugs-share/sentry:sentry-onpremise-local"
imagePullPolicy: IfNotPresent
command:
- "/bin/sh"
args:
- "-c"
- exec /docker-entrypoint.sh $0 $@
- run
- post-process-forwarder
- --commit-batch-size 1
ports:
- containerPort: 9000
env:
- name: SENTRY_CONF
value: '/etc/sentry'
- name: SNUBA
value: 'http://snuba-api:1218'
volumeMounts:
- mountPath: /data
name: data
- mountPath: /etc/sentry
name: config
resources:
limits:
cpu: 500m
memory: 300Mi
requests:
cpu: 500m
memory: 300Mi
volumes:
- name: data
persistentVolumeClaim:
claimName: sentry-data
- name: config
persistentVolumeClaim:
claimName: sentry-conf


---
apiVersion: v1
kind: Service
metadata:
  # Was mistakenly named "ingest-consumer", which collides with the real
  # ingest-consumer Service defined earlier and leaves the
  # post-process-forwarder Deployment without its own Service.
  name: post-process-forwarder
  namespace: sentry
spec:
  ports:
    - name: tcp
      port: 9000
      protocol: TCP
      targetPort: 9000
  selector:
    app: post-process-forwarder
  type: ClusterIP

sentry-cleanup

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
apiVersion: apps/v1
kind: Deployment
metadata:
name: sentry-cleanup
namespace: sentry
labels:
app: sentry-cleanup
spec:
selector:
matchLabels:
app: sentry-cleanup
replicas: 1
template:
metadata:
labels:
app: sentry-cleanup
spec:
containers:
- name: sentry-cleanup
image: "registry.cn-hangzhou.aliyuncs.com/nbugs-share/sentry:sentry-cleanup-onpremise-local"
imagePullPolicy: IfNotPresent
command:
- "/entrypoint.sh"
args:
- "0 0 * * * gosu sentry sentry cleanup --days $SENTRY_EVENT_RETENTION_DAYS"
ports:
- containerPort: 9000
env:
- name: SENTRY_CONF
value: '/etc/sentry'
- name: SNUBA
value: 'http://snuba-api:1218'
volumeMounts:
- mountPath: /data
name: data
- mountPath: /etc/sentry
name: config
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
volumes:
- name: data
persistentVolumeClaim:
claimName: sentry-data
- name: config
persistentVolumeClaim:
claimName: sentry-conf
---
apiVersion: v1
kind: Service
metadata:
name: sentry-cleanup
namespace: sentry
spec:
ports:
- name: tcp
port: 9000
protocol: TCP
targetPort: 9000
selector:
app: sentry-cleanup
type: ClusterIP

smtp

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
apiVersion: apps/v1
kind: Deployment
metadata:
name: smtp
namespace: sentry
labels:
app: smtp
spec:
selector:
matchLabels:
app: smtp
replicas: 1
template:
metadata:
labels:
app: smtp
spec:
containers:
- name: smtp
image: "tianon/exim4"
imagePullPolicy: IfNotPresent
ports:
- containerPort: 25
volumeMounts:
- mountPath: /var/spool/exim4
name: data
- mountPath: /var/log/exim4
name: log
resources:
limits:
cpu: 1000m
memory: 512Mi
requests:
cpu: 100m
memory: 512Mi
volumes:
- name: data
persistentVolumeClaim:
claimName: smtp-data
- name: log
persistentVolumeClaim:
claimName: smtp-log


---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: smtp-data
namespace: sentry
spec:
storageClassName: ali-nas-subpath
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Gi

---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: smtp-log
namespace: sentry
spec:
storageClassName: ali-nas-subpath
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Gi
---
apiVersion: v1
kind: Service
metadata:
name: smtp
namespace: sentry
spec:
ports:
- name: tcp
port: 25
protocol: TCP
targetPort: 25
selector:
app: smtp
sessionAffinity: None
type: ClusterIP

snuba-cleanup

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
apiVersion: apps/v1
kind: Deployment
metadata:
name: snuba-cleanup
namespace: sentry
labels:
app: snuba-cleanup
spec:
selector:
matchLabels:
app: snuba-cleanup
replicas: 1
template:
metadata:
labels:
app: snuba-cleanup
spec:
containers:
- name: snuba-cleanup
image: "registry.cn-hangzhou.aliyuncs.com/nbugs-share/sentry:snuba-cleanup-onpremise-local"
imagePullPolicy: IfNotPresent
command:
- "/entrypoint.sh"
args:
- '*/5 * * * * gosu snuba snuba cleanup --dry-run False'
ports:
- containerPort: 1218
env:
- name: SNUBA_SETTINGS
value: docker
- name: CLICKHOUSE_HOST
value: clickhouse
- name: DEFAULT_BROKERS
valueFrom:
configMapKeyRef:
name: sentry-conf
key: kafka-host
- name: REDIS_HOST
value: redis
- name: UWSGI_MAX_REQUESTS
value: '10000'
- name: UWSGI_DISABLE_LOGGING
value: 'true'
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi

---
apiVersion: v1
kind: Service
metadata:
name: snuba-cleanup
namespace: sentry
spec:
ports:
- name: tcp
port: 1218
protocol: TCP
targetPort: 1218
selector:
app: snuba-cleanup
clusterIP: None
sessionAffinity: None
type: ClusterIP

snuba-consumer

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
apiVersion: apps/v1
kind: Deployment
metadata:
name: snuba-consumer
namespace: sentry
labels:
app: snuba-consumer
spec:
selector:
matchLabels:
app: snuba-consumer
replicas: 1
template:
metadata:
labels:
app: snuba-consumer
spec:
containers:
- name: snuba-consumer
image: "getsentry/snuba:20.8.0"
imagePullPolicy: IfNotPresent
command:
- /usr/src/snuba/docker_entrypoint.sh
- consumer
- '--storage'
- events
- '--auto-offset-reset=latest'
- '--max-batch-time-ms'
- '750'
ports:
- containerPort: 1218
env:
- name: SNUBA_SETTINGS
value: docker
- name: CLICKHOUSE_HOST
value: clickhouse
- name: DEFAULT_BROKERS
valueFrom:
configMapKeyRef:
name: sentry-conf
key: kafka-host
- name: REDIS_HOST
value: redis
- name: UWSGI_MAX_REQUESTS
value: '10000'
- name: UWSGI_DISABLE_LOGGING
value: 'true'
resources:
limits:
cpu: 1000m
memory: 512Mi
requests:
cpu: 100m
memory: 512Mi

---
apiVersion: v1
kind: Service
metadata:
name: snuba-consumer
namespace: sentry
spec:
ports:
- name: tcp
port: 1218
protocol: TCP
targetPort: 1218
clusterIP: None
selector:
app: snuba-consumer
sessionAffinity: None
type: ClusterIP

snuba-outcomes-consumer

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
apiVersion: apps/v1
kind: Deployment
metadata:
name: snuba-outcomes-consumer
namespace: sentry
labels:
app: snuba-outcomes-consumer
spec:
selector:
matchLabels:
app: snuba-outcomes-consumer
replicas: 1
template:
metadata:
labels:
app: snuba-outcomes-consumer
spec:
containers:
- name: snuba-outcomes-consumer
image: "getsentry/snuba:20.8.0"
imagePullPolicy: IfNotPresent
command:
- '/usr/src/snuba/docker_entrypoint.sh'
- consumer
- --storage
- outcomes_raw
- --auto-offset-reset=earliest
- --max-batch-time-ms
- '750'
ports:
- containerPort: 1218
env:
- name: SNUBA_SETTINGS
value: docker
- name: CLICKHOUSE_HOST
value: clickhouse
- name: DEFAULT_BROKERS
valueFrom:
configMapKeyRef:
name: sentry-conf
key: kafka-host
- name: REDIS_HOST
value: redis
- name: UWSGI_MAX_REQUESTS
value: '10000'
- name: UWSGI_DISABLE_LOGGING
value: 'true'
resources:
limits:
cpu: 1000m
memory: 512Mi
requests:
cpu: 100m
memory: 512Mi

---
apiVersion: v1
kind: Service
metadata:
name: snuba-outcomes-consumer
namespace: sentry
spec:
ports:
- name: tcp
port: 1218
protocol: TCP
targetPort: 1218
clusterIP: None
selector:
app: snuba-outcomes-consumer
sessionAffinity: None
type: ClusterIP

snuba-replacer

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
apiVersion: apps/v1
kind: Deployment
metadata:
name: snuba-replacer
namespace: sentry
labels:
app: snuba-replacer
spec:
selector:
matchLabels:
app: snuba-replacer
replicas: 1
template:
metadata:
labels:
app: snuba-replacer
spec:
containers:
- name: snuba-replacer
image: "getsentry/snuba:20.8.0"
imagePullPolicy: IfNotPresent
command:
- '/usr/src/snuba/docker_entrypoint.sh'
- replacer
- --storage
- events
- --auto-offset-reset=latest
- --max-batch-size
- '3'
ports:
- containerPort: 1218
env:
- name: SNUBA_SETTINGS
value: docker
- name: CLICKHOUSE_HOST
value: clickhouse
- name: DEFAULT_BROKERS
valueFrom:
configMapKeyRef:
name: sentry-conf
key: kafka-host
- name: REDIS_HOST
value: redis
- name: UWSGI_MAX_REQUESTS
value: '10000'
- name: UWSGI_DISABLE_LOGGING
value: 'true'
resources:
limits:
cpu: 1000m
memory: 512Mi
requests:
cpu: 100m
memory: 512Mi

---
apiVersion: v1
kind: Service
metadata:
name: snuba-replacer
namespace: sentry
spec:
ports:
- name: tcp
port: 1218
protocol: TCP
targetPort: 1218
selector:
app: snuba-replacer
sessionAffinity: None
type: ClusterIP

snuba-transactions-consumer

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
apiVersion: apps/v1
kind: Deployment
metadata:
name: snuba-transactions-consumer
namespace: sentry
labels:
app: snuba-transactions-consumer
spec:
selector:
matchLabels:
app: snuba-transactions-consumer
replicas: 1
template:
metadata:
labels:
app: snuba-transactions-consumer
spec:
containers:
- name: snuba-transactions-consumer
image: "getsentry/snuba:20.8.0"
imagePullPolicy: IfNotPresent
command:
- '/usr/src/snuba/docker_entrypoint.sh'
- consumer
- --storage
- transactions
- --consumer-group
- transactions_group
- --auto-offset-reset=latest
- --max-batch-time-ms
- '750'
ports:
- containerPort: 1218
env:
- name: SNUBA_SETTINGS
value: docker
- name: CLICKHOUSE_HOST
value: clickhouse
- name: DEFAULT_BROKERS
valueFrom:
configMapKeyRef:
name: sentry-conf
key: kafka-host
- name: REDIS_HOST
value: redis
- name: UWSGI_MAX_REQUESTS
value: '10000'
- name: UWSGI_DISABLE_LOGGING
value: 'true'
resources:
limits:
cpu: 1000m
memory: 512Mi
requests:
cpu: 100m
memory: 512Mi

---
apiVersion: v1
kind: Service
metadata:
name: snuba-transactions-consumer
namespace: sentry
spec:
ports:
- name: tcp
port: 1218
protocol: TCP
targetPort: 1218
selector:
app: snuba-transactions-consumer
sessionAffinity: None
type: ClusterIP

symbolicator-cleanup

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
---
# symbolicator-cleanup: runs `symbolicator cleanup` via an in-container cron
# entry (23:55 daily) to purge stale files from the shared symbolicator
# data volume.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: symbolicator-cleanup
  namespace: sentry
  labels:
    app: symbolicator-cleanup
spec:
  selector:
    matchLabels:
      app: symbolicator-cleanup
  replicas: 1
  template:
    metadata:
      labels:
        app: symbolicator-cleanup
    spec:
      containers:
        - name: symbolicator-cleanup
          image: "registry.cn-hangzhou.aliyuncs.com/nbugs-share/sentry:symbolicator-cleanup-onpremise-local"
          imagePullPolicy: IfNotPresent
          command:
            - "/entrypoint.sh"
          args:
            # crontab line (schedule + command) consumed by the image's
            # entrypoint script.
            - '55 23 * * * gosu symbolicator symbolicator cleanup'
          ports:
            - containerPort: 3021
          volumeMounts:
            # Shares the symbolicator data PVC so cleanup sees the caches.
            - mountPath: /data
              name: data
          resources:
            limits:
              cpu: 100m
              memory: 128Mi
            requests:
              cpu: 100m
              memory: 128Mi
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: symbolicator-data

---
# Headless Service (clusterIP: None) for symbolicator-cleanup — DNS only,
# no virtual IP is allocated.
apiVersion: v1
kind: Service
metadata:
  name: symbolicator-cleanup
  namespace: sentry
spec:
  clusterIP: None
  ports:
    - name: tcp
      port: 3021
      protocol: TCP
      targetPort: 3021
  selector:
    app: symbolicator-cleanup
  sessionAffinity: None
  type: ClusterIP

symbolicator

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
---
# symbolicator: Sentry's native-symbolication service, started via its
# docker entrypoint with a config file mounted from a PVC; listens on 3021.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: symbolicator
  namespace: sentry
  labels:
    app: symbolicator
spec:
  selector:
    matchLabels:
      app: symbolicator
  replicas: 1
  template:
    metadata:
      labels:
        app: symbolicator
    spec:
      containers:
        - name: symbolicator
          # Pinned to the exact commit image shipped with onpremise 20.8.0.
          image: "getsentry/symbolicator:17269fd5432f3d8fcb738081fac640a45639fd54"
          imagePullPolicy: IfNotPresent
          command:
            - "/bin/bash"
          args:
            - /docker-entrypoint.sh
            - run
            - -c
            - /etc/symbolicator/config.yml
          ports:
            - containerPort: 3021
          volumeMounts:
            - mountPath: /data
              name: data
            - mountPath: /etc/symbolicator
              name: conf
          resources:
            limits:
              cpu: 1000m
              memory: 300Mi
            requests:
              cpu: 100m
              memory: 300Mi
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: symbolicator-data
        - name: conf
          persistentVolumeClaim:
            claimName: symbolicator-conf
---
# PVCs backing symbolicator: /data (caches) and /etc/symbolicator (config),
# both on the Alibaba Cloud NAS subpath storage class (RWX so the cleanup
# deployment can share /data).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: symbolicator-data
  namespace: sentry
spec:
  storageClassName: ali-nas-subpath
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: symbolicator-conf
  namespace: sentry
spec:
  storageClassName: ali-nas-subpath
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
---
# ClusterIP Service exposing symbolicator on 3021 inside the cluster.
apiVersion: v1
kind: Service
metadata:
  name: symbolicator
  namespace: sentry
spec:
  ports:
    - name: tcp
      port: 3021
      protocol: TCP
      targetPort: 3021
  selector:
    app: symbolicator
  sessionAffinity: None
  type: ClusterIP

web

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
---
# Sentry "web" pod. Per the article, web + relay + snuba-api + nginx are
# deliberately co-located in ONE pod: splitting them into separate pods
# caused CORS failures on the event-ingest endpoint, which disappeared once
# they shared localhost.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web
  namespace: sentry
  labels:
    app: web
spec:
  selector:
    matchLabels:
      app: web
  replicas: 1
  template:
    metadata:
      labels:
        app: web
    spec:
      containers:
        # Sentry web UI/API (uWSGI on 9000).
        - name: web
          image: "registry.cn-hangzhou.aliyuncs.com/nbugs-share/sentry:sentry-onpremise-local"
          imagePullPolicy: IfNotPresent
          command:
            - /bin/sh
            - -c
            # "$0"/"$@" quoted so args survive word splitting (fix: the
            # original unquoted $0 $@ breaks on arguments with spaces).
            - 'exec /docker-entrypoint.sh "$0" "$@"'
          args:
            - run
            - web
          ports:
            - containerPort: 9000
          env:
            - name: SENTRY_CONF
              value: '/etc/sentry'
            - name: SNUBA
              value: 'http://snuba-api:1218'
          volumeMounts:
            - mountPath: /data
              name: data
            - mountPath: /etc/sentry
              name: config
          readinessProbe:
            tcpSocket:
              port: 9000
            periodSeconds: 5
            timeoutSeconds: 5
          livenessProbe:
            tcpSocket:
              port: 9000
            periodSeconds: 5
            timeoutSeconds: 5
          startupProbe:
            tcpSocket:
              port: 9000
            initialDelaySeconds: 10
            failureThreshold: 15
            periodSeconds: 5
            timeoutSeconds: 10
          resources:
            limits:
              cpu: '2'
              memory: 1024Mi
            requests:
              cpu: 10m
              memory: 1024Mi
        # Relay: ingests SDK events and forwards them to web (port 3000).
        - name: relay
          image: "registry-vpc.cn-hangzhou.aliyuncs.com/nbugs-share/sentry:relay-local-1"
          imagePullPolicy: Always
          command:
            - "/bin/bash"
          args:
            - /docker-entrypoint.sh
            - run
          ports:
            - containerPort: 3000
          # volumeMounts:
          #   - mountPath: /work/.relay
          #     name: relay
          resources:
            limits:
              cpu: 100m
              memory: 300Mi
            requests:
              cpu: 10m
              memory: 300Mi
        # Snuba API, queried by web for event search/aggregation (port 1218).
        - name: api
          image: "getsentry/snuba:20.8.0"
          imagePullPolicy: IfNotPresent
          command:
            - "/usr/src/snuba/docker_entrypoint.sh"
          args:
            - api
          ports:
            - containerPort: 1218
          env:
            - name: SNUBA_SETTINGS
              value: docker
            - name: CLICKHOUSE_HOST
              value: clickhouse
            - name: DEFAULT_BROKERS
              valueFrom:
                configMapKeyRef:
                  name: sentry-conf
                  key: kafka-host
            - name: REDIS_HOST
              value: redis
            - name: UWSGI_MAX_REQUESTS
              value: '10000'
            - name: UWSGI_DISABLE_LOGGING
              value: 'true'
          resources:
            limits:
              cpu: 500m
              memory: 512Mi
            requests:
              cpu: 10m
              memory: 512Mi
        # Front nginx routing between web and relay (port 80), config from PVC.
        - name: nginx
          image: "nginx:1.16"
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 80
          volumeMounts:
            - mountPath: /etc/nginx
              name: config-nginx
              readOnly: true
          resources:
            limits:
              cpu: 1000m
              memory: 300Mi
            requests:
              cpu: 100m
              memory: 300Mi
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: sentry-data
        - name: config
          persistentVolumeClaim:
            claimName: sentry-conf
        - name: config-nginx
          persistentVolumeClaim:
            claimName: nbugs-nginx

---
# Headless Service (clusterIP: None) for the web pod — resolves directly to
# the pod IP on 9000 without a virtual IP.
apiVersion: v1
kind: Service
metadata:
  name: web
  namespace: sentry
spec:
  clusterIP: None
  ports:
    - name: tcp
      port: 9000
      protocol: TCP
      targetPort: 9000
  selector:
    app: web
  type: ClusterIP

worker

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
---
# Sentry background worker (`sentry run worker`), sharing the data/config
# PVCs with the web deployment.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: worker
  namespace: sentry
  labels:
    app: worker
spec:
  selector:
    matchLabels:
      app: worker
  replicas: 1
  template:
    metadata:
      labels:
        app: worker
    spec:
      containers:
        - name: worker
          image: "registry.cn-hangzhou.aliyuncs.com/nbugs-share/sentry:sentry-onpremise-local"
          imagePullPolicy: IfNotPresent
          command:
            - "/bin/sh"
          args:
            - "-c"
            # "$0"/"$@" quoted so args survive word splitting (fix: the
            # original unquoted $0 $@ breaks on arguments with spaces).
            - 'exec /docker-entrypoint.sh "$0" "$@"'
            - run
            - worker
          ports:
            # NOTE(review): nothing visible here shows the worker listening
            # on 9000; if it is a pure background worker the tcpSocket
            # probes below will fail and crash-loop the pod — verify.
            - containerPort: 9000
          env:
            - name: SENTRY_CONF
              value: '/etc/sentry'
            - name: SNUBA
              value: 'http://snuba-api:1218'
          volumeMounts:
            - mountPath: /data
              name: data
            - mountPath: /etc/sentry
              name: config
          resources:
            limits:
              cpu: 1000m
              memory: 1024Mi
            requests:
              cpu: 500m
              memory: 1024Mi
          readinessProbe:
            tcpSocket:
              port: 9000
            periodSeconds: 5
            timeoutSeconds: 5
          livenessProbe:
            tcpSocket:
              port: 9000
            periodSeconds: 5
            timeoutSeconds: 5
          startupProbe:
            tcpSocket:
              port: 9000
            initialDelaySeconds: 10
            failureThreshold: 15
            periodSeconds: 5
            timeoutSeconds: 10
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: sentry-data
        - name: config
          persistentVolumeClaim:
            claimName: sentry-conf
---
# Service for the worker on 9000.
# NOTE(review): a background worker typically exposes no port — confirm
# anything actually connects to this Service before keeping it.
apiVersion: v1
kind: Service
metadata:
  name: worker
  namespace: sentry
spec:
  ports:
    - name: tcp
      port: 9000
      protocol: TCP
      targetPort: 9000
  selector:
    app: worker
  type: ClusterIP