Commit 0b368282 authored by AFA_周世超

create init

Parent 5851b739

VERSION
audit.log
FROM python:3.7.4-stretch
MAINTAINER afa@afa.com
WORKDIR /workspaces
COPY src/backend/requirements.txt /workspaces/
RUN pip3 install -r /workspaces/requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ \
&& rm -rf /root/.cache/pip /root/.python_history /root/.wget-hsts
COPY src/backend/common/upload/NotoSans-Bold.ttf /usr/share/fonts/
RUN sed -i s@/deb.debian.org/@/mirrors.163.com/@g /etc/apt/sources.list \
&& apt-get update \
&& apt-get -y install fontconfig
RUN cd /usr/share/fonts \
&& wget http://ftp.de.debian.org/debian/pool/contrib/m/msttcorefonts/ttf-mscorefonts-installer_3.7_all.deb \
&& apt install -y ./ttf-mscorefonts-installer_3.7_all.deb \
&& apt-mark hold ttf-mscorefonts-installer \
&& mkfontscale \
&& mkfontdir \
&& fc-cache \
&& apt-get clean
FROM 172.17.0.7:8443/powerkeeper/powerkeeper-base-backend
MAINTAINER afa@afa.com
WORKDIR /workspaces
COPY src/backend /workspaces/backend
RUN pip3 install -r /workspaces/backend/requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ && rm -rf /root/.cache/pip /root/.python_history /root/.wget-hsts
WORKDIR /workspaces/backend
COPY make/build/containers/celery/buildsite/docker-entrypoint.sh /workspaces/backend/
COPY make/build/containers/celery/buildsite/docker-flower-entrypoint.sh /workspaces/backend/
COPY make/build/containers/celery/buildsite/docker-worker-entrypoint.sh /workspaces/backend/
COPY make/build/containers/celery/buildsite/readiness.sh /workspaces/backend/
#ENTRYPOINT ["/bin/sh","-c","./docker-entrypoint.sh"]
#!/bin/sh
python celery_run.py -A etc.celery_production beat -s "celerybeat-schedule" --loglevel=debug --logfile=../logs/celery.log --pidfile=
# Debugging aid: keep the container alive
# while true; do sleep 100; done
#!/bin/sh
python celery_run.py -A etc.celery_production control enable_events &
python celery_run.py -A etc.celery_production flower --port=5556
# Debugging aid: keep the container alive
# while true; do sleep 100; done
#!/bin/bash
sleep 10 #wait for celery-beat
# --max-tasks-per-child=50
celery -A etc.celery_production worker -l info -P eventlet -n worker.%h
#!/bin/bash
# Readiness probe: `celery status` prints an "OK" line for each responsive worker.
str1=$(celery -A etc.celery_production status)
str2="OK"
result=$(echo "$str1" | grep "${str2}")
if [ "$result" != "" ];then
echo "celery status is OK"
else
echo "celery is dead!"
# a non-zero exit marks the container as not ready
exit 1
fi
FROM python:2.7.18-stretch
MAINTAINER afa@afa.com
WORKDIR /opt/etl/datax_job
RUN cd /opt \
&& wget http://datax-opensource.oss-cn-hangzhou.aliyuncs.com/datax.tar.gz \
&& tar -zxvf datax.tar.gz \
&& rm -rf datax.tar.gz
RUN sed -i s@/deb.debian.org/@/mirrors.163.com/@g /etc/apt/sources.list \
&& apt-get update \
&& apt-get -y install openjdk-8-jdk \
&& apt-get clean
COPY src/etl /opt/etl
COPY src/etl/bin /usr/local/bin
COPY make/build/containers/etl/cyclejobs/buildsite/entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
ENV BASE_PATH "/opt"
ENTRYPOINT /entrypoint.sh
#!/bin/bash
set -e
path="/opt/etl/cycle_jobs"
if [[ ! -d $path ]];then
echo "####################$path is not exist#########"
exit 1
fi
noExecJob=$NOEXECJOB
execJob=$EXECJOB
files=`ls $path`
echo "datax job list: $files"
if [[ $files != "" ]]; then
current=`date "+%Y-%m-%d %H:%M:%S"`
timeStamp=`date -d "$current" +%s`
echo "##################recond start time: $timeStamp ####################"
echo $timeStamp>/starttime
echo "##################datax jobs####################"
if [[ $EXECJOB != "" ]]; then
echo "##################exec data job: $EXECJOB"
datax start $path"/"$EXECJOB
else
for file in $files
do
if [[ $noExecJob == $file ]]; then
echo "##############jump over $noExecJob#######"
continue
fi
echo "#################exec datax job: $file"
datax start $path"/"$file
done
fi
else
echo "###############no datax jobs####################"
exit 1
fi
FROM python:3.7.4-stretch AS build
MAINTAINER afa@afa.com
WORKDIR /opt/etl/datax_job
COPY src/etl /opt/etl
RUN pip3 install -r /opt/etl/requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ \
&& rm -rf /root/.cache/pip /root/.python_history /root/.wget-hsts
RUN export BASE_PATH="/opt" && /opt/etl/bin/init_jobs
FROM python:2.7.18-stretch
MAINTAINER afa@afa.com
WORKDIR /opt/etl/datax_job
RUN cd /opt \
&& wget http://datax-opensource.oss-cn-hangzhou.aliyuncs.com/datax.tar.gz \
&& tar -zxvf datax.tar.gz \
&& rm -rf datax.tar.gz
RUN sed -i s@/deb.debian.org/@/mirrors.163.com/@g /etc/apt/sources.list \
&& apt-get update \
&& apt-get -y install openjdk-8-jdk \
&& apt-get clean
COPY src/etl /opt/etl
COPY src/etl/bin /usr/local/bin
COPY --from=build /opt/etl/datax_job /opt/etl/datax_job
COPY make/build/containers/etl/datax/buildsite/entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
ENV BASE_PATH "/opt"
ENTRYPOINT /entrypoint.sh
#!/bin/bash
set -e
path="/opt/etl/datax_job/trans_jobs"
if [[ ! -d $path ]];then
echo "####################$path is not exist#########"
exit 1
fi
noExecJob=$NOEXECJOB
execJob=$EXECJOB
files=`ls $path`
echo "datax job list: $files"
if [[ $files != "" ]]; then
current=`date "+%Y-%m-%d %H:%M:%S"`
timeStamp=`date -d "$current" +%s`
echo "##################recond start time: $timeStamp ####################"
echo $timeStamp>/starttime
echo "##################datax jobs####################"
if [[ $EXECJOB != "" ]]; then
echo "##################exec data job: $EXECJOB"
datax start $path"/"$EXECJOB
else
for file in $files
do
if [[ $noExecJob == $file ]]; then
echo "##############jump over $noExecJob#######"
continue
fi
echo "#################exec datax job: $file"
datax start $path"/"$file
done
fi
else
echo "###############no datax jobs####################"
exit 1
fi
FROM python:3.7.4-stretch
MAINTAINER afa@afa.com
WORKDIR /opt
COPY src/backend/requirements.txt /opt/
RUN pip install -i https://mirrors.aliyun.com/pypi/simple/ -r /opt/requirements.txt
COPY src/backend/etl /opt/etl
COPY src/backend/etc /opt/etc
COPY src/backend/common /opt/common
ENV BASE_PATH "/opt"
FROM python:3.7.4-stretch
MAINTAINER afa@afa.com
WORKDIR /opt
RUN pip install mysql-connector-python==8.0.21 -i https://mirrors.aliyun.com/pypi/simple/ \
&& pip install SQLAlchemy==1.3.19 -i https://mirrors.aliyun.com/pypi/simple/ \
&& pip install addict==2.2.1 -i https://mirrors.aliyun.com/pypi/simple/
COPY src/backend/etl /opt/etl
COPY src/backend/etc /opt/etc
COPY src/backend/common /opt/common
ENV BASE_PATH "/opt"
# nginx Dockerfile
# Version 1.0
# author jinjie
# Base image
FROM ubuntu:18.04
MAINTAINER jinjie afa@afa.com
# Install build dependencies
RUN apt-get update && \
apt-get install -y libpcre3 libpcre3-dev && \
apt-get install -y zlib1g-dev && \
apt-get install -y build-essential libssl-dev && \
apt-get clean
# Build and install nginx from source
RUN useradd -M -s /sbin/nologin nginx
#RUN mkdir -p /usr/local/nginx
ADD make/build/containers/fe/screen/baseimage/nginx-1.17.6.tar.gz /usr/local/nginx/
COPY make/build/containers/fe/screen/baseimage/nginx-upstream-dynamic-servers /usr/local/nginx/nginx-upstream-dynamic-servers
COPY make/build/containers/fe/screen/baseimage/ngx_upstream_jdomain /usr/local/nginx/ngx_upstream_jdomain
WORKDIR /usr/local/nginx
RUN cd /usr/local/nginx/nginx-1.17.6 \
&& ./configure --prefix=/etc/nginx --user=nginx --group=nginx \
--conf-path=/etc/nginx/nginx.conf \
--error-log-path=/var/log/nginx/error.log \
--http-log-path=/var/log/nginx/access.log \
--pid-path=/var/run/nginx.pid \
--lock-path=/var/run/nginx.lock \
--with-http_stub_status_module \
--with-http_ssl_module \
--with-http_sub_module \
--add-module=/usr/local/nginx/nginx-upstream-dynamic-servers \
--add-module=/usr/local/nginx/ngx_upstream_jdomain \
&& make && make install
# Parameter notes:
# --prefix sets the installation directory of the compiled nginx
# --add-module adds third-party modules; here nginx-upstream-dynamic-servers and ngx_upstream_jdomain
# --with-..._module enables a built-in nginx module, e.g. http_ssl_module here
# Smoke-test the configuration and put the nginx binary on the default PATH
RUN /etc/nginx/sbin/nginx -t -c /etc/nginx/nginx.conf && ln -s /etc/nginx/sbin/* /usr/local/sbin/
# Run nginx in the foreground
CMD ["/etc/nginx/sbin/nginx","-g","daemon off;"]
# nginx-upstream-dynamic-servers Change Log
## [0.4.0] - 2016-03-14
### Changed
- New API using the standard nginx `server` syntax (instead of `dynamic_server`) and a `resolve` parameter. (Thanks to @wandenberg)
- Improved memory usage. (Thanks to @wandenberg)
## [0.3.0] - 2016-03-07
### Added
- Compatibility with nginx 1.6 and 1.9. (Thanks to @wandenberg)
## [0.2.0] - 2016-03-02
### Added
- Compatibility with nginx 1.8 and 1.7. (Thanks to @wandenberg)
### Fixed
- Fix segfault during repeated nginx reloads. (Thanks to @wandenberg)
## 0.1.0 - 2014-11-29
### Added
- Initial release.
[0.4.0]: https://github.com/GUI/nginx-upstream-dynamic-servers/compare/v0.3.0...v0.4.0
[0.3.0]: https://github.com/GUI/nginx-upstream-dynamic-servers/compare/v0.2.0...v0.3.0
[0.2.0]: https://github.com/GUI/nginx-upstream-dynamic-servers/compare/v0.1.0...v0.2.0
Copyright (c) 2014 Nick Muerdter
MIT License
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
export PATH := $(PWD)/t/build/sbin:$(PWD)/t/build/bin:$(PATH)
export PERL5LIB := $(PWD)/t/build/lib/perl5
export UNBOUND_PID := $(PWD)/t/build/etc/unbound/unbound.pid
unbound_version=1.4.22
lua_jit_version=2.0.3
lua_nginx_module_version=0.9.13rc1
lua_upstream_nginx_module_version=0.02
nginx_version=1.7.7
nginx_no_pool_version?=$(nginx_version)
nginx_url=http://nginx.org/download/nginx-$(nginx_version).tar.gz
clean:
rm -rf t/build t/servroot t/tmp
prepare: t/build/lib/perl5 t/build/sbin/unbound t/build/sbin/nginx
test: prepare
echo "" > /tmp/nginx_upstream_dynamic_servers_unbound_active_test.conf
echo "" > /tmp/unbound.log
if [ -f $(UNBOUND_PID) ] && ps -p `cat $(UNBOUND_PID)` > /dev/null; then kill -QUIT `cat $(UNBOUND_PID)`; fi
sleep 0.2
env PATH=$(PATH) unbound -c $(PWD)/t/unbound/unbound.conf -vvv
env PATH=$(PATH) PERL5LIB=$(PERL5LIB) UNBOUND_PID=$(UNBOUND_PID) LD_LIBRARY_PATH=$(PWD)/t/build/lib:$(LD_LIBRARY_PATH) prove; \
STATUS=$$?; \
if [ -f $(UNBOUND_PID) ] && ps -p `cat $(UNBOUND_PID)` > /dev/null; then kill -QUIT `cat $(UNBOUND_PID)`; fi; \
exit $$STATUS
grind:
env TEST_NGINX_USE_VALGRIND=1 TEST_NGINX_SLEEP=5 $(MAKE) test
t/tmp:
mkdir -p $@
touch $@
t/tmp/cpanm: | t/tmp
curl -o $@ -L http://cpanmin.us
chmod +x $@
touch $@
t/build/lib/perl5: t/tmp/cpanm
$< -L t/build --notest LWP::Protocol::https
$< -L t/build --notest https://github.com/openresty/test-nginx/archive/8d5c8668364251cdae01ccf1ef933d80b642982d.tar.gz
touch $@
t/tmp/unbound-$(unbound_version).tar.gz: | t/tmp
curl -o $@ "http://unbound.net/downloads/unbound-$(unbound_version).tar.gz"
t/tmp/unbound-$(unbound_version): t/tmp/unbound-$(unbound_version).tar.gz
tar -C t/tmp -xf $<
touch $@
t/tmp/unbound-$(unbound_version)/Makefile: | t/tmp/unbound-$(unbound_version)
cd t/tmp/unbound-$(unbound_version) && ./configure --prefix=$(PWD)/t/build
touch $@
t/tmp/unbound-$(unbound_version)/unbound: t/tmp/unbound-$(unbound_version)/Makefile
cd t/tmp/unbound-$(unbound_version) && make
touch $@
t/build/sbin/unbound: t/tmp/unbound-$(unbound_version)/unbound
cd t/tmp/unbound-$(unbound_version) && make install
touch $@
t/tmp/LuaJIT-$(lua_jit_version).tar.gz: | t/tmp
curl -o $@ "http://luajit.org/download/LuaJIT-$(lua_jit_version).tar.gz"
t/tmp/LuaJIT-$(lua_jit_version): t/tmp/LuaJIT-$(lua_jit_version).tar.gz
tar -C t/tmp -xf $<
touch $@
t/tmp/LuaJIT-$(lua_jit_version)/src/luajit: | t/tmp/LuaJIT-$(lua_jit_version)
cd t/tmp/LuaJIT-$(lua_jit_version) && make PREFIX=$(PWD)/t/build
touch $@
t/build/bin/luajit: t/tmp/LuaJIT-$(lua_jit_version)/src/luajit
cd t/tmp/LuaJIT-$(lua_jit_version) && make install PREFIX=$(PWD)/t/build
touch $@
t/tmp/lua-nginx-module-$(lua_nginx_module_version).tar.gz: | t/tmp
curl -Lo $@ "https://github.com/openresty/lua-nginx-module/archive/v$(lua_nginx_module_version).tar.gz"
t/tmp/lua-nginx-module-$(lua_nginx_module_version): t/tmp/lua-nginx-module-$(lua_nginx_module_version).tar.gz
tar -C t/tmp -xf $<
touch $@
t/tmp/lua-upstream-nginx-module-$(lua_upstream_nginx_module_version).tar.gz: | t/tmp
curl -Lo $@ "https://github.com/openresty/lua-upstream-nginx-module/archive/v$(lua_upstream_nginx_module_version).tar.gz"
t/tmp/lua-upstream-nginx-module-$(lua_upstream_nginx_module_version): t/tmp/lua-upstream-nginx-module-$(lua_upstream_nginx_module_version).tar.gz
tar -C t/tmp -xf $<
touch $@
t/tmp/nginx-$(nginx_version).tar.gz: | t/tmp
curl -o $@ $(nginx_url)
t/tmp/nginx-$(nginx_version): t/tmp/nginx-$(nginx_version).tar.gz
tar -C t/tmp -xf $<
touch $@
t/tmp/nginx-$(nginx_no_pool_version)-no_pool.patch: | t/tmp
curl -o $@ https://raw.githubusercontent.com/openresty/no-pool-nginx/master/nginx-$(nginx_no_pool_version)-no_pool.patch
t/tmp/nginx-$(nginx_version)/.patches-applied: | t/tmp/nginx-$(nginx_version) t/tmp/nginx-$(nginx_no_pool_version)-no_pool.patch
cat t/tmp/nginx-$(nginx_no_pool_version)-no_pool.patch | sed "s,.*nginx_version.*, `cat t/tmp/nginx-$(nginx_version)/src/core/nginx.h | grep nginx_version`," | sed 's,"$(nginx_no_pool_version),"$(nginx_version),' | patch -d t/tmp/nginx-$(nginx_version) -p1 --quiet
touch $@
t/tmp/nginx-$(nginx_version)/Makefile: config | t/tmp/nginx-$(nginx_version) t/tmp/nginx-$(nginx_version)/.patches-applied t/build/bin/luajit t/tmp/lua-nginx-module-$(lua_nginx_module_version) t/tmp/lua-upstream-nginx-module-$(lua_upstream_nginx_module_version)
cd t/tmp/nginx-$(nginx_version) && env \
LUAJIT_LIB=$(PWD)/t/build/lib \
LUAJIT_INC=$(PWD)/t/build/include/luajit-2.0 \
./configure \
--prefix=$(PWD)/t/build \
--with-debug \
--with-ipv6 \
--add-module=$(PWD)/t/tmp/lua-nginx-module-$(lua_nginx_module_version) \
--add-module=$(PWD)/t/tmp/lua-upstream-nginx-module-$(lua_upstream_nginx_module_version) \
--add-module=$(PWD) \
--without-http_charset_module \
--without-http_userid_module \
--without-http_auth_basic_module \
--without-http_autoindex_module \
--without-http_geo_module \
--without-http_split_clients_module \
--without-http_referer_module \
--without-http_fastcgi_module \
--without-http_uwsgi_module \
--without-http_scgi_module \
--without-http_memcached_module \
--without-http_limit_conn_module \
--without-http_limit_req_module \
--without-http_empty_gif_module \
--without-http_browser_module \
--without-http_upstream_ip_hash_module
t/tmp/nginx-$(nginx_version)/objs/nginx: t/tmp/nginx-$(nginx_version)/Makefile *.c
cd t/tmp/nginx-$(nginx_version) && make
t/build/sbin/nginx: t/tmp/nginx-$(nginx_version)/objs/nginx
cd t/tmp/nginx-$(nginx_version) && make install
# nginx-upstream-dynamic-servers
An nginx module to resolve domain names inside upstreams and keep them up to date.
By default, servers defined in nginx upstreams are only resolved when nginx starts. This module provides an additional `resolve` parameter for `server` definitions that can be used to asynchronously resolve upstream domain names. This keeps the upstream definition up to date according to the DNS TTL of each domain name, which is useful for upstream domain names that may frequently change IP addresses.
This module also allows nginx to start if an upstream contains a defunct domain name that no longer resolves. By default, nginx will fail to start if an upstream server contains an unresolvable domain name. With this module, nginx is still allowed to start with invalid domain names, but an error will be logged and the unresolvable domain names will be marked as down.
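The TTL that drives re-resolution is an ordinary DNS record attribute and can be inspected directly; a minimal sketch (any resolvable name works, `example.com` and the answer shown are placeholders):

```sh
# The second field of the answer is the remaining TTL in seconds; the module
# schedules the next lookup for a name once this TTL expires.
dig +noall +answer example.com A
# example.com.   86400   IN   A   93.184.216.34
```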
## Installation
```sh
./configure --add-module=/path/to/nginx-upstream-dynamic-servers
make && make install
```
## Usage
Use the `server` definition inside your upstreams and specify the `resolve` parameter.
*Note:* A `resolver` must be defined at the `http` level of nginx's config for `resolve` to work.
```
http {
resolver 8.8.8.8;
upstream example {
server example.com resolve;
}
}
```
## Directives
### server
**Syntax:** `server address [parameters];`
**Context:** `upstream`
Defines a server for an upstream. The module adds the ability to specify a `resolve` parameter. When specified:
- Domain names will be resolved on an ongoing basis and kept up to date according to the TTL of each domain name.
- Unresolvable domain names are considered non-fatal errors (but are logged). nginx is allowed to start up if a domain name can't be resolved, but the server is marked as down.
The following parameters can be used (see nginx's [server documentation](http://nginx.org/en/docs/http/ngx_http_upstream_module.html#server) for details); a combined sketch follows the list:
`weight=number`
`max_fails=number`
`fail_timeout=time`
`backup`
`down`
`resolve`
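A minimal combined sketch (hypothetical host names; the `nginx -t` check assumes a binary built with this module, as shown under Installation):

```sh
cat > /tmp/dynamic-upstream.conf <<'EOF'
events {}
http {
    resolver 8.8.8.8;
    upstream backend {
        # re-resolved per DNS TTL; weighted, with failure accounting
        server app.example.com resolve weight=2 max_fails=3 fail_timeout=30s;
        # only used when the primary server is unavailable
        server standby.example.com resolve backup;
    }
}
EOF
nginx -t -c /tmp/dynamic-upstream.conf   # syntax check only
```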
## Compatibility
Tested with nginx 1.6, 1.7, 1.8, 1.9.
## Alternatives
- [proxy_pass + resolver](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass): If you only need to proxy to 1 domain and don't need the additional capabilities of upstreams, nginx's `proxy_pass` can perform resolving at run-time (see the sketch after this list).
- [ngx_upstream_jdomain](http://wiki.nginx.org/HttpUpstreamJdomainModule): An nginx module that asynchronously resolves domain names. The primary difference between jdomain and this module is that this module keeps domain names up to date even if no server traffic is being generated (jdomain requires traffic to each upstream in order to keep it up to date). This module also allows nginx to start up if unresolvable domain names are given.
- [tengine's dynamic_resolve](https://github.com/alibaba/tengine/blob/master/docs/modules/ngx_http_upstream_dynamic.md): If you're using tengine (an nginx fork), there's a new feature (currently unreleased) to support resolving domain names in upstreams at run-time.
- [NGINX Plus](http://nginx.com/resources/admin-guide/load-balancer/#resolve)
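For the `proxy_pass` alternative above, the usual pattern is to place the host name in a variable, which defers resolution to request time via the configured `resolver`; a sketch with placeholder names:

```sh
cat > /tmp/proxy-resolve.conf <<'EOF'
events {}
http {
    resolver 8.8.8.8 valid=30s;   # cache lookups for 30s regardless of TTL
    server {
        listen 8080;
        location / {
            set $backend "app.example.com";   # a variable forces runtime resolution
            proxy_pass http://$backend;
        }
    }
}
EOF
nginx -t -c /tmp/proxy-resolve.conf
```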
## License
nginx-upstream-dynamic-servers is open sourced under the [MIT license](https://github.com/GUI/nginx-upstream-dynamic-servers/blob/master/LICENSE.txt).
dependencies:
pre:
- make prepare
# Install newer version of Valgrind from custom repo so Test::Nginx's
# num-callers works: https://github.com/openresty/test-nginx/pull/17
- sudo add-apt-repository -y ppa:jtaylor/jtaylor
- sudo apt-get update -q
- sudo apt-get -y install valgrind
cache_directories:
- "t/build"
- "t/tmp"
test:
override:
- make test
# - make grind
ngx_addon_name=ngx_http_upstream_dynamic_servers_module
HTTP_MODULES=$(echo $HTTP_MODULES | sed "s/ngx_http_upstream_module/ngx_http_upstream_dynamic_servers_module ngx_http_upstream_module/")
NGX_ADDON_SRCS="$NGX_ADDON_SRCS $ngx_addon_dir/ngx_http_upstream_dynamic_servers.c"
server:
port: 1982
interface: 127.0.0.1
chroot: ""
username: ""
logfile: /tmp/unbound.log
log-queries: yes
local-zone: "." static
include: /tmp/nginx_upstream_dynamic_servers_unbound_active_test.conf
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_event_process_init
fun:ngx_single_process_cycle
fun:main
}
{
<insert_a_suppression_name_here>
Memcheck:Cond
obj:*
obj:*
fun:lua_tolstring
}
{
<insert_a_suppression_name_here>
Memcheck:Cond
obj:*
fun:lua_pushlstring
}
{
<insert_a_suppression_name_here>
Memcheck:Addr4
obj:*
fun:lua_pushlstring
}
{
<insert_a_suppression_name_here>
Memcheck:Addr4
obj:*
fun:lua_setfield
}
{
<insert_a_suppression_name_here>
Memcheck:Addr4
obj:*
fun:lua_getfield
}
# In nginx 1.7.5+ there's a known leak with the resolver running in debug mode.
# See: http://forum.nginx.org/read.php?29,253644,253650#msg-253650
{
<insert_a_suppression_name_here>
Memcheck:Addr4
fun:ngx_resolve_name_done
}
ngx_upstream_jdomain
====================
An asynchronous domain name resolution module for nginx upstreams
Installation:
./configure --add-module=/path/to/this/directory
make
make install
Usage:
upstream backend {
jdomain www.baidu.com; #port=80
#jdomain www.baidu.com port=8080; #port=8080
}
resolver 8.8.8.8; #Your Local DNS Server
Jdomain:
* Syntax: jdomain <domain-name> [port=80] [max_ips=20] [interval=1] [retry_off]
* Context: upstream
* port: Backend's listening port.
* max_ips: IP buffer size.
* interval: Interval, in seconds, between domain name resolutions.
* retry_off: Do not retry if one IP fails.
See https://www.nginx.com/resources/wiki/modules/domain_resolve/ for details.
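A fuller sketch combining these parameters (hypothetical host and values):

upstream backend {
# re-resolve every 5 seconds, keep up to 10 addresses,
# talk to the backends on port 8080, and don't retry a failed IP
jdomain api.example.com port=8080 max_ips=10 interval=5 retry_off;
}
resolver 114.114.114.114; # DNS server used for the periodic lookups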
Author
======
wdaike <wdaike@163.com>, Baidu Inc.
Copyright & License
===================
This module is licensed under the BSD License.
Copyright (C) 2014-2014, by wdaike <wdaike@163.com>, Baidu Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
ngx_addon_name=ngx_http_upstream_jdomain_module
HTTP_MODULES="$HTTP_MODULES ngx_http_upstream_jdomain_module"
NGX_ADDON_SRCS="$NGX_ADDON_SRCS $ngx_addon_dir/ngx_http_upstream_jdomain.c"
FROM ubuntu:18.04 AS build
MAINTAINER afa
WORKDIR /usr/local/nginx
RUN apt-get update && \
apt-get install -y libpcre3 libpcre3-dev zlib1g-dev build-essential libssl-dev wget && \
apt-get install -y git && \
apt-get install -y python3.6 && \
ln -s /usr/bin/python3.6 /usr/bin/python && \
useradd -M -s /sbin/nologin nginx && \
apt-get autoremove -y && \
apt-get clean -y
RUN wget https://npm.taobao.org/mirrors/node/v14.15.1/node-v14.15.1-linux-x64.tar.xz && \
tar xf node-v14.15.1-linux-x64.tar.xz -C /usr/local && \
ln -s /usr/local/node-v14.15.1-linux-x64 /usr/local/node && \
echo "export PATH=$PATH:/usr/local/node/bin" >>/root/.bashrc && \
rm -rf node-v14.15.1-linux-x64.tar.xz
RUN export PATH=$PATH:/usr/local/node/bin && \
npm config set registry https://registry.npm.taobao.org
ARG image_prefix
FROM $image_prefix-base-backend
MAINTAINER afa@afa.com
WORKDIR /workspaces/backend
COPY src/backend /workspaces/backend
RUN pip3 install -r /workspaces/backend/requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ \
&& rm -rf /root/.cache/pip /root/.python_history /root/.wget-hsts
ENTRYPOINT ["/bin/sh","-c","./docker-entrypoint.sh"]
ARG image_prefix
FROM $image_prefix-base-backend
MAINTAINER afa@afa.com
WORKDIR /workspaces
COPY src/backend /workspaces/backend
RUN pip3 install -r /workspaces/backend/requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ \
&& rm -rf /root/.cache/pip /root/.python_history /root/.wget-hsts
WORKDIR /workspaces/backend
COPY make/build/containers/celery/buildsite/docker-entrypoint.sh /workspaces/backend/
COPY make/build/containers/celery/buildsite/docker-flower-entrypoint.sh /workspaces/backend/
COPY make/build/containers/celery/buildsite/docker-worker-entrypoint.sh /workspaces/backend/
COPY make/build/containers/celery/buildsite/readiness.sh /workspaces/backend/
#ENTRYPOINT ["/bin/sh","-c","./docker-entrypoint.sh"]
ARG image_prefix
FROM $image_prefix-env-fe/screen AS build
MAINTAINER afa
WORKDIR /usr/local/nginx
COPY src/fe/screen /opt/fe/screen
RUN export PATH=$PATH:/usr/local/node/bin && \
npm config set registry https://registry.npm.taobao.org && \
cd /opt/fe/screen && \
rm -rf /opt/fe/screen/.git && \
npm i -g yarn && \
yarn config set registry https://registry.npm.taobao.org && \
yarn && \
yarn build
ARG image_prefix
FROM $image_prefix-base-fe/screen
MAINTAINER afa
COPY --from=build /opt/fe/screen/dist /usr/share/nginx/html/
CMD ["/etc/nginx/sbin/nginx","-g","daemon off;"]
ARG image_prefix
FROM $image_prefix-base-fe/screen
MAINTAINER afa
COPY src/ironman-dist /usr/share/nginx/html/
CMD ["/etc/nginx/sbin/nginx","-g","daemon off;"]
FROM python:3.7.4-stretch
MAINTAINER afa@afa.com
WORKDIR /workspaces
COPY industrial_internet_monitor /workspaces/industrial_internet_monitor
RUN pip3 install -r /workspaces/industrial_internet_monitor/requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ && rm -rf /root/.cache/pip /root/.python_history /root/.wget-hsts
RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && echo 'Asia/Shanghai' >/etc/timezone
WORKDIR /workspaces/industrial_internet_monitor
ENTRYPOINT ["/bin/sh","-c","./docker-entrypoint.sh"]
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: monitor
name: monitor
spec:
replicas: 1
selector:
matchLabels:
app: monitor
template:
metadata:
labels:
app: monitor
name: monitor
spec:
imagePullSecrets:
- name: harbor-key
containers:
- image: 172.17.0.7:8443/powerkeeper/powerkeeper-monitor:v2.0
name: monitor
ports:
- containerPort: 8090
protocol: TCP
env:
- name: MYSQL_USER
valueFrom:
configMapKeyRef:
name: backend-config
key: MYSQL_USER
- name: MYSQL_PASS
valueFrom:
configMapKeyRef:
name: backend-config
key: MYSQL_PASS
- name: MYSQL_HOST
valueFrom:
configMapKeyRef:
name: backend-config
key: MYSQL_HOST
- name: MYSQL_PORT
valueFrom:
configMapKeyRef:
name: backend-config
key: MYSQL_PORT
- name: MYSQL_DB
valueFrom:
configMapKeyRef:
name: backend-config
key: MYSQL_DB
- name: REDIS_HOST
valueFrom:
configMapKeyRef:
name: backend-config
key: REDIS_HOST
- name: REDIS_PORT
valueFrom:
configMapKeyRef:
name: backend-config
key: REDIS_PORT
- name: REDIS_PASSWORD
valueFrom:
configMapKeyRef:
name: backend-config
key: REDIS_PASSWORD
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 90
preference:
matchExpressions:
- key: kubernetes.io/role
operator: In
values:
- monitor
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: monitor-ingress
annotations:
kubernetes.io/ingress.class: "nginx"
spec:
rules:
- host: monitor.ampbyte.com
http:
paths:
- backend:
serviceName: monitor
servicePort: 8090
apiVersion: v1
kind: Service
metadata:
name: monitor
spec:
selector:
app: monitor
ports:
- name: http
port: 8090
targetPort: 8090
nodePort: 31007
type: NodePort
apiVersion: v1
kind: Service
metadata:
name: mysql
spec:
selector:
app: mysql
ports:
- name: mysql
port: 3306
targetPort: 3306
nodePort: 31006
type: NodePort
apiVersion: v1
kind: Service
metadata:
name: backend
spec:
selector:
app: backend
ports:
- name: http
port: 8090
targetPort: 8090
nodePort: 31002
type: NodePort
apiVersion: v1
kind: ConfigMap
metadata:
name: celery-config
namespace: default
data:
MYSQL_DB: console
MYSQL_PASS: "epkafafa"
MYSQL_USER: admin
MYSQL_PORT: "3306"
MYSQL_HOST: 172.17.0.8
REDIS_HOST: "redis"
REDIS_PORT: "6379"
REDIS_PASSWORD: "None"
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: celery
name: celery
spec:
replicas: 1
selector:
matchLabels:
app: celery
template:
metadata:
labels:
app: celery
name: celery
spec:
imagePullSecrets:
- name: harbor-key
containers:
- env:
- name: MYSQL_USER
valueFrom:
configMapKeyRef:
name: celery-config
key: MYSQL_USER
- name: MYSQL_PASS
valueFrom:
configMapKeyRef:
name: celery-config
key: MYSQL_PASS
- name: MYSQL_HOST
valueFrom:
configMapKeyRef:
name: celery-config
key: MYSQL_HOST
- name: MYSQL_PORT
valueFrom:
configMapKeyRef:
name: celery-config
key: MYSQL_PORT
- name: MYSQL_DB
valueFrom:
configMapKeyRef:
name: celery-config
key: MYSQL_DB
- name: REDIS_HOST
valueFrom:
configMapKeyRef:
name: celery-config
key: REDIS_HOST
- name: REDIS_PORT
valueFrom:
configMapKeyRef:
name: celery-config
key: REDIS_PORT
- name: REDIS_PASSWORD
valueFrom:
configMapKeyRef:
name: celery-config
key: REDIS_PASSWORD
image: 172.17.0.7:8443/powerkeeper/powerkeeper-celery:v1.0.348.Beta-26-g57c05048
imagePullPolicy: Always
command:
- ./docker-entrypoint.sh
name: celery-beat
- env:
- name: MYSQL_USER
valueFrom:
configMapKeyRef:
name: celery-config
key: MYSQL_USER
- name: MYSQL_PASS
valueFrom:
configMapKeyRef:
name: celery-config
key: MYSQL_PASS
- name: MYSQL_HOST
valueFrom:
configMapKeyRef:
name: celery-config
key: MYSQL_HOST
- name: MYSQL_PORT
valueFrom:
configMapKeyRef:
name: celery-config
key: MYSQL_PORT
- name: MYSQL_DB
valueFrom:
configMapKeyRef:
name: celery-config
key: MYSQL_DB
- name: REDIS_HOST
valueFrom:
configMapKeyRef:
name: celery-config
key: REDIS_HOST
- name: REDIS_PORT
valueFrom:
configMapKeyRef:
name: celery-config
key: REDIS_PORT
- name: REDIS_PASSWORD
valueFrom:
configMapKeyRef:
name: celery-config
key: REDIS_PASSWORD
image: 172.17.0.7:8443/powerkeeper/powerkeeper-celery:v1.0.348.Beta-26-g57c05048
imagePullPolicy: Always
command:
- ./docker-flower-entrypoint.sh
name: celery-flower
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 90
preference:
matchExpressions:
- key: kubernetes.io/role
operator: In
values:
- celery
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: celery-worker
name: celery-worker
spec:
replicas: 2
selector:
matchLabels:
app: celery-worker
template:
metadata:
labels:
app: celery-worker
name: celery-worker
spec:
imagePullSecrets:
- name: harbor-key
containers:
- image: 172.17.0.7:8443/powerkeeper/powerkeeper-celery:v1.0.348.Beta-26-g57c05048
imagePullPolicy: Always
command:
- ./docker-worker-entrypoint.sh
name: celery-worker
env:
- name: MYSQL_USER
valueFrom:
configMapKeyRef:
name: celery-config
key: MYSQL_USER
- name: MYSQL_PASS
valueFrom:
configMapKeyRef:
name: celery-config
key: MYSQL_PASS
- name: MYSQL_HOST
valueFrom:
configMapKeyRef:
name: celery-config
key: MYSQL_HOST
- name: MYSQL_PORT
valueFrom:
configMapKeyRef:
name: celery-config
key: MYSQL_PORT
- name: MYSQL_DB
valueFrom:
configMapKeyRef:
name: celery-config
key: MYSQL_DB
- name: REDIS_HOST
valueFrom:
configMapKeyRef:
name: celery-config
key: REDIS_HOST
- name: REDIS_PORT
valueFrom:
configMapKeyRef:
name: celery-config
key: REDIS_PORT
- name: REDIS_PASSWORD
valueFrom:
configMapKeyRef:
name: celery-config
key: REDIS_PASSWORD
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 90
preference:
matchExpressions:
- key: kubernetes.io/role
operator: In
values:
- celery
apiVersion: v1
kind: Service
metadata:
name: celery
spec:
selector:
app: celery
ports:
- name: http
port: 5556
targetPort: 5556
nodePort: 31003 # must be unique; 31006 is already used by the mysql service
type: NodePort
apiVersion: batch/v1beta1
kind: CronJob
metadata:
labels:
app: datax-cycle-job
name: datax-cycle-job
namespace: default
spec:
schedule: "0,5,10,15,20,25,30,35,40,45,50,55 * * * *"
jobTemplate:
spec:
template:
metadata:
labels:
app: datax-cycle-job
spec:
restartPolicy: OnFailure
imagePullSecrets:
- name: harbor-key
containers:
- image: 172.17.0.7:8443/powerkeeper/powerkeeper-etl/cyclejobs:v1.0.170.Beta-52-g6e10c5ab
name: datax-1
env:
- name: EXECJOB
value: test_oa_real_data.json
- image: 172.17.0.7:8443/powerkeeper/powerkeeper-etl/cyclejobs:v1.0.170.Beta-52-g6e10c5ab
name: datax-2
env:
- name: EXECJOB
value: oa_real_data.json
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 90
preference:
matchExpressions:
- key: deploy.app/etl
operator: In
values:
- datax
apiVersion: batch/v1beta1
kind: CronJob
metadata:
labels:
app: datax-job
name: datax-job
namespace: default
spec:
schedule: "24,54 * * * *"
jobTemplate:
spec:
template:
metadata:
labels:
app: datax-job
spec:
restartPolicy: OnFailure
imagePullSecrets:
- name: harbor-key
containers:
- image: 172.17.0.7:8443/powerkeeper/powerkeeper-etl/datax:v1.0.171.Beta-6-g9d2b267e
name: datax
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 90
preference:
matchExpressions:
- key: deploy.app/etl
operator: In
values:
- datax
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: maxwell-app
name: maxwell-app
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: maxwell-app
template:
metadata:
labels:
app: maxwell-app
spec:
containers:
- image: 172.17.0.7:8000/library/maxwell:v1
name: maxwell
command:
- "/bin/bash"
- "-c"
- "bin/maxwell --config=/app/config.cnf"
volumeMounts:
- name: maxwell-config
mountPath: /app/config.cnf
subPath: maxwell.cnf
volumes:
- name: maxwell-config
configMap:
name: maxwell-config
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 90
preference:
matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- 172.17.0.15
apiVersion: v1
kind: ConfigMap
metadata:
name: incdb-config
namespace: default
data:
config.py: |
from addict import Dict
KAFKA = Dict({
"bootstrap_servers": ["bootstrap.kafka.svc.cluster.local:9092"],
"enable_auto_commit": True,
"auto_commit_interval_ms": 3000,
"consumer_timeout_ms": float('inf'),
"auto_offset_reset": "earliest",
"default_group_id": "etl_kk",
"customers": [],
})
SQLARCHEMY = Dict({
"database":
"mysql+mysqlconnector://root:liando@211.159.161.242:8333/power_oa",
"pool": 5,
"max_overflow": 10,
"pool_timeout": 30,
"pool_pre_ping": True
})
apiVersion: v1
kind: Service
metadata:
name: syncdb-im-real
namespace: default
spec:
clusterIP: None
selector:
app: syncdb-im-real
apiVersion: apps/v1
kind: StatefulSet
metadata:
labels:
app: syncdb-im-real
name: syncdb-im-real
namespace: default
spec:
serviceName: syncdb-im-real
replicas: 1
selector:
matchLabels:
app: syncdb-im-real
template:
metadata:
labels:
app: syncdb-im-real
spec:
containers:
- image: 172.17.0.7:8000/library/ehousekeeper-etl/incdb:b293d9d
name: incdb
command:
- "bash"
- "-c"
- "python /opt/etl/im_real_customer.py"
volumeMounts:
- name: incdb-config
mountPath: /opt/etc/etl
- name: data
mountPath: /data
initContainers:
- name: datax-initdb
image: 172.17.0.7:8000/library/ehousekeeper-etl/initdb:b293d9d
command:
- "bash"
- "-c"
- "/opt/etl/bin/init_db"
- name: datax-exec-jobs
image: 172.17.0.7:8000/library/ehousekeeper-etl/datax:b293d9d
env:
- name: EXECJOB
value: "im_real.json"
volumeMounts:
- name: data
mountPath: /data
volumes:
- name: incdb-config
configMap:
name: incdb-config
- name: data
emptyDir: {}
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 90
preference:
matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- 172.17.0.15
apiVersion: apps/v1
kind: StatefulSet
metadata:
labels:
app: syncdb-other
name: syncdb-other
namespace: default
spec:
serviceName: syncdb-other
replicas: 1
selector:
matchLabels:
app: syncdb-other
template:
metadata:
labels:
app: syncdb-other
spec:
containers:
- image: 172.17.0.7:8000/library/ehousekeeper-etl/incdb:b293d9d
name: incdb
command:
- "bash"
- "-c"
- "python /opt/etl/customer.py"
volumeMounts:
- name: incdb-config
mountPath: /opt/etc/etl
- name: data
mountPath: /data
initContainers:
- name: wait-im-real-job
image: busybox:1.32.0
command:
- "sh"
- "-c"
- "until ping -c 4 syncdb-im-real-0.syncdb-im-real; do echo waiting for sync im real job; sleep 2; done;"
- name: datax-exec-jobs
image: 172.17.0.7:8000/library/ehousekeeper-etl/datax:b293d9d
env:
- name: NOEXECJOB
value: "im_real.json"
volumeMounts:
- name: data
mountPath: /data
volumes:
- name: incdb-config
configMap:
name: incdb-config
- name: data
emptyDir: {}
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 90
preference:
matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- 172.17.0.15
apiVersion: v1
kind: Service
metadata:
name: frontend
spec:
selector:
app: frontend
ports:
- name: http
port: 80
targetPort: 80
nodePort: 31004
type: NodePort
apiVersion: v1
kind: Service
metadata:
name: ironman
spec:
selector:
app: ironman
ports:
- name: http
port: 80
targetPort: 80
nodePort: 31005
type: NodePort
apiVersion: v1
kind: ConfigMap
metadata:
name: mysql-config
namespace: default
data:
MYSQL_PASS: "123456"
r.sql: |
CREATE DATABASE IF NOT EXISTS console DEFAULT CHARSET utf8 COLLATE utf8_general_ci;
CREATE DATABASE IF NOT EXISTS test_console DEFAULT CHARSET utf8 COLLATE utf8_general_ci;
SET GLOBAL sql_mode = 'NO_ENGINE_SUBSTITUTION';
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: mysql
name: mysql
spec:
replicas: 1
selector:
matchLabels:
app: mysql
template:
metadata:
labels:
app: mysql
name: mysql
spec:
containers:
- image: mysql:5.7
name: mysql
ports:
- containerPort: 3306
protocol: TCP
env:
- name: MYSQL_ROOT_PASSWORD
valueFrom:
configMapKeyRef:
name: backend-config
key: MYSQL_PASS
volumeMounts:
- mountPath: /docker-entrypoint-initdb.d/r.sql
name: mysql-sql-config-volume
subPath: r.sql
- mountPath: /var/lib/mysql
name: mysql
volumes:
- name: mysql-sql-config-volume
configMap:
name: mysql-config
- name: mysql
hostPath:
path: /mysql
type: Directory
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 90
preference:
matchExpressions:
- key: deploy/app
operator: In
values:
- mysql
apiVersion: v1
kind: Service
metadata:
name: mysql
spec:
selector:
app: mysql
ports:
- name: mysql
port: 3306
targetPort: 3306
type: ClusterIP