Add Nacos to the project

This commit is contained in:
dengqichen 2024-08-14 13:23:50 +08:00
parent 6342d3e324
commit c362baa0c6
48 changed files with 3242 additions and 19 deletions

2
.gitignore vendored
View File

@@ -23,3 +23,5 @@
hs_err_pid*
/deploy-ease-core/target/
.flattened-pom.xml
/bin/nacos-server-2.3.0/logs/
/bin/nacos-server-2.3.0/data/protocol/raft/*/log/

24
.idea/GrepConsole.xml Normal file
View File

@@ -0,0 +1,24 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="GrepConsole">
<option name="pinnedGrepConsolesState">
<PinnedGrepConsolesState>
<option name="map">
<map>
<entry>
<key>
<RunConfigurationRef>
<option name="icon" value="springBoot.svg)" />
<option name="name" value="DeployEaseApplication" />
</RunConfigurationRef>
</key>
<value>
<Pins />
</value>
</entry>
</map>
</option>
</PinnedGrepConsolesState>
</option>
</component>
</project>

BIN
bin/nacos-logo.ico Normal file

Binary file not shown (894 B).

BIN
bin/nacos-logo.png Normal file

Binary file not shown (1.5 KiB).

View File

@@ -0,0 +1,316 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (properties) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
------
This product has a bundle logback, which is available under the EPL v1.0 License.
The source code of logback can be found at https://github.com/qos-ch/logback.
Logback LICENSE
---------------
Logback: the reliable, generic, fast and flexible logging framework.
Copyright (C) 1999-2015, QOS.ch. All rights reserved.
This program and the accompanying materials are dual-licensed under
either the terms of the Eclipse Public License v1.0 as published by
the Eclipse Foundation
or (per the licensee's choosing)
under the terms of the GNU Lesser General Public License version 2.1
as published by the Free Software Foundation.
------
This product has a bundle slf4j, which is available under the MIT License.
The source code of slf4j can be found at https://github.com/qos-ch/slf4j.
Copyright (c) 2004-2017 QOS.ch
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
------
This product has a bundle javassist, which is available under the ASL2 License.
The source code of javassist can be found at https://github.com/jboss-javassist/javassist.
Copyright (C) 1999- by Shigeru Chiba, All rights reserved.
Javassist (JAVA programming ASSISTant) makes Java bytecode manipulation simple.
It is a class library for editing bytecodes in Java; it enables Java programs to define a new class
at runtime and to modify a class file when the JVM loads it. Unlike other similar bytecode editors,
Javassist provides two levels of API: source level and bytecode level. If the users use the source- level API,
they can edit a class file without knowledge of the specifications of the Java bytecode.
The whole API is designed with only the vocabulary of the Java language.
You can even specify inserted bytecode in the form of source text; Javassist compiles it on the fly.
On the other hand, the bytecode-level API allows the users to directly edit a class file as other editors.
This software is distributed under the Mozilla Public License Version 1.1,
the GNU Lesser General Public License Version 2.1 or later, or the Apache License Version 2.0.
------
This product has a bundle jna, which is available under the ASL2 License.
The source code of jna can be found at https://github.com/java-native-access/jna.
This copy of JNA is licensed under the
Apache (Software) License, version 2.0 ("the License").
See the License for details about distribution rights, and the
specific rights regarding derivate works.
You may obtain a copy of the License at:
http://www.apache.org/licenses/
A copy is also included in the downloadable source code package
containing JNA, in file "AL2.0", under the same directory
as this file.
------
This product has a bundle guava, which is available under the ASL2 License.
The source code of guava can be found at https://github.com/google/guava.
Copyright (C) 2007 The Guava authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
------
This product has a bundle OpenMessaging, which is available under the ASL2 License.
The source code of OpenMessaging can be found at https://github.com/openmessaging/openmessaging.
Copyright (C) 2017 The OpenMessaging authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,36 @@
Nacos
Copyright 2018-2019 The Apache Software Foundation
This product includes software developed at
The Alibaba MiddleWare Group.
------
This product has a bundle netty:
The Netty Project
=================
Please visit the Netty web site for more information:
* http://netty.io/
Copyright 2014 The Netty Project
The Netty Project licenses this file to you under the Apache License,
version 2.0 (the "License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
Also, please refer to each LICENSE.<component>.txt file, which is located in
the 'license' directory of the distribution file, for the license terms of the
components that this product depends on.
------
This product has a bundle commons-lang, which includes software from the Spring Framework,
under the Apache License 2.0 (see: StringUtils.containsWhitespace())

View File

@@ -0,0 +1,40 @@
@echo off
:: Copyright 1999-2018 Alibaba Group Holding Ltd.
:: Licensed under the Apache License, Version 2.0 (the "License");
:: you may not use this file except in compliance with the License.
:: You may obtain a copy of the License at
::
:: http://www.apache.org/licenses/LICENSE-2.0
::
:: Unless required by applicable law or agreed to in writing, software
:: distributed under the License is distributed on an "AS IS" BASIS,
:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
:: See the License for the specific language governing permissions and
:: limitations under the License.
if not exist "%JAVA_HOME%\bin\jps.exe" (
echo Please set the JAVA_HOME variable in your environment to the correct JDK directory.
echo JDK8 or later is recommended!
EXIT /B 1
)
setlocal
set "PATH=%JAVA_HOME%\bin;%PATH%"
echo Killing Nacos server...
:: Find and kill Nacos server process
set "NACOS_RUNNING=false"
for /f "tokens=1" %%i in ('jps -m ^| find "nacos.nacos"') do (
set "NACOS_RUNNING=true"
taskkill /F /PID %%i
)
if "%NACOS_RUNNING%"=="true" (
echo Nacos server stopped.
) else (
echo Nacos server is not running.
)
echo Done!

View File

@@ -0,0 +1,28 @@
#!/bin/bash
# Copyright 1999-2018 Alibaba Group Holding Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cd `dirname $0`/../target
target_dir=`pwd`
pid=`pgrep -f nacos.nacos`
if [ -z "$pid" ] ; then
echo "No nacosServer running."
exit -1;
fi
echo "The nacosServer(${pid}) is running..."
kill ${pid}
echo "Send shutdown request to nacosServer(${pid}) OK"

View File

@@ -0,0 +1 @@
./startup.cmd -m standalone

View File

@@ -0,0 +1,95 @@
@echo off
rem Copyright 1999-2018 Alibaba Group Holding Ltd.
rem Licensed under the Apache License, Version 2.0 (the "License");
rem you may not use this file except in compliance with the License.
rem You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
if not exist "%JAVA_HOME%\bin\java.exe" echo Please set the JAVA_HOME variable in your environment, We need java(x64)! jdk8 or later is better! & EXIT /B 1
set "JAVA=%JAVA_HOME%\bin\java.exe"
setlocal enabledelayedexpansion
set BASE_DIR=%~dp0
rem added double quotation marks to avoid the issue caused by the folder names containing spaces.
rem removed the last 5 chars(which means \bin\) to get the base DIR.
set BASE_DIR="%BASE_DIR:~0,-5%"
set CUSTOM_SEARCH_LOCATIONS=file:%BASE_DIR%/conf/
set MODE="cluster"
set FUNCTION_MODE="all"
set SERVER=nacos-server
set MODE_INDEX=-1
set FUNCTION_MODE_INDEX=-1
set SERVER_INDEX=-1
set EMBEDDED_STORAGE_INDEX=-1
set EMBEDDED_STORAGE=""
set i=0
for %%a in (%*) do (
if "%%a" == "-m" ( set /a MODE_INDEX=!i!+1 )
if "%%a" == "-f" ( set /a FUNCTION_MODE_INDEX=!i!+1 )
if "%%a" == "-s" ( set /a SERVER_INDEX=!i!+1 )
if "%%a" == "-p" ( set /a EMBEDDED_STORAGE_INDEX=!i!+1 )
set /a i+=1
)
set i=0
for %%a in (%*) do (
if %MODE_INDEX% == !i! ( set MODE="%%a" )
if %FUNCTION_MODE_INDEX% == !i! ( set FUNCTION_MODE="%%a" )
if %SERVER_INDEX% == !i! (set SERVER="%%a")
if %EMBEDDED_STORAGE_INDEX% == !i! (set EMBEDDED_STORAGE="%%a")
set /a i+=1
)
rem if nacos startup mode is standalone
if %MODE% == "standalone" (
echo "nacos is starting with standalone"
set "NACOS_OPTS=-Dnacos.standalone=true"
set "NACOS_JVM_OPTS=-Xms512m -Xmx512m -Xmn256m"
)
rem if nacos startup mode is cluster
if %MODE% == "cluster" (
echo "nacos is starting with cluster"
if %EMBEDDED_STORAGE% == "embedded" (
set "NACOS_OPTS=-DembeddedStorage=true"
)
set "NACOS_JVM_OPTS=-server -Xms2g -Xmx2g -Xmn1g -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=320m -XX:-OmitStackTraceInFastThrow -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=%BASE_DIR%\logs\java_heapdump.hprof -XX:-UseLargePages"
)
rem set nacos's functionMode
if %FUNCTION_MODE% == "config" (
set "NACOS_OPTS=%NACOS_OPTS% -Dnacos.functionMode=config"
)
if %FUNCTION_MODE% == "naming" (
set "NACOS_OPTS=%NACOS_OPTS% -Dnacos.functionMode=naming"
)
rem set nacos options
set "NACOS_OPTS=%NACOS_OPTS% -Dloader.path=%BASE_DIR%/plugins,%BASE_DIR%/plugins/health,%BASE_DIR%/plugins/cmdb,%BASE_DIR%/plugins/selector"
set "NACOS_OPTS=%NACOS_OPTS% -Dnacos.home=%BASE_DIR%"
set "NACOS_OPTS=%NACOS_OPTS% -jar %BASE_DIR%\target\%SERVER%.jar"
rem set nacos spring config location
set "NACOS_CONFIG_OPTS=--spring.config.additional-location=%CUSTOM_SEARCH_LOCATIONS%"
rem set nacos log4j file location
set "NACOS_LOG4J_OPTS=--logging.config=%BASE_DIR%/conf/nacos-logback.xml"
set COMMAND="%JAVA%" %NACOS_JVM_OPTS% %NACOS_OPTS% %NACOS_CONFIG_OPTS% %NACOS_LOG4J_OPTS% nacos.nacos %*
rem start nacos command
%COMMAND%

View File

@@ -0,0 +1,149 @@
#!/bin/bash
# Copyright 1999-2018 Alibaba Group Holding Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cygwin=false
darwin=false
os400=false
case "`uname`" in
CYGWIN*) cygwin=true;;
Darwin*) darwin=true;;
OS400*) os400=true;;
esac
error_exit ()
{
echo "ERROR: $1 !!"
exit 1
}
[ ! -e "$JAVA_HOME/bin/java" ] && JAVA_HOME=$HOME/jdk/java
[ ! -e "$JAVA_HOME/bin/java" ] && JAVA_HOME=/usr/java
[ ! -e "$JAVA_HOME/bin/java" ] && JAVA_HOME=/opt/taobao/java
[ ! -e "$JAVA_HOME/bin/java" ] && unset JAVA_HOME
if [ -z "$JAVA_HOME" ]; then
if $darwin; then
if [ -x '/usr/libexec/java_home' ] ; then
export JAVA_HOME=`/usr/libexec/java_home`
elif [ -d "/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home" ]; then
export JAVA_HOME="/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home"
fi
else
JAVA_PATH=`dirname $(readlink -f $(which javac))`
if [ "x$JAVA_PATH" != "x" ]; then
export JAVA_HOME=`dirname $JAVA_PATH 2>/dev/null`
fi
fi
if [ -z "$JAVA_HOME" ]; then
error_exit "Please set the JAVA_HOME variable in your environment, We need java(x64)! jdk8 or later is better!"
fi
fi
export SERVER="nacos-server"
export MODE="cluster"
export FUNCTION_MODE="all"
export MEMBER_LIST=""
export EMBEDDED_STORAGE=""
while getopts ":m:f:s:c:p:" opt
do
case $opt in
m)
MODE=$OPTARG;;
f)
FUNCTION_MODE=$OPTARG;;
s)
SERVER=$OPTARG;;
c)
MEMBER_LIST=$OPTARG;;
p)
EMBEDDED_STORAGE=$OPTARG;;
?)
echo "Unknown parameter"
exit 1;;
esac
done
export JAVA_HOME
export JAVA="$JAVA_HOME/bin/java"
export BASE_DIR=`cd $(dirname $0)/..; pwd`
export CUSTOM_SEARCH_LOCATIONS=file:${BASE_DIR}/conf/
#===========================================================================================
# JVM Configuration
#===========================================================================================
if [[ "${MODE}" == "standalone" ]]; then
JAVA_OPT="${JAVA_OPT} -Xms512m -Xmx512m -Xmn256m"
JAVA_OPT="${JAVA_OPT} -Dnacos.standalone=true"
else
if [[ "${EMBEDDED_STORAGE}" == "embedded" ]]; then
JAVA_OPT="${JAVA_OPT} -DembeddedStorage=true"
fi
JAVA_OPT="${JAVA_OPT} -server -Xms2g -Xmx2g -Xmn1g -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=320m"
JAVA_OPT="${JAVA_OPT} -XX:-OmitStackTraceInFastThrow -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${BASE_DIR}/logs/java_heapdump.hprof"
JAVA_OPT="${JAVA_OPT} -XX:-UseLargePages"
fi
if [[ "${FUNCTION_MODE}" == "config" ]]; then
JAVA_OPT="${JAVA_OPT} -Dnacos.functionMode=config"
elif [[ "${FUNCTION_MODE}" == "naming" ]]; then
JAVA_OPT="${JAVA_OPT} -Dnacos.functionMode=naming"
fi
JAVA_OPT="${JAVA_OPT} -Dnacos.member.list=${MEMBER_LIST}"
JAVA_MAJOR_VERSION=$($JAVA -version 2>&1 | sed -E -n 's/.* version "([0-9]*).*$/\1/p')
if [[ "$JAVA_MAJOR_VERSION" -ge "9" ]] ; then
JAVA_OPT="${JAVA_OPT} -Xlog:gc*:file=${BASE_DIR}/logs/nacos_gc.log:time,tags:filecount=10,filesize=100m"
else
JAVA_OPT="${JAVA_OPT} -XX:+UseConcMarkSweepGC -XX:+UseCMSCompactAtFullCollection -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled -XX:SoftRefLRUPolicyMSPerMB=0 -XX:+CMSClassUnloadingEnabled -XX:SurvivorRatio=8 "
JAVA_OPT_EXT_FIX="-Djava.ext.dirs=${JAVA_HOME}/jre/lib/ext:${JAVA_HOME}/lib/ext"
JAVA_OPT="${JAVA_OPT} -Xloggc:${BASE_DIR}/logs/nacos_gc.log -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M"
fi
JAVA_OPT="${JAVA_OPT} -Dloader.path=${BASE_DIR}/plugins,${BASE_DIR}/plugins/health,${BASE_DIR}/plugins/cmdb,${BASE_DIR}/plugins/selector"
JAVA_OPT="${JAVA_OPT} -Dnacos.home=${BASE_DIR}"
JAVA_OPT="${JAVA_OPT} -jar ${BASE_DIR}/target/${SERVER}.jar"
JAVA_OPT="${JAVA_OPT} ${JAVA_OPT_EXT}"
JAVA_OPT="${JAVA_OPT} --spring.config.additional-location=${CUSTOM_SEARCH_LOCATIONS}"
JAVA_OPT="${JAVA_OPT} --logging.config=${BASE_DIR}/conf/nacos-logback.xml"
JAVA_OPT="${JAVA_OPT} --server.max-http-header-size=524288"
if [ ! -d "${BASE_DIR}/logs" ]; then
mkdir ${BASE_DIR}/logs
fi
echo "$JAVA $JAVA_OPT_EXT_FIX ${JAVA_OPT}"
if [[ "${MODE}" == "standalone" ]]; then
echo "nacos is starting with standalone"
else
echo "nacos is starting with cluster"
fi
# check the start.out log output file
if [ ! -f "${BASE_DIR}/logs/start.out" ]; then
touch "${BASE_DIR}/logs/start.out"
fi
# start
echo "$JAVA $JAVA_OPT_EXT_FIX ${JAVA_OPT}" > ${BASE_DIR}/logs/start.out 2>&1 &
if [[ "$JAVA_OPT_EXT_FIX" == "" ]]; then
nohup "$JAVA" ${JAVA_OPT} nacos.nacos >> ${BASE_DIR}/logs/start.out 2>&1 &
else
nohup "$JAVA" "$JAVA_OPT_EXT_FIX" ${JAVA_OPT} nacos.nacos >> ${BASE_DIR}/logs/start.out 2>&1 &
fi
echo "nacos is starting. you can check the ${BASE_DIR}/logs/start.out"

View File

@@ -0,0 +1,27 @@
/*
* Copyright 1999-2018 Alibaba Group Holding Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
ALTER TABLE `config_info_tag`
MODIFY COLUMN `src_ip` varchar(50) CHARACTER SET utf8 COLLATE utf8_bin NULL DEFAULT NULL COMMENT 'source ip' AFTER `src_user`;
ALTER TABLE `his_config_info`
MODIFY COLUMN `src_ip` varchar(50) CHARACTER SET utf8 COLLATE utf8_bin NULL DEFAULT NULL AFTER `src_user`;
ALTER TABLE `config_info`
MODIFY COLUMN `src_ip` varchar(50) CHARACTER SET utf8 COLLATE utf8_bin NULL DEFAULT NULL COMMENT 'source ip' AFTER `src_user`;
ALTER TABLE `config_info_beta`
MODIFY COLUMN `src_ip` varchar(50) CHARACTER SET utf8 COLLATE utf8_bin NULL DEFAULT NULL COMMENT 'source ip' AFTER `src_user`;
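
These ALTER statements only redefine and reposition the src_ip column. One way to check the result against the MySQL datasource configured in the application.properties shown later in this diff; host, schema and the root/root credentials are taken from db.url.0/db.user.0 there and are for illustration only:

# Show the src_ip definition for each table touched by the upgrade script.
for t in config_info config_info_tag config_info_beta his_config_info; do
  mysql -h 127.0.0.1 -u root -proot nacos -e "SHOW FULL COLUMNS FROM $t LIKE 'src_ip';"
done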

View File

@@ -0,0 +1 @@
Authentication is not enabled for the current cluster. Please refer to the <a href="https://nacos.io/zh-cn/docs/v2/guide/user/auth.html">documentation</a> to enable it.

View File

@@ -0,0 +1,299 @@
#
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#*************** Spring Boot Related Configurations ***************#
### Default web context path:
server.servlet.contextPath=/nacos
### Include message field
server.error.include-message=ALWAYS
### Default web server port:
server.port=8848
#*************** Network Related Configurations ***************#
### If prefer hostname over ip for Nacos server addresses in cluster.conf:
# nacos.inetutils.prefer-hostname-over-ip=false
### Specify local server's IP:
# nacos.inetutils.ip-address=
#*************** Config Module Related Configurations ***************#
### If use MySQL as datasource:
### Deprecated configuration property, it is recommended to use `spring.sql.init.platform` replaced.
spring.datasource.platform=mysql
spring.sql.init.platform=mysql
### Count of DB:
db.num=1
### Connect URL of DB:
db.url.0=jdbc:mysql://127.0.0.1:3306/nacos?characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC
db.user.0=root
db.password.0=root
### Connection pool configuration: hikariCP
db.pool.config.connectionTimeout=30000
db.pool.config.validationTimeout=10000
db.pool.config.maximumPoolSize=20
db.pool.config.minimumIdle=2
### the maximum retry times for push
nacos.config.push.maxRetryTime=50
#*************** Naming Module Related Configurations ***************#
### If enable data warmup. If set to false, the server would accept request without local data preparation:
# nacos.naming.data.warmup=true
### If enable the instance auto expiration, kind like of health check of instance:
# nacos.naming.expireInstance=true
### Add in 2.0.0
### The interval to clean empty service, unit: milliseconds.
# nacos.naming.clean.empty-service.interval=60000
### The expired time to clean empty service, unit: milliseconds.
# nacos.naming.clean.empty-service.expired-time=60000
### The interval to clean expired metadata, unit: milliseconds.
# nacos.naming.clean.expired-metadata.interval=5000
### The expired time to clean metadata, unit: milliseconds.
# nacos.naming.clean.expired-metadata.expired-time=60000
### The delay time before push task to execute from service changed, unit: milliseconds.
# nacos.naming.push.pushTaskDelay=500
### The timeout for push task execute, unit: milliseconds.
# nacos.naming.push.pushTaskTimeout=5000
### The delay time for retrying failed push task, unit: milliseconds.
# nacos.naming.push.pushTaskRetryDelay=1000
### Since 2.0.3
### The expired time for inactive client, unit: milliseconds.
# nacos.naming.client.expired.time=180000
#*************** CMDB Module Related Configurations ***************#
### The interval to dump external CMDB in seconds:
# nacos.cmdb.dumpTaskInterval=3600
### The interval of polling data change event in seconds:
# nacos.cmdb.eventTaskInterval=10
### The interval of loading labels in seconds:
# nacos.cmdb.labelTaskInterval=300
### If turn on data loading task:
# nacos.cmdb.loadDataAtStart=false
#***********Metrics for tomcat **************************#
server.tomcat.mbeanregistry.enabled=true
#***********Expose prometheus and health **************************#
#management.endpoints.web.exposure.include=prometheus,health
### Metrics for elastic search
management.metrics.export.elastic.enabled=false
#management.metrics.export.elastic.host=http://localhost:9200
### Metrics for influx
management.metrics.export.influx.enabled=false
#management.metrics.export.influx.db=springboot
#management.metrics.export.influx.uri=http://localhost:8086
#management.metrics.export.influx.auto-create-db=true
#management.metrics.export.influx.consistency=one
#management.metrics.export.influx.compressed=true
#*************** Access Log Related Configurations ***************#
### If turn on the access log:
server.tomcat.accesslog.enabled=true
### file name pattern, one file per hour
server.tomcat.accesslog.rotate=true
server.tomcat.accesslog.file-date-format=.yyyy-MM-dd-HH
### The access log pattern:
server.tomcat.accesslog.pattern=%h %l %u %t "%r" %s %b %D %{User-Agent}i %{Request-Source}i
### The directory of access log:
server.tomcat.basedir=file:.
#*************** Access Control Related Configurations ***************#
### If enable spring security, this option is deprecated in 1.2.0:
#spring.security.enabled=false
### The ignore urls of auth
nacos.security.ignore.urls=/,/error,/**/*.css,/**/*.js,/**/*.html,/**/*.map,/**/*.svg,/**/*.png,/**/*.ico,/console-ui/public/**,/v1/auth/**,/v1/console/health/**,/actuator/**,/v1/console/server/**
### The auth system to use, currently only 'nacos' and 'ldap' is supported:
nacos.core.auth.system.type=nacos
### If turn on auth system:
nacos.core.auth.enabled=true
nacos.core.auth.username=nacos
nacos.core.auth.password=nacos
### Turn on/off caching of auth information. By turning on this switch, the update of auth information would have a 15 seconds delay.
nacos.core.auth.caching.enabled=true
### Since 1.4.1, Turn on/off white auth for user-agent: nacos-server, only for upgrade from old version.
nacos.core.auth.enable.userAgentAuthWhite=false
### Since 1.4.1, worked when nacos.core.auth.enabled=true and nacos.core.auth.enable.userAgentAuthWhite=false.
### The two properties is the white list for auth and used by identity the request from other server.
nacos.core.auth.server.identity.key=nacosKey
nacos.core.auth.server.identity.value=nacosValue
### worked when nacos.core.auth.system.type=nacos
### The token expiration in seconds:
nacos.core.auth.plugin.nacos.token.cache.enable=false
nacos.core.auth.plugin.nacos.token.expire.seconds=18000
### The default token (Base64 String):
nacos.core.auth.plugin.nacos.token.secret.key=SecretKey012345678901234567890123456789012345678901234567890123456789
### worked when nacos.core.auth.system.type=ldap{0} is Placeholder,replace login username
#nacos.core.auth.ldap.url=ldap://localhost:389
#nacos.core.auth.ldap.basedc=dc=example,dc=org
#nacos.core.auth.ldap.userDn=cn=admin,${nacos.core.auth.ldap.basedc}
#nacos.core.auth.ldap.password=admin
#nacos.core.auth.ldap.userdn=cn={0},dc=example,dc=org
#nacos.core.auth.ldap.filter.prefix=uid
#nacos.core.auth.ldap.case.sensitive=true
#nacos.core.auth.ldap.ignore.partial.result.exception=false
#*************** Control Plugin Related Configurations ***************#
# plugin type
#nacos.plugin.control.manager.type=nacos
# local control rule storage dir, default ${nacos.home}/data/connection and ${nacos.home}/data/tps
#nacos.plugin.control.rule.local.basedir=${nacos.home}
# external control rule storage type, if exist
#nacos.plugin.control.rule.external.storage=
#*************** Config Change Plugin Related Configurations ***************#
# webhook
#nacos.core.config.plugin.webhook.enabled=false
# It is recommended to use EB https://help.aliyun.com/document_detail/413974.html
#nacos.core.config.plugin.webhook.url=http://localhost:8080/webhook/send?token=***
# The content push max capacity ,byte
#nacos.core.config.plugin.webhook.contentMaxCapacity=102400
# whitelist
#nacos.core.config.plugin.whitelist.enabled=false
# The import file suffixs
#nacos.core.config.plugin.whitelist.suffixs=xml,text,properties,yaml,html
# fileformatcheck,which validate the import file of type and content
#nacos.core.config.plugin.fileformatcheck.enabled=false
#*************** Istio Related Configurations ***************#
### If turn on the MCP server:
nacos.istio.mcp.server.enabled=false
#*************** Core Related Configurations ***************#
### set the WorkerID manually
# nacos.core.snowflake.worker-id=
### Member-MetaData
# nacos.core.member.meta.site=
# nacos.core.member.meta.adweight=
# nacos.core.member.meta.weight=
### MemberLookup
### Addressing pattern category, If set, the priority is highest
# nacos.core.member.lookup.type=[file,address-server]
## Set the cluster list with a configuration file or command-line argument
# nacos.member.list=192.168.16.101:8847?raft_port=8807,192.168.16.101?raft_port=8808,192.168.16.101:8849?raft_port=8809
## for AddressServerMemberLookup
# Maximum number of retries to query the address server upon initialization
# nacos.core.address-server.retry=5
## Server domain name address of [address-server] mode
# address.server.domain=jmenv.tbsite.net
## Server port of [address-server] mode
# address.server.port=8080
## Request address of [address-server] mode
# address.server.url=/nacos/serverlist
#*************** JRaft Related Configurations ***************#
### Sets the Raft cluster election timeout, default value is 5 second
# nacos.core.protocol.raft.data.election_timeout_ms=5000
### Sets the amount of time the Raft snapshot will execute periodically, default is 30 minute
# nacos.core.protocol.raft.data.snapshot_interval_secs=30
### raft internal worker threads
# nacos.core.protocol.raft.data.core_thread_num=8
### Number of threads required for raft business request processing
# nacos.core.protocol.raft.data.cli_service_thread_num=4
### raft linear read strategy. Safe linear reads are used by default, that is, the Leader tenure is confirmed by heartbeat
# nacos.core.protocol.raft.data.read_index_type=ReadOnlySafe
### rpc request timeout, default 5 seconds
# nacos.core.protocol.raft.data.rpc_request_timeout_ms=5000
#*************** Distro Related Configurations ***************#
### Distro data sync delay time, when sync task delayed, task will be merged for same data key. Default 1 second.
# nacos.core.protocol.distro.data.sync.delayMs=1000
### Distro data sync timeout for one sync data, default 3 seconds.
# nacos.core.protocol.distro.data.sync.timeoutMs=3000
### Distro data sync retry delay time when sync data failed or timeout, same behavior with delayMs, default 3 seconds.
# nacos.core.protocol.distro.data.sync.retryDelayMs=3000
### Distro data verify interval time, verify synced data whether expired for a interval. Default 5 seconds.
# nacos.core.protocol.distro.data.verify.intervalMs=5000
### Distro data verify timeout for one verify, default 3 seconds.
# nacos.core.protocol.distro.data.verify.timeoutMs=3000
### Distro data load retry delay when load snapshot data failed, default 30 seconds.
# nacos.core.protocol.distro.data.load.retryDelayMs=30000
### enable to support prometheus service discovery
#nacos.prometheus.metrics.enabled=true
### Since 2.3
#*************** Grpc Configurations ***************#
## sdk grpc(between nacos server and client) configuration
## Sets the maximum message size allowed to be received on the server.
#nacos.remote.server.grpc.sdk.max-inbound-message-size=10485760
## Sets the time(milliseconds) without read activity before sending a keepalive ping. The typical default is two hours.
#nacos.remote.server.grpc.sdk.keep-alive-time=7200000
## Sets a time(milliseconds) waiting for read activity after sending a keepalive ping. Defaults to 20 seconds.
#nacos.remote.server.grpc.sdk.keep-alive-timeout=20000
## Sets a time(milliseconds) that specify the most aggressive keep-alive time clients are permitted to configure. The typical default is 5 minutes
#nacos.remote.server.grpc.sdk.permit-keep-alive-time=300000
## cluster grpc(inside the nacos server) configuration
#nacos.remote.server.grpc.cluster.max-inbound-message-size=10485760
## Sets the time(milliseconds) without read activity before sending a keepalive ping. The typical default is two hours.
#nacos.remote.server.grpc.cluster.keep-alive-time=7200000
## Sets a time(milliseconds) waiting for read activity after sending a keepalive ping. Defaults to 20 seconds.
#nacos.remote.server.grpc.cluster.keep-alive-timeout=20000
## Sets a time(milliseconds) that specify the most aggressive keep-alive time clients are permitted to configure. The typical default is 5 minutes
#nacos.remote.server.grpc.cluster.permit-keep-alive-time=300000
## open nacos default console ui
#nacos.console.ui.enabled=true
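
With server.port=8848, contextPath /nacos and nacos.core.auth.enabled=true using the nacos/nacos account configured above, a quick smoke test against the standard Nacos v1 open API might look like the following sketch; the dataId and content values are made up for illustration:

BASE=http://127.0.0.1:8848/nacos
# Log in and extract the accessToken from the JSON response.
TOKEN=$(curl -s -X POST "$BASE/v1/auth/login" -d 'username=nacos&password=nacos' | sed -E 's/.*"accessToken":"([^"]+)".*/\1/')
# Publish a configuration entry and read it back.
curl -s -X POST "$BASE/v1/cs/configs?accessToken=$TOKEN" -d 'dataId=deploy-ease.properties&group=DEFAULT_GROUP&content=hello'
curl -s "$BASE/v1/cs/configs?accessToken=$TOKEN&dataId=deploy-ease.properties&group=DEFAULT_GROUP"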

View File

@@ -0,0 +1,271 @@
#
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#*************** Spring Boot Related Configurations ***************#
### Default web context path:
server.servlet.contextPath=/nacos
### Include message field
server.error.include-message=ALWAYS
### Default web server port:
server.port=8848
#*************** Network Related Configurations ***************#
### If prefer hostname over ip for Nacos server addresses in cluster.conf:
# nacos.inetutils.prefer-hostname-over-ip=false
### Specify local server's IP:
# nacos.inetutils.ip-address=
#*************** Config Module Related Configurations ***************#
### If use MySQL as datasource:
### Deprecated configuration property, it is recommended to use `spring.sql.init.platform` replaced.
# spring.datasource.platform=mysql
# spring.sql.init.platform=mysql
### Count of DB:
# db.num=1
### Connect URL of DB:
# db.url.0=jdbc:mysql://127.0.0.1:3306/nacos?characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC
# db.user.0=nacos
# db.password.0=nacos
### Connection pool configuration: hikariCP
db.pool.config.connectionTimeout=30000
db.pool.config.validationTimeout=10000
db.pool.config.maximumPoolSize=20
db.pool.config.minimumIdle=2
### the maximum retry times for push
nacos.config.push.maxRetryTime=50
#*************** Naming Module Related Configurations ***************#
### If enable data warmup. If set to false, the server would accept request without local data preparation:
# nacos.naming.data.warmup=true
### If enable the instance auto expiration, kind like of health check of instance:
# nacos.naming.expireInstance=true
### will be removed and replaced by `nacos.naming.clean` properties
nacos.naming.empty-service.auto-clean=true
nacos.naming.empty-service.clean.initial-delay-ms=50000
nacos.naming.empty-service.clean.period-time-ms=30000
### Add in 2.0.0
### The interval to clean empty service, unit: milliseconds.
# nacos.naming.clean.empty-service.interval=60000
### The expired time to clean empty service, unit: milliseconds.
# nacos.naming.clean.empty-service.expired-time=60000
### The interval to clean expired metadata, unit: milliseconds.
# nacos.naming.clean.expired-metadata.interval=5000
### The expired time to clean metadata, unit: milliseconds.
# nacos.naming.clean.expired-metadata.expired-time=60000
### The delay time before push task to execute from service changed, unit: milliseconds.
# nacos.naming.push.pushTaskDelay=500
### The timeout for push task execute, unit: milliseconds.
# nacos.naming.push.pushTaskTimeout=5000
### The delay time for retrying failed push task, unit: milliseconds.
# nacos.naming.push.pushTaskRetryDelay=1000
### Since 2.0.3
### The expired time for inactive client, unit: milliseconds.
# nacos.naming.client.expired.time=180000
#*************** CMDB Module Related Configurations ***************#
### The interval to dump external CMDB in seconds:
# nacos.cmdb.dumpTaskInterval=3600
### The interval of polling data change event in seconds:
# nacos.cmdb.eventTaskInterval=10
### The interval of loading labels in seconds:
# nacos.cmdb.labelTaskInterval=300
### If turn on data loading task:
# nacos.cmdb.loadDataAtStart=false
#*************** Metrics Related Configurations ***************#
### Metrics for prometheus
#management.endpoints.web.exposure.include=*
### Metrics for elastic search
management.metrics.export.elastic.enabled=false
#management.metrics.export.elastic.host=http://localhost:9200
### Metrics for influx
management.metrics.export.influx.enabled=false
#management.metrics.export.influx.db=springboot
#management.metrics.export.influx.uri=http://localhost:8086
#management.metrics.export.influx.auto-create-db=true
#management.metrics.export.influx.consistency=one
#management.metrics.export.influx.compressed=true
#*************** Access Log Related Configurations ***************#
### If turn on the access log:
server.tomcat.accesslog.enabled=true
### The access log pattern:
server.tomcat.accesslog.pattern=%h %l %u %t "%r" %s %b %D %{User-Agent}i %{Request-Source}i
### The directory of access log:
server.tomcat.basedir=file:.
#*************** Access Control Related Configurations ***************#
### If enable spring security, this option is deprecated in 1.2.0:
#spring.security.enabled=false
### The ignore urls of auth, is deprecated in 1.2.0:
nacos.security.ignore.urls=/,/error,/**/*.css,/**/*.js,/**/*.html,/**/*.map,/**/*.svg,/**/*.png,/**/*.ico,/console-ui/public/**,/v1/auth/**,/v1/console/health/**,/actuator/**,/v1/console/server/**
### The auth system to use, currently only 'nacos' and 'ldap' is supported:
nacos.core.auth.system.type=nacos
### If turn on auth system:
nacos.core.auth.enabled=false
### worked when nacos.core.auth.system.type=ldap{0} is Placeholder,replace login username
#nacos.core.auth.system.type=ldap
#nacos.core.auth.ldap.url=ldap://localhost:389
#nacos.core.auth.ldap.basedc=dc=example,dc=org
#nacos.core.auth.ldap.userDn=cn=admin,${nacos.core.auth.ldap.basedc}
#nacos.core.auth.ldap.password=admin
#nacos.core.auth.ldap.userdn=cn={0},dc=example,dc=org
#nacos.core.auth.ldap.filter.prefix=uid
#nacos.core.auth.ldap.case.sensitive=true
#nacos.core.auth.ldap.ignore.partial.result.exception=false
### worked when nacos.core.auth.system.type=nacos
### The token expiration in seconds:
nacos.core.auth.plugin.nacos.token.cache.enable=false
nacos.core.auth.plugin.nacos.token.expire.seconds=18000
### The default token (Base64 String):
nacos.core.auth.plugin.nacos.token.secret.key=
### Turn on/off caching of auth information. By turning on this switch, the update of auth information would have a 15 seconds delay.
nacos.core.auth.caching.enabled=true
### Since 1.4.1, Turn on/off white auth for user-agent: nacos-server, only for upgrade from old version.
nacos.core.auth.enable.userAgentAuthWhite=false
### Since 1.4.1, worked when nacos.core.auth.enabled=true and nacos.core.auth.enable.userAgentAuthWhite=false.
### The two properties is the white list for auth and used by identity the request from other server.
#nacos.core.auth.server.identity.key=example
#nacos.core.auth.server.identity.value=example
#*************** Istio Related Configurations ***************#
### If turn on the MCP server:
nacos.istio.mcp.server.enabled=false
#*************** Core Related Configurations ***************#
### set the WorkerID manually
# nacos.core.snowflake.worker-id=
### Member-MetaData
# nacos.core.member.meta.site=
# nacos.core.member.meta.adweight=
# nacos.core.member.meta.weight=
### MemberLookup
### Addressing pattern category, If set, the priority is highest
# nacos.core.member.lookup.type=[file,address-server]
## Set the cluster list with a configuration file or command-line argument
# nacos.member.list=192.168.16.101:8847?raft_port=8807,192.168.16.101?raft_port=8808,192.168.16.101:8849?raft_port=8809
## for AddressServerMemberLookup
# Maximum number of retries to query the address server upon initialization
# nacos.core.address-server.retry=5
## Server domain name address of [address-server] mode
# address.server.domain=jmenv.tbsite.net
## Server port of [address-server] mode
# address.server.port=8080
## Request address of [address-server] mode
# address.server.url=/nacos/serverlist
#*************** JRaft Related Configurations ***************#
### Sets the Raft cluster election timeout, default value is 5 second
# nacos.core.protocol.raft.data.election_timeout_ms=5000
### Sets the amount of time the Raft snapshot will execute periodically, default is 30 minute
# nacos.core.protocol.raft.data.snapshot_interval_secs=30
### raft internal worker threads
# nacos.core.protocol.raft.data.core_thread_num=8
### Number of threads required for raft business request processing
# nacos.core.protocol.raft.data.cli_service_thread_num=4
### raft linear read strategy. Safe linear reads are used by default, that is, the Leader tenure is confirmed by heartbeat
# nacos.core.protocol.raft.data.read_index_type=ReadOnlySafe
### rpc request timeout, default 5 seconds
# nacos.core.protocol.raft.data.rpc_request_timeout_ms=5000
#*************** Distro Related Configurations ***************#
### Distro data sync delay time, when sync task delayed, task will be merged for same data key. Default 1 second.
# nacos.core.protocol.distro.data.sync.delayMs=1000
### Distro data sync timeout for one sync data, default 3 seconds.
# nacos.core.protocol.distro.data.sync.timeoutMs=3000
### Distro data sync retry delay time when sync data failed or timeout, same behavior with delayMs, default 3 seconds.
# nacos.core.protocol.distro.data.sync.retryDelayMs=3000
### Distro data verify interval time, verify synced data whether expired for a interval. Default 5 seconds.
# nacos.core.protocol.distro.data.verify.intervalMs=5000
### Distro data verify timeout for one verify, default 3 seconds.
# nacos.core.protocol.distro.data.verify.timeoutMs=3000
### Distro data load retry delay when load snapshot data failed, default 30 seconds.
# nacos.core.protocol.distro.data.load.retryDelayMs=30000
### enable to support prometheus service discovery
#nacos.prometheus.metrics.enabled=true
### Since 2.3
#*************** Grpc Configurations ***************#
## sdk grpc(between nacos server and client) configuration
## Sets the maximum message size allowed to be received on the server.
#nacos.remote.server.grpc.sdk.max-inbound-message-size=10485760
## Sets the time(milliseconds) without read activity before sending a keepalive ping. The typical default is two hours.
#nacos.remote.server.grpc.sdk.keep-alive-time=7200000
## Sets a time(milliseconds) waiting for read activity after sending a keepalive ping. Defaults to 20 seconds.
#nacos.remote.server.grpc.sdk.keep-alive-timeout=20000
## Sets a time(milliseconds) that specify the most aggressive keep-alive time clients are permitted to configure. The typical default is 5 minutes
#nacos.remote.server.grpc.sdk.permit-keep-alive-time=300000
## cluster grpc(inside the nacos server) configuration
#nacos.remote.server.grpc.cluster.max-inbound-message-size=10485760
## Sets the time(milliseconds) without read activity before sending a keepalive ping. The typical default is two hours.
#nacos.remote.server.grpc.cluster.keep-alive-time=7200000
## Sets a time(milliseconds) waiting for read activity after sending a keepalive ping. Defaults to 20 seconds.
#nacos.remote.server.grpc.cluster.keep-alive-timeout=20000
## Sets a time(milliseconds) that specify the most aggressive keep-alive time clients are permitted to configure. The typical default is 5 minutes
#nacos.remote.server.grpc.cluster.permit-keep-alive-time=300000

View File

@@ -0,0 +1,21 @@
#
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#it is ip
#example
192.168.16.101:8847
192.168.16.102
192.168.16.103
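
cluster.conf lists one member per line as ip[:port], with the port defaulting to the main server port (8848). Once the nodes are up, membership can be checked with the v1 core API; the address below reuses the example IPs above, and an accessToken parameter would additionally be needed if auth is enabled as in the first application.properties:

# List the cluster members this node currently knows about.
curl -s "http://192.168.16.101:8848/nacos/v1/core/cluster/nodes" | tr ',' '\n' | grep '"address"'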

View File

@@ -0,0 +1 @@
The Nacos open-source console is disabled on this node. To enable it, set the nacos.console.ui.enabled property in application.properties to true. For details, see the <code>disabling the default console</code> section of the <a href="https://nacos.io/zh-cn/docs/v2/guide/admin/console-guide.html">documentation</a>.

View File

@@ -0,0 +1,231 @@
/*
* Copyright 1999-2018 Alibaba Group Holding Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
CREATE SCHEMA nacos AUTHORIZATION nacos;
CREATE TABLE config_info (
id bigint NOT NULL generated by default as identity,
data_id varchar(255) NOT NULL,
group_id varchar(128) NOT NULL,
tenant_id varchar(128) default '',
app_name varchar(128),
content CLOB,
md5 varchar(32) DEFAULT NULL,
gmt_create timestamp NOT NULL DEFAULT '2010-05-05 00:00:00',
gmt_modified timestamp NOT NULL DEFAULT '2010-05-05 00:00:00',
src_user varchar(128) DEFAULT NULL,
src_ip varchar(50) DEFAULT NULL,
c_desc varchar(256) DEFAULT NULL,
c_use varchar(64) DEFAULT NULL,
effect varchar(64) DEFAULT NULL,
type varchar(64) DEFAULT NULL,
c_schema LONG VARCHAR DEFAULT NULL,
encrypted_data_key LONG VARCHAR DEFAULT NULL,
constraint configinfo_id_key PRIMARY KEY (id),
constraint uk_configinfo_datagrouptenant UNIQUE (data_id,group_id,tenant_id));
CREATE INDEX configinfo_dataid_key_idx ON config_info(data_id);
CREATE INDEX configinfo_groupid_key_idx ON config_info(group_id);
CREATE INDEX configinfo_dataid_group_key_idx ON config_info(data_id, group_id);
CREATE TABLE his_config_info (
id bigint NOT NULL,
nid bigint NOT NULL generated by default as identity,
data_id varchar(255) NOT NULL,
group_id varchar(128) NOT NULL,
tenant_id varchar(128) default '',
app_name varchar(128),
content CLOB,
md5 varchar(32) DEFAULT NULL,
gmt_create timestamp NOT NULL DEFAULT '2010-05-05 00:00:00.000',
gmt_modified timestamp NOT NULL DEFAULT '2010-05-05 00:00:00.000',
src_user varchar(128),
src_ip varchar(50) DEFAULT NULL,
op_type char(10) DEFAULT NULL,
encrypted_data_key LONG VARCHAR DEFAULT NULL,
constraint hisconfiginfo_nid_key PRIMARY KEY (nid));
CREATE INDEX hisconfiginfo_dataid_key_idx ON his_config_info(data_id);
CREATE INDEX hisconfiginfo_gmt_create_idx ON his_config_info(gmt_create);
CREATE INDEX hisconfiginfo_gmt_modified_idx ON his_config_info(gmt_modified);
CREATE TABLE config_info_beta (
id bigint NOT NULL generated by default as identity,
data_id varchar(255) NOT NULL,
group_id varchar(128) NOT NULL,
tenant_id varchar(128) default '',
app_name varchar(128),
content CLOB,
beta_ips varchar(1024),
md5 varchar(32) DEFAULT NULL,
gmt_create timestamp NOT NULL DEFAULT '2010-05-05 00:00:00',
gmt_modified timestamp NOT NULL DEFAULT '2010-05-05 00:00:00',
src_user varchar(128),
src_ip varchar(50) DEFAULT NULL,
encrypted_data_key LONG VARCHAR DEFAULT NULL,
constraint configinfobeta_id_key PRIMARY KEY (id),
constraint uk_configinfobeta_datagrouptenant UNIQUE (data_id,group_id,tenant_id));
CREATE TABLE config_info_tag (
id bigint NOT NULL generated by default as identity,
data_id varchar(255) NOT NULL,
group_id varchar(128) NOT NULL,
tenant_id varchar(128) default '',
tag_id varchar(128) NOT NULL,
app_name varchar(128),
content CLOB,
md5 varchar(32) DEFAULT NULL,
gmt_create timestamp NOT NULL DEFAULT '2010-05-05 00:00:00',
gmt_modified timestamp NOT NULL DEFAULT '2010-05-05 00:00:00',
src_user varchar(128),
src_ip varchar(50) DEFAULT NULL,
constraint configinfotag_id_key PRIMARY KEY (id),
constraint uk_configinfotag_datagrouptenanttag UNIQUE (data_id,group_id,tenant_id,tag_id));
CREATE TABLE config_info_aggr (
id bigint NOT NULL generated by default as identity,
data_id varchar(255) NOT NULL,
group_id varchar(128) NOT NULL,
tenant_id varchar(128) default '',
datum_id varchar(255) NOT NULL,
app_name varchar(128),
content CLOB,
gmt_modified timestamp NOT NULL DEFAULT '2010-05-05 00:00:00',
constraint configinfoaggr_id_key PRIMARY KEY (id),
constraint uk_configinfoaggr_datagrouptenantdatum UNIQUE (data_id,group_id,tenant_id,datum_id));
CREATE TABLE app_list (
id bigint NOT NULL generated by default as identity,
app_name varchar(128) NOT NULL,
is_dynamic_collect_disabled smallint DEFAULT 0,
last_sub_info_collected_time timestamp DEFAULT '1970-01-01 08:00:00.0',
sub_info_lock_owner varchar(128),
sub_info_lock_time timestamp DEFAULT '1970-01-01 08:00:00.0',
constraint applist_id_key PRIMARY KEY (id),
constraint uk_appname UNIQUE (app_name));
CREATE TABLE app_configdata_relation_subs (
id bigint NOT NULL generated by default as identity,
app_name varchar(128) NOT NULL,
data_id varchar(255) NOT NULL,
group_id varchar(128) NOT NULL,
gmt_modified timestamp DEFAULT '2010-05-05 00:00:00',
constraint configdatarelationsubs_id_key PRIMARY KEY (id),
constraint uk_app_sub_config_datagroup UNIQUE (app_name, data_id, group_id));
CREATE TABLE app_configdata_relation_pubs (
id bigint NOT NULL generated by default as identity,
app_name varchar(128) NOT NULL,
data_id varchar(255) NOT NULL,
group_id varchar(128) NOT NULL,
gmt_modified timestamp DEFAULT '2010-05-05 00:00:00',
constraint configdatarelationpubs_id_key PRIMARY KEY (id),
constraint uk_app_pub_config_datagroup UNIQUE (app_name, data_id, group_id));
CREATE TABLE config_tags_relation (
id bigint NOT NULL,
tag_name varchar(128) NOT NULL,
tag_type varchar(64) DEFAULT NULL,
data_id varchar(255) NOT NULL,
group_id varchar(128) NOT NULL,
tenant_id varchar(128) DEFAULT '',
nid bigint NOT NULL generated by default as identity,
constraint config_tags_id_key PRIMARY KEY (nid),
constraint uk_configtagrelation_configidtag UNIQUE (id, tag_name, tag_type));
CREATE INDEX config_tags_tenant_id_idx ON config_tags_relation(tenant_id);
CREATE TABLE group_capacity (
id bigint NOT NULL generated by default as identity,
group_id varchar(128) DEFAULT '',
quota int DEFAULT 0,
usage int DEFAULT 0,
max_size int DEFAULT 0,
max_aggr_count int DEFAULT 0,
max_aggr_size int DEFAULT 0,
max_history_count int DEFAULT 0,
gmt_create timestamp DEFAULT '2010-05-05 00:00:00',
gmt_modified timestamp DEFAULT '2010-05-05 00:00:00',
constraint group_capacity_id_key PRIMARY KEY (id),
constraint uk_group_id UNIQUE (group_id));
CREATE TABLE tenant_capacity (
id bigint NOT NULL generated by default as identity,
tenant_id varchar(128) DEFAULT '',
quota int DEFAULT 0,
usage int DEFAULT 0,
max_size int DEFAULT 0,
max_aggr_count int DEFAULT 0,
max_aggr_size int DEFAULT 0,
max_history_count int DEFAULT 0,
gmt_create timestamp DEFAULT '2010-05-05 00:00:00',
gmt_modified timestamp DEFAULT '2010-05-05 00:00:00',
constraint tenant_capacity_id_key PRIMARY KEY (id),
constraint uk_tenant_id UNIQUE (tenant_id));
CREATE TABLE tenant_info (
id bigint NOT NULL generated by default as identity,
kp varchar(128) NOT NULL,
tenant_id varchar(128) DEFAULT '',
tenant_name varchar(128) DEFAULT '',
tenant_desc varchar(256) DEFAULT NULL,
create_source varchar(32) DEFAULT NULL,
gmt_create bigint NOT NULL,
gmt_modified bigint NOT NULL,
constraint tenant_info_id_key PRIMARY KEY (id),
constraint uk_tenant_info_kptenantid UNIQUE (kp,tenant_id));
CREATE INDEX tenant_info_tenant_id_idx ON tenant_info(tenant_id);
CREATE TABLE users (
username varchar(50) NOT NULL PRIMARY KEY,
password varchar(500) NOT NULL,
enabled boolean NOT NULL DEFAULT true
);
CREATE TABLE roles (
username varchar(50) NOT NULL,
role varchar(50) NOT NULL,
constraint uk_username_role UNIQUE (username,role)
);
CREATE TABLE permissions (
role varchar(50) NOT NULL,
resource varchar(512) NOT NULL,
action varchar(8) NOT NULL,
constraint uk_role_permission UNIQUE (role,resource,action)
);
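/* Default console account 'nacos'; the value below is the bcrypt hash of the default password 'nacos'. */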
INSERT INTO users (username, password, enabled) VALUES ('nacos', '$2a$10$EuWPZHzz32dJN7jexM34MOeYirDdFAZm2kuWj7VEOJhhZkDrxfvUu', TRUE);
INSERT INTO roles (username, role) VALUES ('nacos', 'ROLE_ADMIN');
/******************************************/
/*   ipv6 support                         */
/******************************************/
ALTER TABLE config_info_tag ALTER COLUMN src_ip SET DATA TYPE varchar(50);
ALTER TABLE his_config_info ALTER COLUMN src_ip SET DATA TYPE varchar(50);
ALTER TABLE config_info ALTER COLUMN src_ip SET DATA TYPE varchar(50);
ALTER TABLE config_info_beta ALTER COLUMN src_ip SET DATA TYPE varchar(50);

View File

@ -0,0 +1,213 @@
/*
* Copyright 1999-2018 Alibaba Group Holding Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
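/* MySQL schema for the Nacos config module; used when Nacos is backed by an external MySQL database. */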
/******************************************/
/*   table name = config_info   */
/******************************************/
CREATE TABLE `config_info` (
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
`group_id` varchar(128) DEFAULT NULL COMMENT 'group_id',
`content` longtext NOT NULL COMMENT 'content',
`md5` varchar(32) DEFAULT NULL COMMENT 'md5',
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'modify time',
`src_user` text COMMENT 'source user',
`src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
`app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
`tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant id',
`c_desc` varchar(256) DEFAULT NULL COMMENT 'configuration description',
`c_use` varchar(64) DEFAULT NULL COMMENT 'configuration usage',
`effect` varchar(64) DEFAULT NULL COMMENT 'description of how the configuration takes effect',
`type` varchar(64) DEFAULT NULL COMMENT 'configuration type',
`c_schema` text COMMENT 'configuration schema',
`encrypted_data_key` text NOT NULL COMMENT 'encryption key',
PRIMARY KEY (`id`),
UNIQUE KEY `uk_configinfo_datagrouptenant` (`data_id`,`group_id`,`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info';
/******************************************/
/*   table name = config_info_aggr   */
/******************************************/
CREATE TABLE `config_info_aggr` (
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
`group_id` varchar(128) NOT NULL COMMENT 'group_id',
`datum_id` varchar(255) NOT NULL COMMENT 'datum_id',
`content` longtext NOT NULL COMMENT 'content',
`gmt_modified` datetime NOT NULL COMMENT 'modify time',
`app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
`tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant id',
PRIMARY KEY (`id`),
UNIQUE KEY `uk_configinfoaggr_datagrouptenantdatum` (`data_id`,`group_id`,`tenant_id`,`datum_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info_aggr with tenant field';
/******************************************/
/*   table name = config_info_beta   */
/******************************************/
CREATE TABLE `config_info_beta` (
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
`group_id` varchar(128) NOT NULL COMMENT 'group_id',
`app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
`content` longtext NOT NULL COMMENT 'content',
`beta_ips` varchar(1024) DEFAULT NULL COMMENT 'betaIps',
`md5` varchar(32) DEFAULT NULL COMMENT 'md5',
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'modify time',
`src_user` text COMMENT 'source user',
`src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
`tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant id',
`encrypted_data_key` text NOT NULL COMMENT 'encryption key',
PRIMARY KEY (`id`),
UNIQUE KEY `uk_configinfobeta_datagrouptenant` (`data_id`,`group_id`,`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info_beta';
/******************************************/
/*   table name = config_info_tag   */
/******************************************/
CREATE TABLE `config_info_tag` (
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
`group_id` varchar(128) NOT NULL COMMENT 'group_id',
`tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant_id',
`tag_id` varchar(128) NOT NULL COMMENT 'tag_id',
`app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
`content` longtext NOT NULL COMMENT 'content',
`md5` varchar(32) DEFAULT NULL COMMENT 'md5',
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'modify time',
`src_user` text COMMENT 'source user',
`src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
PRIMARY KEY (`id`),
UNIQUE KEY `uk_configinfotag_datagrouptenanttag` (`data_id`,`group_id`,`tenant_id`,`tag_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info_tag';
/******************************************/
/*   table name = config_tags_relation   */
/******************************************/
CREATE TABLE `config_tags_relation` (
`id` bigint(20) NOT NULL COMMENT 'id',
`tag_name` varchar(128) NOT NULL COMMENT 'tag_name',
`tag_type` varchar(64) DEFAULT NULL COMMENT 'tag_type',
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
`group_id` varchar(128) NOT NULL COMMENT 'group_id',
`tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant_id',
`nid` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'nid, auto-increment id',
PRIMARY KEY (`nid`),
UNIQUE KEY `uk_configtagrelation_configidtag` (`id`,`tag_name`,`tag_type`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_tag_relation';
/******************************************/
/*   table name = group_capacity   */
/******************************************/
CREATE TABLE `group_capacity` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'primary key id',
`group_id` varchar(128) NOT NULL DEFAULT '' COMMENT 'Group ID; an empty string means the whole cluster',
`quota` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'quota; 0 means use the default value',
`usage` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'usage',
`max_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'max size of a single config in bytes; 0 means use the default value',
`max_aggr_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'max number of aggregated sub-configs; 0 means use the default value',
`max_aggr_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'max size of a sub-config inside an aggregated config, in bytes; 0 means use the default value',
`max_history_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'max number of change-history entries',
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'modify time',
PRIMARY KEY (`id`),
UNIQUE KEY `uk_group_id` (`group_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='capacity info for the cluster and each group';
/******************************************/
/*   table name = his_config_info   */
/******************************************/
CREATE TABLE `his_config_info` (
`id` bigint(20) unsigned NOT NULL COMMENT 'id',
`nid` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'nid, auto-increment id',
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
`group_id` varchar(128) NOT NULL COMMENT 'group_id',
`app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
`content` longtext NOT NULL COMMENT 'content',
`md5` varchar(32) DEFAULT NULL COMMENT 'md5',
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'modify time',
`src_user` text COMMENT 'source user',
`src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
`op_type` char(10) DEFAULT NULL COMMENT 'operation type',
`tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant id',
`encrypted_data_key` text NOT NULL COMMENT 'encryption key',
PRIMARY KEY (`nid`),
KEY `idx_gmt_create` (`gmt_create`),
KEY `idx_gmt_modified` (`gmt_modified`),
KEY `idx_did` (`data_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config history with multi-tenant support';
/******************************************/
/*   table name = tenant_capacity   */
/******************************************/
CREATE TABLE `tenant_capacity` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'primary key id',
`tenant_id` varchar(128) NOT NULL DEFAULT '' COMMENT 'Tenant ID',
`quota` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'quota; 0 means use the default value',
`usage` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'usage',
`max_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'max size of a single config in bytes; 0 means use the default value',
`max_aggr_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'max number of aggregated sub-configs',
`max_aggr_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'max size of a sub-config inside an aggregated config, in bytes; 0 means use the default value',
`max_history_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'max number of change-history entries',
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'modify time',
PRIMARY KEY (`id`),
UNIQUE KEY `uk_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='tenant capacity info';
CREATE TABLE `tenant_info` (
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
`kp` varchar(128) NOT NULL COMMENT 'kp',
`tenant_id` varchar(128) default '' COMMENT 'tenant_id',
`tenant_name` varchar(128) default '' COMMENT 'tenant_name',
`tenant_desc` varchar(256) DEFAULT NULL COMMENT 'tenant_desc',
`create_source` varchar(32) DEFAULT NULL COMMENT 'create_source',
`gmt_create` bigint(20) NOT NULL COMMENT 'create time',
`gmt_modified` bigint(20) NOT NULL COMMENT 'modify time',
PRIMARY KEY (`id`),
UNIQUE KEY `uk_tenant_info_kptenantid` (`kp`,`tenant_id`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='tenant_info';
CREATE TABLE `users` (
`username` varchar(50) NOT NULL PRIMARY KEY COMMENT 'username',
`password` varchar(500) NOT NULL COMMENT 'password',
`enabled` boolean NOT NULL COMMENT 'enabled'
);
CREATE TABLE `roles` (
`username` varchar(50) NOT NULL COMMENT 'username',
`role` varchar(50) NOT NULL COMMENT 'role',
UNIQUE INDEX `idx_user_role` (`username` ASC, `role` ASC) USING BTREE
);
CREATE TABLE `permissions` (
`role` varchar(50) NOT NULL COMMENT 'role',
`resource` varchar(255) NOT NULL COMMENT 'resource',
`action` varchar(8) NOT NULL COMMENT 'action',
UNIQUE INDEX `uk_role_permission` (`role`,`resource`,`action`) USING BTREE
);
INSERT INTO users (username, password, enabled) VALUES ('nacos', '$2a$10$EuWPZHzz32dJN7jexM34MOeYirDdFAZm2kuWj7VEOJhhZkDrxfvUu', TRUE);
INSERT INTO roles (username, role) VALUES ('nacos', 'ROLE_ADMIN');

View File

@ -0,0 +1,799 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Copyright 1999-2018 Alibaba Group Holding Ltd.
~
~ Licensed under the Apache License, Version 2.0 (the "License");
~ you may not use this file except in compliance with the License.
~ You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<configuration scan="true" scanPeriod="10 seconds">
<springProperty scope="context" name="logPath" source="nacos.logs.path" defaultValue="${nacos.home}/logs"/>
<property name="LOG_HOME" value="${logPath}"/>
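<!-- Each Nacos module below logs to its own size- and time-based rolling file under ${LOG_HOME}; the appenders differ mainly in target file and size caps. -->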
<appender name="cmdb-main"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${nacos.home}/logs/cmdb-main.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${nacos.home}/logs/cmdb-main.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>2GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>7GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="naming-server"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/naming-server.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/naming-server.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>1GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>7GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="async-naming-server" class="ch.qos.logback.classic.AsyncAppender">
<discardingThreshold>0</discardingThreshold>
<queueSize>1024</queueSize>
<neverBlock>true</neverBlock>
<appender-ref ref="naming-server"/>
</appender>
<appender name="naming-raft"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/naming-raft.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/naming-raft.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>1GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>3GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="async-naming-raft" class="ch.qos.logback.classic.AsyncAppender">
<discardingThreshold>0</discardingThreshold>
<queueSize>1024</queueSize>
<neverBlock>true</neverBlock>
<appender-ref ref="naming-raft"/>
</appender>
<appender name="naming-distro"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/naming-distro.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/naming-distro.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>1GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>3GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="async-naming-distro" class="ch.qos.logback.classic.AsyncAppender">
<discardingThreshold>0</discardingThreshold>
<queueSize>1024</queueSize>
<neverBlock>true</neverBlock>
<appender-ref ref="naming-distro"/>
</appender>
<appender name="naming-event"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/naming-event.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/naming-event.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>1GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>3GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="async-naming-event" class="ch.qos.logback.classic.AsyncAppender">
<discardingThreshold>0</discardingThreshold>
<queueSize>1024</queueSize>
<neverBlock>true</neverBlock>
<appender-ref ref="naming-event"/>
</appender>
<appender name="naming-push"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/naming-push.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/naming-push.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>1GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>3GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="naming-rt"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/naming-rt.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/naming-rt.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>1GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>3GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%msg%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="naming-performance"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/naming-performance.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/naming-performance.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>1GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>3GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!--config module logback config-->
<appender name="dumpFile"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/config-dump.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/config-dump.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>2GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>7GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="pullFile"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/config-pull.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/config-pull.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>20MB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>128MB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="fatalFile"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/config-fatal.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/config-fatal.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>20MB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>128MB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="memoryFile"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/config-memory.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/config-memory.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>20MB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>128MB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="pullCheckFile"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/config-pull-check.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/config-pull-check.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>1GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>3GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%msg%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="clientLog"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/config-client-request.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/config-client-request.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>2GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>7GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date|%msg%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="traceLog"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/config-trace.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/config-trace.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>2GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>7GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date|%msg%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="notifyLog"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/config-notify.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/config-notify.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>1GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>3GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="startLog"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/config-server.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/config-server.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>50MB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>512MB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="rootFile"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/nacos.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/nacos.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>50MB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>512MB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="nacos-address"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/nacos-address.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/nacos-address.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>2GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>7GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="istio-main"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/istio-main.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/istio-main.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>2GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>7GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="core-auth"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/core-auth.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/core-auth.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>2GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>7GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="protocol-raft"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/protocol-raft.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/protocol-raft.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>2GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>7GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="protocol-distro"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/protocol-distro.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/protocol-distro.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>2GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>7GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="nacos-cluster"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/nacos-cluster.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/nacos-cluster.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>2GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>7GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="alipay-jraft"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/alipay-jraft.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/alipay-jraft.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>2GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>7GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!--TPS control -->
<appender name="tps-control"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/tps-control.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/tps-control.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>2GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>7GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="tps-control-digest"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/tps-control-digest.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/tps-control-digest.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>2GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>7GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="tps-control-detail"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/tps-control-detail.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/tps-control-detail.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>2GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>7GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="remote"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/remote.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/remote.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>2GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>7GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="remote-digest"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/remote-digest.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/remote-digest.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>2GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>7GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="remote-push"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/remote-push.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/remote-push.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>2GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>7GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="nacos-persistence"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/nacos-persistence.log</file>
<append>true</append>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/nacos-persistence.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
<maxFileSize>2GB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>7GB</totalSizeCap>
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<Pattern>%date %level %msg%n%n</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<logger name="com.alibaba.nacos.address.main" additivity="false">
<level value="INFO"/>
<appender-ref ref="nacos-address"/>
</logger>
<logger name="com.alibaba.nacos.cmdb.main" additivity="false">
<level value="INFO"/>
<appender-ref ref="cmdb-main"/>
</logger>
<logger name="com.alibaba.nacos.core.remote" additivity="false">
<level value="DEBUG"/>
<appender-ref ref="remote"/>
</logger>
<logger name="com.alibaba.nacos.core.remote.push" additivity="false">
<level value="DEBUG"/>
<appender-ref ref="remote-push"/>
</logger>
<logger name="com.alibaba.nacos.core.remote.digest" additivity="false">
<level value="DEBUG"/>
<appender-ref ref="remote-digest"/>
</logger>
<!-- TPS Control-->
<logger name="com.alibaba.nacos.core.remote.control.digest" additivity="false">
<level value="DEBUG"/>
<appender-ref ref="tps-control-digest"/>
</logger>
<logger name="com.alibaba.nacos.core.remote.control.detail" additivity="false">
<level value="DEBUG"/>
<appender-ref ref="tps-control-detail"/>
</logger>
<logger name="com.alibaba.nacos.core.remote.control" additivity="false">
<level value="DEBUG"/>
<appender-ref ref="tps-control"/>
</logger>
<logger name="com.alibaba.nacos.naming.main" additivity="false">
<level value="INFO"/>
<appender-ref ref="async-naming-server"/>
</logger>
<logger name="com.alibaba.nacos.naming.raft" additivity="false">
<level value="INFO"/>
<appender-ref ref="async-naming-raft"/>
</logger>
<logger name="com.alibaba.nacos.naming.distro" additivity="false">
<level value="INFO"/>
<appender-ref ref="async-naming-distro"/>
</logger>
<logger name="com.alibaba.nacos.naming.event" additivity="false">
<level value="INFO"/>
<appender-ref ref="async-naming-event"/>
</logger>
<logger name="com.alibaba.nacos.naming.push" additivity="false">
<level value="INFO"/>
<appender-ref ref="naming-push"/>
</logger>
<logger name="com.alibaba.nacos.naming.rt" additivity="false">
<level value="INFO"/>
<appender-ref ref="naming-rt"/>
</logger>
<logger name="com.alibaba.nacos.naming.performance" additivity="false">
<level value="INFO"/>
<appender-ref ref="naming-performance"/>
</logger>
<logger name="com.alibaba.nacos.config.dumpLog" additivity="false">
<level value="INFO"/>
<appender-ref ref="dumpFile"/>
</logger>
<logger name="com.alibaba.nacos.config.pullLog" additivity="false">
<level value="INFO"/>
<appender-ref ref="pullFile"/>
</logger>
<logger name="com.alibaba.nacos.config.pullCheckLog" additivity="false">
<level value="INFO"/>
<appender-ref ref="pullCheckFile"/>
</logger>
<logger name="com.alibaba.nacos.config.fatal" additivity="false">
<level value="INFO"/>
<appender-ref ref="fatalFile"/>
</logger>
<logger name="com.alibaba.nacos.config.monitorLog" additivity="false">
<level value="INFO"/>
<appender-ref ref="memoryFile"/>
</logger>
<logger name="com.alibaba.nacos.config.clientLog" additivity="false">
<level value="info"/>
<appender-ref ref="clientLog"/>
</logger>
<logger name="com.alibaba.nacos.config.notifyLog" additivity="false">
<level value="INFO"/>
<appender-ref ref="notifyLog"/>
</logger>
<logger name="com.alibaba.nacos.config.traceLog" additivity="false">
<level value="info"/>
<appender-ref ref="traceLog"/>
</logger>
<logger name="com.alibaba.nacos.config.startLog" additivity="false">
<level value="INFO"/>
<appender-ref ref="startLog"/>
</logger>
<logger name="com.alibaba.nacos.istio.main" additivity="false">
<level value="DEBUG"/>
<appender-ref ref="istio-main"/>
</logger>
<logger name="com.alibaba.nacos.core.auth" additivity="false">
<level value="DEBUG"/>
<appender-ref ref="core-auth"/>
</logger>
<logger name="com.alibaba.nacos.core.protocol.raft" additivity="false">
<level value="INFO"/>
<appender-ref ref="protocol-raft"/>
</logger>
<logger name="com.alipay.sofa.jraft" additivity="false">
<level value="INFO"/>
<appender-ref ref="alipay-jraft"/>
</logger>
<logger name="com.alibaba.nacos.core.protocol.distro" additivity="false">
<level value="INFO"/>
<appender-ref ref="protocol-distro"/>
</logger>
<logger name="com.alibaba.nacos.core.cluster" additivity="false">
<level value="INFO"/>
<appender-ref ref="nacos-cluster"/>
</logger>
<logger name="com.alibaba.nacos.persistence" additivity="false">
<level value="INFO"/>
<appender-ref ref="nacos-persistence"/>
</logger>
<springProfile name="standalone">
<logger name="org.springframework">
<appender-ref ref="CONSOLE"/>
<level value="INFO"/>
</logger>
<logger name="org.apache.catalina.startup.DigesterFactory">
<appender-ref ref="CONSOLE"/>
<level value="INFO"/>
</logger>
<logger name="org.apache.catalina.util.LifecycleBase">
<appender-ref ref="CONSOLE"/>
<level value="ERROR"/>
</logger>
<logger name="org.apache.coyote.http11.Http11NioProtocol">
<appender-ref ref="CONSOLE"/>
<level value="WARN"/>
</logger>
<logger name="org.apache.tomcat.util.net.NioSelectorPool">
<appender-ref ref="CONSOLE"/>
<level value="WARN"/>
</logger>
</springProfile>
<logger name="com.alibaba.nacos.core.listener.StartingApplicationListener">
<appender-ref ref="CONSOLE"/>
<level value="INFO"/>
</logger>
<logger name="com.alibaba.nacos.common.notify.NotifyCenter">
<appender-ref ref="CONSOLE"/>
<level value="INFO"/>
</logger>
<logger name="com.alibaba.nacos.sys.file.WatchFileCenter">
<appender-ref ref="CONSOLE"/>
<level value="INFO"/>
</logger>
<logger name="com.alibaba.nacos.common.executor.ThreadPoolManager">
<appender-ref ref="CONSOLE"/>
<level value="INFO"/>
</logger>
<root>
<level value="INFO"/>
<appender-ref ref="rootFile"/>
</root>
</configuration>

View File

@ -0,0 +1,5 @@
server:
port: 9090
spring:
application:
name: deploy-ease-core

View File

@ -0,0 +1,5 @@
server:
port: 0
spring:
application:
name: deploy-ease-synchronizer-${TENANT_CODE}

View File

@ -0,0 +1,33 @@
spring:
jpa:
show-sql: true
properties:
hibernate:
format_sql: true
type: trace
datasource:
url: jdbc:mysql://localhost:3306/deploy_ease_dev?useUnicode=true&characterEncoding=utf-8&useSSL=false&serverTimezone=Asia/Shanghai
username: root
password: root
driver-class-name: com.mysql.cj.jdbc.Driver
type: com.alibaba.druid.pool.DruidDataSource
initialSize: 5
minIdle: 5
maxActive: 201
maxWait: 60000
timeBetweenEvictionRunsMillis: 60000
minEvictableIdleTimeMillis: 300000
validationQuery: SELECT 1 FROM DUAL
testWhileIdle: true
testOnBorrow: false
testOnReturn: false
poolPreparedStatements: true
maxPoolPreparedStatementPerConnectionSize: 20
useGlobalDataSourceStat: true
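# enable Druid's mergeSql statistics and flag statements slower than 500 ms as slow SQL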
connectionProperties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=500
management:
endpoints:
web:
exposure:
include: "*"

View File

@ -0,0 +1,67 @@
spring:
redis:
host: 117.78.26.85
port: 6383
password: Redis@Aps123456
lettuce:
pool:
max-active: 300
datasource:
driver-class-name: com.mysql.cj.jdbc.Driver
url: jdbc:mysql://192.168.2.201:3308/iscmtech-dev?characterEncoding=UTF-8&useUnicode=true&useSSL=false&allowPublicKeyRetrieval=true&serverTimezone=GMT%2B8&allowMultiQueries=true
username: iscmtech-dev
password: iscmtech-dev
################### Druid-specific configuration below ###########################
type: com.alibaba.druid.pool.DruidDataSource
druid:
# initial pool size
initialSize: 7
# minimum number of idle connections
minIdle: 7
# maximum number of active connections
maxActive: 10
# maximum wait time when acquiring a connection, in milliseconds
maxWait: 28000
# interval between checks for idle connections that should be closed, in milliseconds
timeBetweenEvictionRunsMillis: 60000
# minimum time a connection stays in the pool, in milliseconds
minEvictableIdleTimeMillis: 300000
# SQL used to check that a connection is still valid; must be a query
# MySQL: select 'x'
# Oracle: select 1 from dual
validationQuery: select 'x'
# when borrowing a connection that has been idle longer than timeBetweenEvictionRunsMillis, run validationQuery to check it is still valid
testWhileIdle: true
# run validationQuery on every borrow; enabling this reduces performance (default true)
testOnBorrow: false
# run validationQuery on every return; enabling this reduces performance (default true)
testOnReturn: false
poolPreparedStatements: true
# filters for monitoring/statistics interception; removing them disables SQL statistics in the monitoring UI
# 'wall' adds a SQL firewall; log4j is not used with Spring Boot here
filters: stat,config
# maximum PSCache size per connection
# a value greater than 0 automatically switches poolPreparedStatements to true
maxPoolPreparedStatementPerConnectionSize: 20
useGlobalDataSourceStat: true
# connectionProperties turns on mergeSql and slow-SQL logging
connectionProperties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=500;config.decrypt=false;config.decrypt.key=123456
# StatFilter configuration
web-stat-filter:
# disabled by default; set to true to enable
enabled: true
url-pattern: "/*"
exclusions: "*.js,*.gif,*.jpg,*.bmp,*.png,*.css,*.ico,/druid/*"
# StatViewServlet configuration
stat-view-servlet:
url-pattern: "/druid/*"
# IPs allowed to access (empty allows all)
allow:
login-username: juyunshuzi
login-password: juyunshuzi2020
# IPs denied access
deny: 192.168.11.102
# whether statistics can be reset
reset-enable: true
# enable the servlet
enabled: true

View File

@ -0,0 +1,151 @@
server:
port: 9303
spring:
application:
name: scp-service-manager
mvc:
throw-exception-if-no-handler-found: true
servlet:
multipart:
max-request-size: "2MB"
max-file-size: "2MB"
jackson:
time-zone: GMT+8
serialization:
write-dates-as-timestamps: true
main:
allow-bean-definition-overriding: true
management:
endpoints:
web:
exposure:
include: '*'
feign:
sentinel:
enabled: true
okhttp:
enabled: true
client:
config:
default:
connectTimeout: 20000
readTimeout: 20000
minio:
bucket: "scpfile"
host: "http://192.168.2.200:9090"
url: "${minio.host}/${minio.bucket}/"
access-key: "minio"
secret-key: "minio@123"
ribbon:
ReadTimeout: ${stone.ribbon.ReadTimeout:20000} # in milliseconds
ConnectTimeout: ${stone.ribbon.ConnectTimeout:20000} # in milliseconds
logging:
# raise the Nacos client log level to reduce noise
level:
com:
alibaba:
nacos: warn
mybatis-plus:
global-config:
db-config:
logic-delete-field: flag # entity field used for global logical deletion (since 3.3.0; step 2 can be skipped once this is configured)
logic-delete-value: 1 # value meaning logically deleted (default 1)
logic-not-delete-value: 0 # value meaning not deleted (default 0)
configuration:
log-impl: org.apache.ibatis.logging.stdout.StdOutImpl
type-enums-package: com.stone.commons.core.enums,com.stone.data.cebtre.model.enums
mapper-locations: classpath:/mapper/**/*Mapper.xml
# entity scan; separate multiple packages with commas or semicolons
typeAliasesPackage: com.lianyu.aps.provider.model.po
datahub:
job:
default:
# CRON expression for job execution
jobCron: "* * * ? * * 9999"
# executor primary key ID
jobGroup: 1
# executor routing strategy
executorRouteStrategy: ROUND
# executor job handler name
executorHandler: executorJobHandler
# executor job parameters (empty)
executorParam:
# blocking strategy
executorBlockStrategy: SERIAL_EXECUTION
# job execution timeout, in seconds
executorTimeout: 0
# retry count on failure
executorFailRetryCount: 0
# GLUE type
glueType: BEAN
# schedule status: 0 - stopped, 1 - running
triggerStatus: 0
# date format for incremental loads
replaceParamType: Timestamp
# owning project ID
projectId: 1
# subscription-style generic interface identifier - caller application name
appName: aps
# execution mode: 0 scheduled, 1 real-time (default 0)
executeMode: 0
# validation scope: 0 - throw as soon as one record fails validation, 1 - validate all records, record every failure and throw at the end (default 0)
validRange: 0
# callback type: 1 - api, 2 - webservice
rCallBackType: 1
# callback info - change url and apiType to the interface that receives the task status when a task is started
rCallBackInfo: "{\"url\":\"http://10.201.224.60:80/process/engine/connectedCallBack\",\"apiType\":\"POST\",\"authUserName\":\"\",\"authPassWord\":\"\",\"authType\":NULL,\"header\":{},\"param\":{},\"resultStatus\":{\"path\":\"result\",\"sucessfulVal\":\"Y\",\"failVal\":\"N\"}}"
# ID of the updating user
userId: 1
# default root directory name
dataPath: "data"
# MySQL JDBC driver
jdbcDriverClass: "com.mysql.jdbc.Driver"
# batch transfer size
batchSize: 1000
# Oracle JDBC driver
oracleJdbcDriverClass: "oracle.jdbc.OracleDriver"
# whether to paginate
isPage: 1
# metaDataPath
metaDataPath: "valueList"
# metaUrl - logical entities and other metadata
metaUrl: "http://10.201.224.59:80/meta/mg/logical_entitys/add_alter_statistic"
# metaRequestType
metaRequestType: "post"
# response format (subscription - push)
defaultReturnPattern: "{\"messageId\":{\"SETMODEL\":\"ORIGIN_HEADER\",\"PATH\":\"Message-Id\",\"ISDATE\":false,\"DEFAULT\":\"\",\"PATTERN\":\"\"},\"status\":{\"SETMODEL\":\"SUCCFLAG\",\"SUCCESS\":\"S\",\"FAIL\":\"E\"},\"code\":{\"SETMODEL\":\"SUCCFLAG\",\"SUCCESS\":\"S\",\"FAIL\":\"E\"},\"message\":{\"SETMODEL\":\"SUCCFLAG\",\"SUCCESS\":\"已经接收到消息\",\"FAIL\":\"接收消息失败\"},\"requeue\":{\"SETMODEL\":\"SUCCFLAG\",\"SUCCESS\":\"false\",\"FAIL\":\"true\"}}"
# fallback response format (E - push)
spareReturnPattern: "{\"esbInfo\":{\"instId\":{\"SETMODEL\":\"ORIGIN\",\"PATH\":\"esbInfo.instId\",\"ISDATE\":true,\"DEFAULT\":\"\",\"PATTERN\":\"\"},\"returnStatus\":{\"SETMODEL\":\"SUCCFLAG\",\"SUCCESS\":\"S\",\"FAIL\":\"E\"},\"returnCode\":{\"SETMODEL\":\"CUSTOM\"},\"returnMsg\":{\"SETMODEL\":\"SUCCMSG\",\"DEFAULT\":\"\"},\"requestTime\":{\"SETMODEL\":\"ORIGIN\",\"PATH\":\"esbInfo.requestTime\",\"ISDATE\":true,\"DEFAULT\":\"SYSDATE\",\"PATTERN\":\"YYYY-MM-DD\"},\"responseTime\":{\"SETMODEL\":\"FIXED\",\"VALUE\":\"SYSDATE\",\"ISDATE\":true,\"PATTERN\":\"yyyy-MM-dd HH:mm:ss\"},\"requeue\":{\"SETMODEL\":\"SUCCFLAG\",\"SUCCESS\":\"false\",\"FAIL\":\"true\"},\"attr1\":{\"SETMODEL\":\"FIXED\",\"VALUE\":\"NULL\",\"ISDATE\":false,\"DEFAULT\":\"\"},\"attr2\":{\"SETMODEL\":\"FIXED\",\"VALUE\":\"NULL\",\"ISDATE\":false,\"DEFAULT\":\"\"},\"attr3\":{\"SETMODEL\":\"FIXED\",\"VALUE\":\"NULL\",\"ISDATE\":false,\"DEFAULT\":\"\"}}}"
# file name
fileName: "ConfigurationDescription.txt"
# business view
businessViewUrl: "http://10.201.224.59:80/meta/mg/logical_entitys/add_alter_statistic"
# planning time series
timeSeriesUrl: "http://10.201.224.59:80/meta/mg/planning_time_seriess/timeSeriesData"
# number of records fetched per page
fetchSize: "1000"
# reader logical entity
readerLogicalUrl: "http://192.168.2.201:30102/meta/mg/logical_entitys/key_statistic"
# reader time series
readertimeUrl: "http://192.168.2.201:30102/meta/mg/planning_time_seriess/getTimeSeriesData"
# field name of the current page number
pageColumn: "page.pageNo"
# initial value of the current page number
pageInitValue: "1"
# field name of the page size
perPageCountColumn: "page.pageSize"
# page size
perPageCount: "10"
# page count limit; for paged requests, the number of records per page
pageCountLimit: "3"
# how the end of pagination is detected for paged requests:
# "01" - stop when the returned result list is empty, "02" - stop based on the total record count
pageEndType: "01"
# full path of the pagination-end field
pageEndPath: "data.valueList"
# time-series pageNo flag
isTimePage: "0"

1
bin/nacos-start.cmd Normal file
View File

@ -0,0 +1 @@
D:\work\github-workspace\deploy-ease-backend\bin\nacos-server-2.3.0\bin\startup.cmd -m standalone

View File

@ -128,11 +128,6 @@
<artifactId>deploy-ease-api</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.qc.soft</groupId>
<artifactId>deploy-ease-tenant-adapter</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.alibaba.cloud</groupId>
<artifactId>spring-cloud-starter-alibaba-nacos-discovery</artifactId>
@ -256,9 +251,10 @@
</resource>
</resources>
</build>
<parent>
<groupId>com.qc.soft</groupId>
<artifactId>deploy-ease</artifactId>
<artifactId>deploy-ease-parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>

View File

@ -12,19 +12,10 @@ import java.net.URLClassLoader;
@SpringBootConfiguration
@EnableAutoConfiguration
@ComponentScan("com.qc.soft.deploy.ease.*")
//@MapperScan("com.qc.soft.longi.deployment.dao")
//@EnableScheduling
@EnableAsync
public class DeployEaseApplication {
public static void main(String[] args) {
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
if (classLoader instanceof URLClassLoader) {
URL[] urls = ((URLClassLoader) classLoader).getURLs();
for (URL url : urls) {
System.out.println(url.getFile());
}
}
SpringApplication.run(DeployEaseApplication.class, args);
}
}

View File

@ -0,0 +1,77 @@
package com.qc.soft.deploy.ease.config;
import com.alibaba.cloud.nacos.NacosDiscoveryProperties;
import com.qc.soft.deploy.ease.consts.Consts;
import com.qc.soft.deploy.ease.service.ITenantService;
import lombok.extern.slf4j.Slf4j;
import org.apache.logging.log4j.util.Strings;
import org.springframework.boot.CommandLineRunner;
import org.springframework.stereotype.Component;
import javax.annotation.Resource;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.Executors;
@Component
@Slf4j
public class TenantAdapterRunner implements CommandLineRunner {
@Resource
private ITenantService tenantService;
@Resource
private NacosDiscoveryProperties nacosDiscoveryProperties;
@Override
public void run(String... args) {
String env;
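// Heuristic: a Spring Boot fat JAR is loaded by LaunchedURLClassLoader, so treat that as a packaged (prod) run; anything else is a local dev run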
String classLoaderName = getClass().getClassLoader().getClass().getName();
if (classLoaderName.contains("LaunchedURLClassLoader")) {
System.out.println("Application started via java -jar or from a fat JAR.");
env = "prod";
} else {
env = "dev";
}
// -DNACOS_CONFIG_SERVER=192.168.0.166:8848
// -DTENANT_CODE=longi
// -DDEPLOY_ENV=deploy-ease-dev
// -DNACOS_USER=nacos
// -DNACOS_PWD=qichen5210523
Properties nacosProperties = nacosDiscoveryProperties.getNacosProperties();
System.out.println(nacosProperties);
String tenantAdapterJarPath = Consts.TENANT_ADAPTER_SERVICE_JAR_PATH.get(env);
String nacosOptions = String.format("-DNACOS_CONFIG_SERVER=%s -DDEPLOY_ENV=%s -DTENANT_CODE=longi -DNACOS_USER=%s -DNACOS_PWD=%s",
nacosProperties.get("serverAddr"),
nacosProperties.get("namespace"),
nacosProperties.get("username"),
nacosProperties.get("password")
);
// JVM system properties must come before -jar and be passed as separate arguments
List<String> command = new ArrayList<>();
command.add("java");
command.addAll(Arrays.asList(nacosOptions.split(" ")));
command.add("-jar");
command.add(tenantAdapterJarPath);
try {
ProcessBuilder processBuilder = new ProcessBuilder(command);
processBuilder.redirectErrorStream(true);
Process process = processBuilder.start();
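// pump the child's combined stdout/stderr to this JVM's console on a background thread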
Executors.newSingleThreadExecutor().submit(() -> {
try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()))) {
String line;
while ((line = reader.readLine()) != null) {
System.out.println(line);
}
} catch (Exception e) {
e.printStackTrace();
}
});
// wait for the child process to exit
int exitCode = process.waitFor();
System.out.println("Process exited with code: " + exitCode);
} catch (Exception e) {
log.error("Failed to start the adapter jar", e);
}
}
}

View File

@ -1,6 +1,14 @@
package com.qc.soft.deploy.ease.consts;
import java.util.HashMap;
import java.util.Map;
public class Consts {
public static final String TENANT_ADAPTER_SERVICE_NAME = "deploy-ease-adapter-%s";
public static final Map<String, String> TENANT_ADAPTER_SERVICE_JAR_PATH = new HashMap<>();
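// Per-environment absolute path of the tenant-adapter fat JAR; only the local dev path is registered here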
static {
TENANT_ADAPTER_SERVICE_JAR_PATH.put("dev", "D://work//github-workspace//deploy-ease-backend//deploy-ease-tenant-adapter//target//deploy-ease-tenant-adapter-1.0.0.jar");
}
}

View File

@ -1,7 +1,7 @@
spring:
cloud:
nacos:
server-addr: ${NACOS_CONFIG_SERVER:192.168.0.166:8848}
server-addr: ${NACOS_CONFIG_SERVER}
username: ${NACOS_USER}
password: ${NACOS_PWD}
discovery:

File diff suppressed because one or more lines are too long

View File

@ -120,7 +120,7 @@
<parent>
<groupId>com.qc.soft</groupId>
<artifactId>deploy-ease</artifactId>
<artifactId>deploy-ease-parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>

View File

@ -0,0 +1,27 @@
spring:
cloud:
nacos:
server-addr: ${NACOS_CONFIG_SERVER}
username: ${NACOS_USER}
password: ${NACOS_PWD}
discovery:
group: ${DEPLOY_ENV}
namespace: ${DEPLOY_ENV}
enabled: true
config:
group: ${DEPLOY_ENV}
namespace: ${DEPLOY_ENV}
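# extension-configs import shared configuration from additional Nacos groups; entries with a larger index generally take precedence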
extension-configs[0]:
data-id: common.yml
group: common
refresh: false
extension-configs[1]:
data-id: deploy-ease-synchronizer.yml
group: business
refresh: false
config:
override-none: false
allow-override: true
override-system-properties: false
main:
allow-bean-definition-overriding: true

View File

@ -0,0 +1,3 @@
artifactId=deploy-ease-tenant-adapter
groupId=com.qc.soft
version=1.0.0

View File

@ -0,0 +1 @@
com\qc\soft\deploy\ease\synchronizer\DeployEaseTenantAdapterApplication.class

View File

@ -0,0 +1 @@
D:\work\github-workspace\deploy-ease-backend\deploy-ease-tenant-adapter\src\main\java\com\qc\soft\deploy\ease\synchronizer\DeployEaseTenantAdapterApplication.java

View File

@ -4,7 +4,7 @@
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.qc.soft</groupId>
<artifactId>deploy-ease</artifactId>
<artifactId>deploy-ease-parent</artifactId>
<version>1.0-SNAPSHOT</version>
<packaging>pom</packaging>
@ -73,8 +73,8 @@
<modules>
<module>deploy-ease-core</module>
<module>deploy-ease-api</module>
<module>deploy-ease-core</module>
<module>deploy-ease-tenant-adapter</module>
</modules>