# easyviews ui
### etl
Reads the center configuration from the database.
SQL for adding the dataset (re-points center 2 to center 1, then inserts the center attributes):
```
update easyviews.etl_dataset set centerid=1 where centerid=2;
update etl_center set id=1 where id=2;
update etl_center_ip set cid=1 where cid=2;
update bpm_application_center set centerid=1 where centerid=2;
update bpm_monitor set centerid=1 where centerid=2;
update etl_attribute set etlid=1 where etlid=2 and type=1;
update etl_instance set centerid=1 where centerid=2;
update etl_link set centerid=1 where centerid=2;
update etl_datasource set centerid=1 where centerid=2;
update bpm_component set centerid=1 where centerid=2;
INSERT INTO easyviews.`etl_attribute`( `etlid`, `type`, `key`, `value`, `remarks`, `state`) VALUES ( 1, 1, 'kafka.k8s.service', '10.7.1.191:19092', '', 0);
INSERT INTO easyviews.`etl_attribute`( `etlid`, `type`, `key`, `value`, `remarks`, `state`) VALUES ( 1, 1, 'clickhouse.k8s.username', 'admin', '', 0);
INSERT INTO easyviews.`etl_attribute`( `etlid`, `type`, `key`, `value`, `remarks`, `state`) VALUES ( 1, 1, 'clickhouse.k8s.password', 'ssqj@easyviews.pw', '', 0);
INSERT INTO easyviews.`etl_attribute`( `etlid`, `type`, `key`, `value`, `remarks`, `state`) VALUES ( 1, 1, 'clickhouse.k8s.db.all.name', 'easyviews', '', 0);
INSERT INTO easyviews.`etl_attribute`( `etlid`, `type`, `key`, `value`, `remarks`, `state`) VALUES ( 1, 1, 'easyviews.etlservice.url', '10.1.125.12:8088', '', 0);
INSERT INTO easyviews.`etl_attribute`( `etlid`, `type`, `key`, `value`, `remarks`, `state`) VALUES ( 1, 1, 'easyviews.web.url', '10.1.125.39:8089', '', 0);
INSERT INTO easyviews.`etl_attribute`( `etlid`, `type`, `key`, `value`, `remarks`, `state`) VALUES ( 1, 1, 'easyviews.npds.manager.callback.url', 'npds-manager.npds.svc.cluster.local:13334', '', 0);
INSERT INTO easyviews.`etl_attribute`( `etlid`, `type`, `key`, `value`, `remarks`, `state`) VALUES ( 1, 1, 'easyviews.npds.manager.captor.url', 'manager.npds.svc.cluster.local', '', 0);
INSERT INTO easyviews.`etl_attribute`( `etlid`, `type`, `key`, `value`, `remarks`, `state`) VALUES ( 1, 1, 'easyviews.bigdata.alarm.url', '10.1.125.39:8093', '', 0);
INSERT INTO easyviews.`etl_attribute`( `etlid`, `type`, `key`, `value`, `remarks`, `state`) VALUES ( 1, 1, 'apollo.k8s.url', 'NULL', '', 0);
INSERT INTO easyviews.`etl_attribute`( `etlid`, `type`, `key`, `value`, `remarks`, `state`) VALUES ( 1, 1, 'kafka.probe.service', '10.1.125.23:19092', '', 0);
INSERT INTO easyviews.`etl_attribute`( `etlid`, `type`, `key`, `value`, `remarks`, `state`) VALUES ( 1, 1, 'easyviews.bigdata.indicator.bpm.url', '10.1.125.39:7080', '', 0);
INSERT INTO easyviews.`etl_attribute`( `etlid`, `type`, `key`, `value`, `remarks`, `state`) VALUES ( 1, 1, 'easyviews.bigdata.indicator.npm.url', 'npm-indicator.bigdata.svc.cluster.local:8090', '', 0);
INSERT INTO easyviews.`etl_attribute`( `etlid`, `type`, `key`, `value`, `remarks`, `state`) VALUES ( 1, 1, 'clickhouse.k8s.http.url.service', '10.1.125.42:31123', '', 0);
INSERT INTO easyviews.`etl_attribute`( `etlid`, `type`, `key`, `value`, `remarks`, `state`) VALUES ( 1, 1, 'clickhouse.k8s.tcp.url.service', '10.1.125.42:31124', '', 0);
INSERT INTO easyviews.`etl_attribute`( `etlid`, `type`, `key`, `value`, `remarks`, `state`) VALUES ( 1, 1, 'clickhouse.k8s.http.url.nodeport', '10.1.125.42:31123', '', 0);
INSERT INTO easyviews.`etl_attribute`( `etlid`, `type`, `key`, `value`, `remarks`, `state`) VALUES ( 1, 1, 'clickhouse.k8s.tcp.url.nodeport', '10.1.125.42:31124', '', 0);
INSERT INTO easyviews.`etl_attribute`( `etlid`, `type`, `key`, `value`, `remarks`, `state`) VALUES ( 1, 1, 'clickhouse.k8s.tcp.url.hostport', '10.1.125.42:39000,10.1.125.40:39000', '', 0);
INSERT INTO easyviews.`etl_attribute`( `etlid`, `type`, `key`, `value`, `remarks`, `state`) VALUES ( 1, 1, 'clickhouse.k8s.http.url.hostport', '10.1.125.42:38123,10.1.125.40:38123', '', 0);
INSERT INTO easyviews.`etl_attribute`( `etlid`, `type`, `key`, `value`, `remarks`, `state`) VALUES ( 1, 1, 'clickhouse.k8s.tcp.url.npm.hostport', '10.1.125.42:39000,10.1.125.40:39000', '', 0);
INSERT INTO easyviews.`etl_attribute`( `etlid`, `type`, `key`, `value`, `remarks`, `state`) VALUES ( 1, 1, 'easyviews.bigdata.indicator.tidm.url', '10.1.125.39:8091', '', 0);
INSERT INTO easyviews.`etl_attribute`( `etlid`, `type`, `key`, `value`, `remarks`, `state`) VALUES ( 1, 1, 'easyviews.bigdata.alarmSimulation.url', '10.1.125.39:8092', '', 0);
INSERT INTO easyviews.`etl_attribute`( `etlid`, `type`, `key`, `value`, `remarks`, `state`) VALUES ( 1, 1, 'easyviews.npds.event.callback.url', 'npds-event.npds.svc.cluster.local:13444', '', 0);
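-- Quick sanity check that the attributes landed (illustrative):
select `key`, `value` from easyviews.etl_attribute where etlid=1 and type=1;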
```
# Deployment
```
yum install -y ntpdate lrzsz vim net-tools wget zip unzip
/usr/sbin/ntpdate cn.pool.ntp.org
echo '*/30 * * * * /usr/sbin/ntpdate cn.pool.ntp.org &> /dev/null' >>/var/spool/cron/root
crontab -l
cat >> /etc/hosts <<EOF
10.1.128.18 harbor.ev.com
10.1.128.92 master01
10.1.128.86 node01
10.1.128.87 node02
10.1.128.89 master02
EOF
# Install Harbor and Rancher
cd /mnt
tar -zxvf harbor-app.tar.gz -C /mnt
tar xf harbor-data*.tar.gz -C /
cp /mnt/harbor/docker-compose /usr/bin
. /mnt/harbor/install.sh
docker login harbor.ev.com:8000 -u admin -p Greattimes601
docker run --name rancher-server -d --restart=unless-stopped -p 180:80 -p 1443:443 -v /mnt/rancher:/var/lib/rancher/ -v /root/var/log/auditlog:/var/log/auditlog -e AUDIT_LEVEL=3 harbor.ev.com:8000/rancher/rancher:v2.4.6
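# Verify Rancher is up before continuing (illustrative check; Rancher's
# /ping endpoint returns "pong" once the server is ready):
docker ps --filter name=rancher-server
curl -k https://127.0.0.1:1443/ping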
# Change the default image registry used by Kubernetes deployments to the self-hosted Harbor registry
# Rancher >> Settings >> system-default-registry >> Edit >> harbor.ev.com:8000
# Add Cluster >> Custom >> Advanced Cluster Options: NodePort range 0-65535 >> Docker root directory /mnt/docker >> Edit as YAML: kubernetes version v1.18.6-rancher1-2
# Add hosts: once the registration command has run on every host, the cluster pulls images and initializes; after a short wait the cluster is up
# Install Helm
cd /mnt
tar -zxvf helm-app.tar.gz
cd /mnt/helm
rpm -ivh kubectl-1.15.1-0.x86_64.rpm
# Edit the kubeconfig file
mkdir -p /root/.kube
vim /root/.kube/config
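# A minimal kubeconfig skeleton (placeholder values only; in practice
# paste the kubeconfig that Rancher generates for the cluster):
# apiVersion: v1
# kind: Config
# clusters:
# - name: local
#   cluster:
#     server: https://<rancher-host>:1443/k8s/clusters/<cluster-id>
# users:
# - name: admin
#   user:
#     token: <bearer-token>
# contexts:
# - name: local
#   context: {cluster: local, user: admin}
# current-context: local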
# Initialize Helm (Helm 2 / Tiller)
cd /mnt/helm/
cp helm /usr/bin
kubectl apply -f rbac-config.yaml
helm init --service-account tiller --skip-refresh --tiller-image harbor.ev.com:8000/rancher/tiller:v2.16.8
helm version
# Deploy --------------------------------------------------------------------
cd local-path-helm/
helm install --name local-path-clickhouse local-path-clickhouse
helm install --name local-path-kafka local-path-kafka
helm install --name local-path-other local-path-other
kubectl delete crd $(kubectl get crd | grep "monitoring.coreos.com" | grep -v NAME | awk '{print $1}')
cd ../prometheus-operator-helm
helm install --name prometheus-operator prometheus-operator
cd ../zookeeper-helm
helm install --name zookeeper zookeeper
cd ../mysql
helm install --name mysql .
##*******************************************************************
# mysql -uroot -h <any node IP> -P 30306 -pssqj@easyviews.pw
update easyviews.deploy set `values`='192.168.xxx.xxx,192.168.xxx.xxx' where `key`='center.1.ip';
#update easyviews.deploy set `values`='<all bigdata cluster IPs, comma-separated>' where id=2;
#update easyviews.deploy set values='<any node IP>:31123' where id=4;
#update easyviews.deploy set values='<any node IP>:8088' where id=8;
#update easyviews.deploy set values='<any node IP>:8089' where id=9;
#update easyviews.deploy set values='<any node IP>:31517' where id=12;
#update easyviews.deploy set values='<see the kafka note below>' where id=14;
#/* Template for the kafka service value (id=14): kafka.cluster.local:31090. The first
#kafka port in the cluster is 31090, and each additional kafka broker increments the port
#by one, so list one address per broker, comma-separated. Note: if kafka is deployed on
#bare metal, just use kafka.cluster.local:9092. */
#center.1.kafka.k8s.service easyviews-kafka.default.svc.cluster.local:9092
#center.1.kafka.probe.service 10.1.128.89:9092
#center.1.clickhouse.k8s.tcp.url.hostport 10.1.128.17:39000,10.1.128.18:39000
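# A filled-in example using the lab addresses from this document
# (illustrative only; substitute your own IPs and ports):
#update easyviews.deploy set `values`='10.1.128.86,10.1.128.87' where id=2;
#update easyviews.deploy set `values`='10.1.128.92:31123' where id=4;
#update easyviews.deploy set `values`='10.1.128.92:8088' where id=8;
#update easyviews.deploy set `values`='10.1.128.92:8089' where id=9;
#update easyviews.deploy set `values`='kafka.cluster.local:31090' where id=14;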
###*****************************************************************************************************
# Install clickhouse-operator-crd first, then clickhouse-operator
cd ../clickhouse-operator-crd-helm
helm install --name clickhouse-operator-crd clickhouse-operator-crd
cd ../clickhouse-operator-helm
helm install --name clickhouse-operator clickhouse-operator --set fullnameOverride="clickhouse-operator"
# helm install ./clickhouse-operator --name clickhouse-operator --namespace=default --set fullnameOverride="clickhouse-operator"
kubectl label node node01 node02 clickhouse=allow
cd ../clickhouse-helm
vim clickhouse/values.yaml
# Comment out the whole resources: block:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
#   cpu: "30"
#   memory: 110Gi
# requests:
#   cpu: "10"
#   memory: 10Gi
# Then set the defaults:
# default:
#   resource:
#     cpu: "10"
#     memory: 20               # unit: Gi
# max_rows: "15625000000"      # append 000 (three extra zeros)
# max_bytes: "1073741824000"   # append 000 (three extra zeros)
docker push harbor.ev.com:8000/bpm/clickhouse/clickhouse-server:22.6
helm install ./clickhouse --name clickhouse --namespace=default --set shardsCount=1 --set persistence.volume=hdd_or_nvme --set persistence.enabled=true --set persistence.hostPath.enabled=false --set persistence.localPath.enabled=true --set init.ev.enabled=true --set name.cluster=business --set dict.mysql.service=easyviews-mysql.default.svc.cluster.local
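# Verify the installation came up (illustrative; `chi` is the short name
# of the ClickHouseInstallation CRD registered by clickhouse-operator):
kubectl get chi
kubectl get pods | grep clickhouse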
# Delete clickhouse (cleanup) ------------------------------------------
kubectl delete crd `kubectl get crd|grep clickhouseinstallations|awk '{print $1}'`
kubectl patch crd/clickhouseinstallations.clickhouse.altinity.com -p '{"metadata":{"finalizers":[]}}' --type=merge
helm del --purge clickhouse-operator-crd
helm del --purge clickhouse-operator
helm del --purge clickhouse
kubectl delete pvc `kubectl get pvc|grep chi|awk '{print $1}'`
kubectl delete pv `kubectl get pv|grep chi|awk '{print $1}'`
./zookeeper-install.sh
# zookeeper connect string: 10.1.128.86:12181,10.1.128.87:12181,10.1.128.89:12181
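# Quick liveness check on the ensemble (illustrative; four-letter-word
# commands may need to be whitelisted via 4lw.commands.whitelist):
echo srvr | nc 10.1.128.86 12181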
# Multi-node -----------------
/mnt/kafka/bin/kafka-topics.sh --create --bootstrap-server 10.1.128.92:9092 --replication-factor 1 --partitions 12 --topic alarmAlgorithmData
/mnt/kafka/bin/kafka-topics.sh --create --bootstrap-server 10.1.128.92:9092 --replication-factor 1 --partitions 12 --topic alarmAlgorithmResult
/mnt/kafka/bin/kafka-topics.sh --create --bootstrap-server 10.1.128.92:9092 --replication-factor 1 --partitions 12 --topic NetReassemble
/mnt/kafka/bin/kafka-topics.sh --create --bootstrap-server 10.1.128.92:9092 --replication-factor 1 --partitions 12 --topic notifyWeb
/mnt/kafka/bin/kafka-topics.sh --create --bootstrap-server 10.1.128.92:9092 --replication-factor 1 --partitions 12 --topic CacheErrorEvent
/mnt/kafka/bin/kafka-topics.sh --create --bootstrap-server 10.1.128.92:9092 --replication-factor 1 --partitions 12 --topic SupplementaryCacheInfo
/mnt/kafka/bin/kafka-topics.sh --create --bootstrap-server 10.1.128.92:9092 --replication-factor 1 --partitions 12 --topic processFailedCacheInfo-in-0
/mnt/kafka/bin/kafka-topics.sh --create --bootstrap-server 10.1.128.92:9092 --replication-factor 1 --partitions 12 --topic processSupplementaryCacheInfo-in-0
/mnt/kafka/bin/kafka-topics.sh --create --bootstrap-server 10.1.128.92:9092 --replication-factor 1 --partitions 12 --topic NPM
/mnt/kafka/bin/kafka-topics.sh --create --bootstrap-server 10.1.128.92:9092 --replication-factor 1 --partitions 12 --topic BpmRaw
/mnt/kafka/bin/kafka-topics.sh --create --bootstrap-server 10.1.128.92:9092 --replication-factor 1 --partitions 12 --topic BpmPacket
/mnt/kafka/bin/kafka-topics.sh --create --bootstrap-server 10.1.128.92:9092 --replication-factor 1 --partitions 12 --topic BpmEvent
/mnt/kafka/bin/kafka-topics.sh --create --bootstrap-server 10.1.128.92:9092 --replication-factor 1 --partitions 12 --topic BpmPairing
/mnt/kafka/bin/kafka-topics.sh --create --bootstrap-server 10.1.128.92:9092 --replication-factor 1 --partitions 12 --topic alarmSimulationData
/mnt/kafka/bin/kafka-topics.sh --create --bootstrap-server 10.1.128.92:9092 --replication-factor 1 --partitions 12 --topic alarmSimulationResult
/mnt/kafka/bin/kafka-topics.sh --create --bootstrap-server 10.1.128.92:9092 --replication-factor 1 --partitions 12 --topic testlog
/mnt/kafka/bin/kafka-topics.sh --create --bootstrap-server 10.1.128.92:9092 --replication-factor 1 --partitions 12 --topic logEvent
/mnt/kafka/bin/kafka-topics.sh --create --bootstrap-server 10.1.128.92:9092 --replication-factor 1 --partitions 12 --topic TidmEvent
/mnt/kafka/bin/kafka-topics.sh --create --bootstrap-server 10.1.128.92:9092 --replication-factor 1 --partitions 12 --topic NpmPacket
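# The topic creations above can equally be done in one loop (same bootstrap
# server, replication factor and partition count as the commands above):
for t in alarmAlgorithmData alarmAlgorithmResult NetReassemble notifyWeb \
         CacheErrorEvent SupplementaryCacheInfo processFailedCacheInfo-in-0 \
         processSupplementaryCacheInfo-in-0 NPM BpmRaw BpmPacket BpmEvent \
         BpmPairing alarmSimulationData alarmSimulationResult testlog \
         logEvent TidmEvent NpmPacket; do
  /mnt/kafka/bin/kafka-topics.sh --create --bootstrap-server 10.1.128.92:9092 \
    --replication-factor 1 --partitions 12 --topic "$t"
done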
/mnt/kafka/bin/kafka-topics.sh --list --bootstrap-server 10.1.128.92:9092
/mnt/kafka/bin/kafka-topics.sh --list --zookeeper 223.83.4.254:2181
/mnt/kafka/bin/kafka-topics.sh --create --bootstrap-server 10.1.125.40:9092 --replication-factor 1 --partitions 3 --topic weblog
/mnt/kafka/bin/kafka-topics.sh --create --zookeeper 10.1.128.89:12181 --replication-factor 1 --partitions 12 --topic testccc
# Single-node ---------------------------------------
/mnt/kafka/bin/kafka-topics.sh --create --zookeeper 10.1.128.89:2181 --replication-factor 1 --partitions 12 --topic BpmEvent
./kafka-topics.sh --create --zookeeper 10.1.128.89:2181 --replication-factor 1 --partitions 12 --topic BpmEventDebug
./kafka-topics.sh --create --zookeeper 10.1.128.89:2181 --replication-factor 1 --partitions 1 --topic BpmLog
./kafka-topics.sh --create --zookeeper 10.1.128.89:2181 --replication-factor 1 --partitions 12 --topic BpmPacket
./kafka-topics.sh --create --zookeeper 10.1.128.89:2181 --replication-factor 1 --partitions 12 --topic BpmPacketDebug
./kafka-topics.sh --create --zookeeper 10.1.128.89:2181 --replication-factor 1 --partitions 12 --topic BpmPairing
./kafka-topics.sh --create --zookeeper 10.1.128.89:2181 --replication-factor 1 --partitions 12 --topic BpmPairingDebug
./kafka-topics.sh --create --zookeeper 10.1.128.89:2181 --replication-factor 1 --partitions 12 --topic BpmRaw
./kafka-topics.sh --create --zookeeper 10.1.128.89:2181 --replication-factor 1 --partitions 12 --topic BpmRawDebug
./kafka-topics.sh --create --zookeeper 10.1.128.89:2181 --replication-factor 1 --partitions 1 --topic CacheErrorEvent
./kafka-topics.sh --create --zookeeper 10.1.128.89:2181 --replication-factor 1 --partitions 12 --topic NPM
./kafka-topics.sh --create --zookeeper 10.1.128.89:2181 --replication-factor 1 --partitions 6 --topic Packetdecrypt
./kafka-topics.sh --create --zookeeper 10.1.128.89:2181 --replication-factor 1 --partitions 12 --topic SupplementaryCacheInfo
./kafka-topics.sh --create --zookeeper 10.1.128.89:2181 --replication-factor 1 --partitions 12 --topic alarmAlgorithmData
./kafka-topics.sh --create --zookeeper 10.1.128.89:2181 --replication-factor 1 --partitions 1 --topic alarmAlgorithmResult
./kafka-topics.sh --create --zookeeper 10.1.128.89:2181 --replication-factor 1 --partitions 12 --topic alarmSimulationData
./kafka-topics.sh --create --zookeeper 10.1.128.89:2181 --replication-factor 1 --partitions 12 --topic alarmSimulationResult
./kafka-topics.sh --create --zookeeper 10.1.128.89:2181 --replication-factor 1 --partitions 48 --topic captor
./kafka-topics.sh --create --zookeeper 10.1.128.89:2181 --replication-factor 1 --partitions 1 --topic notifyWeb
./kafka-topics.sh --create --zookeeper 10.1.128.89:2181 --replication-factor 1 --partitions 1 --topic processFailedCacheInfo-in-0
./kafka-topics.sh --create --zookeeper 10.1.128.89:2181 --replication-factor 1 --partitions 1 --topic processSupplementaryCacheInfo-in-0
./kafka-topics.sh --create --zookeeper 10.1.128.89:2181 --replication-factor 1 --partitions 12 --topic NpmPacket
./bin/kafka-topics.sh --create --bootstrap-server 192.159.174.121:9092 --replication-factor 1 --partitions 10 --topic testlog
./kafka-topics.sh --zookeeper localhost:2181 --list
#--------------------------
cd ../kafdrop-helm/
helm install --name kafdrop kafdrop
cd ../redis
helm install --name redis .
kubectl create ns web
kubectl create ns npds
kubectl create ns bigdata
# In Rancher: Namespaces >> Move >> Default (move the new namespaces into the Default project)
cd ../../02_web-helm
helm install --name easyviews-eureka easyviews-eureka
helm install --name easyviews-system easyviews-system
helm install --name easyviews-etl easyviews-etl
# If kafka is deployed on bare metal: sed -i "s/enabled: false/enabled: true/g" easyviews-indicator/values.yaml
helm install --name easyviews-indicator easyviews-indicator
helm install --name easyviews-api easyviews-api
# Add environment variables
xml.ip 10.1.128.92
xml.password Greattimes601
xml.path /mnt/upload
xml.username root
JAVA_OPTS -Deasyviews.system.url=easyviews-system.web.svc.cluster.local:9999
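# A sketch of applying the variables above with kubectl (deployment name
# and namespace are assumptions; these are normally set through the
# Rancher workload UI):
kubectl -n web set env deployment/easyviews-etl \
  xml.ip=10.1.128.92 xml.username=root xml.password=Greattimes601 \
  xml.path=/mnt/upload \
  JAVA_OPTS=-Deasyviews.system.url=easyviews-system.web.svc.cluster.local:9999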
# Pin to a single node (see the sketch after the image list below)
harbor.ev.com:8000/web/easyviews-eureka:V5.1.3.3
harbor.ev.com:8000/web/easyviews-provider-system:V5.1.4.12
harbor.ev.com:8000/web/easyviews-etl:V5.1.4.12
harbor.ev.com:8000/web/easyviews-provider-indicator:V5.1.4.12
harbor.ev.com:8000/web/easyviews-api:V5.1.4.12
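# Sketch of pinning a workload to one node with a nodeSelector (label
# key/value and workload name are assumptions):
kubectl label node node01 easyviews=pinned
kubectl -n web patch deployment easyviews-api -p \
  '{"spec":{"template":{"spec":{"nodeSelector":{"easyviews":"pinned"}}}}}'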
cd ../03_npds-helm/
helm install --name npds-manager npds-manager
helm install --name npds-client npds-client
helm install --name npds-pair npds-pair
harbor.ev.com:8000/npds/npds-manager/snapshots:v5.1.4.8-202210181913
harbor.ev.com:8000/npds/npds-client/snapshots:v5.1.4.8-202210181913
harbor.ev.com:8000/npds/npds-pair/snapshots:v5.1.4.8-202210181913
# 1. Changing the kafka address under bigdata:
sed -i "s/10.1.128.18/172.24.48.40/g" `grep "10.1.128.18" -rl .`
#    Only needed for the docker/cluster kafka; do not change this for a
#    bare-metal kafka, since it is not read in that case
# 2. gateway and gateway-postgresql can be shut down on site (they are only
#    for troubleshooting), but they must be installed!
# 3. When upgrading services under bigdata, delete the PVCs first so the
#    csvc service is not affected
# 4. bpm-daily-statistics excludes holidays via the env var EXCLUDE_HOLIDAY=True;
#    in API parameter management: exclude_holiday = 1
# (sketches for notes 2 and 3 follow below)
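# Sketches for notes 2 and 3 above (deployment name assumed; <pvc-name>
# is a placeholder):
kubectl -n bigdata scale deployment gateway --replicas=0   # stop gateway on site
kubectl -n bigdata get pvc                                 # inspect before upgrading
kubectl -n bigdata delete pvc <pvc-name>                   # delete the service's PVC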
cd ../04_bigdata-helm/
helm install ./bpm-indicator-helm --name bpm-indicator --namespace bigdata --set easyviews.etl.url=10.1.128.92:8088
helm install ./algorithm-clustering-helm --name algorithm-clustering --namespace bigdata --set easyviews.etl.url=10.1.128.92:8088
helm install ./algorithm-threshold-helm --name algorithm-threshold --namespace bigdata --set easyviews.etl.url=10.1.128.92:8088
helm install ./bpm-daily-statistics-helm --name bpm-daily-statistics --namespace bigdata --set easyviews.etl.url=10.1.128.92:8088
helm install ./npm-daily-statistics-helm --name npm-daily-statistics --namespace bigdata --set easyviews.etl.url=10.1.128.92:8088
helm install ./npm-indicator-helm --name npm-indicator --namespace bigdata --set easyviews.etl.url=10.1.128.92:8088
helm install ./alarm-simulation-helm --name alarm-simulation --namespace bigdata --set easyviews.etl.url=10.1.128.92:8088
helm install ./csvc-helm --name jhipster-registry --namespace=bigdata
helm install ./gateway-helm --name gateway --namespace=bigdata
helm install ./clickhouse-helm --name bigdata-clickhouse --namespace=bigdata --set easyviews.etl.url=10.1.128.92:8088
helm install ./alarmmanager-helm --name alarmmanager --namespace=bigdata --set easyviews.etl.url=10.1.128.92:8088
helm install ./schedule-helm --name schedule --namespace bigdata --set easyviews.etl.url=10.1.128.92:8088
helm install ./db-clear-helm --name dbclear --namespace bigdata --set easyviews.etl.url=10.1.128.92:8088
# npm-indicator env: SECONDINDEX_FIELD => datetime
useradd -s /sbin/nologin ftpuser
echo ssqj@easyviews.pw | passwd --stdin ftpuser
echo "/sbin/nologin" >> /etc/shells
a=(glib2-devel libpcap libpcap-devel libgcrypt-devel glib2-devel qt-devel qt5-qtbase-devel qt5-linguist qt5-qtmultimedia-devel qt5-qtsvg-devel libcap-devel libcap-ng-devel gnutls-devel krb5-devel libxml2-devel lua-devel lz4-devel snappy-devel spandsp-devel libssh2-devel bcg729-devel libmaxminddb-devel sbc-devel libsmi-devel libnl3-devel libnghttp2-devel libssh-devel libpcap-devel c-ares-devel redhat-rpm-config rpm-build gtk+-devel gtk3-devel desktop-file-utils portaudio-devel rubygem-asciidoctor docbook5-style-xsl docbook-style-xsl systemd-devel git gcc gcc-c++ flex bison doxygen gettext-devel libxslt openssl-devel
)
for i in ${a[@]};do yum -y install $i; done
#wget https://github.com/Kitware/CMake/releases/download/v3.25.0-rc2/cmake-3.25.0-rc2.tar.gz
tar -zvxf cmake-3.25.0-rc2.tar.gz
cd cmake-3.25.0-rc2/
./bootstrap --parallel=20
make -j 20
make install
#wget https://2.na.dl.wireshark.org/src/all-versions/wireshark-3.6.9.tar.xz --no-check-certificate
tar -vxf wireshark-3.6.9.tar.xz
cd wireshark-3.6.9
mkdir build
cd build
/usr/local/bin/cmake ../
make -j 20
make install
# (make -j 20: 20 is the number of build threads; do not exceed the server's logical CPU count)
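# Verify the build installed (illustrative; make install places the
# binaries under /usr/local by default):
/usr/local/bin/tshark --version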
```
```
-- Could NOT find OpenSSL, try to set the path to OpenSSL root folder in the system variable OPENSSL_ROOT_DIR (missing: OPENSSL_CRYPTO_LIBRARY OPENSSL_INCLUDE_DIR)
CMake Error at Utilities/cmcurl/CMakeLists.txt:454 (message):
  Could not find OpenSSL. Install an OpenSSL development package or
  configure CMake with -DCMAKE_USE_OPENSSL=OFF to build without OpenSSL.
```
If the CMake bootstrap fails with the error above, add the following at the top of the top-level CMakeLists.txt and rerun:
```
set(CMAKE_USE_OPENSSL OFF)
```