I. Envoy xDS Examples
Clone the example code as described in the earlier Envoy examples post.
- eds-filesystem
cd servicemesh_in_practise/Dynamic-Configuration/eds-filesystem
# Edit the docker-compose file
vim docker-compose.yaml
services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment: # add this environment variable
    - ENVOY_UID=0
# Start
docker-compose up
# Verify
curl 172.31.11.2
Note: this example's docker-compose file declares the envoy image in three places, so the environment variable must be added three times. Also, the port Envoy's listener uses and the endpoint-discovery (admin) port may be the same; they do not conflict.
Edit the config file to simulate adding a node and let EDS discover it dynamically.
docker exec -it edsfilesystem_envoy_1 sh
cd /etc/envoy/eds.conf.d
cat eds.yaml.v2 > eds.yaml
mv eds.yaml bak && mv bak eds.yaml # only needed when working inside the container, not on the host; it forcibly "changes" the file so that Envoy's inode-based file watch fires and the newly added endpoint is discovered
curl 172.31.11.2:9901/clusters # inspect the endpoints: before the mv the new endpoint is already in the file but Envoy has not noticed it; after the mv it shows up
# Verify
curl 172.31.11.2
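For reference, the file swapped in above is a standard EDS ClusterLoadAssignment. A minimal sketch of what eds.yaml.v2 looks like once the second endpoint is added (the cluster name, addresses, and ports here are assumptions; check the repo's actual file):
resources:
- "@type": type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment
  cluster_name: webcluster # assumed cluster name
  endpoints:
  - lb_endpoints:
    - endpoint:
        address:
          socket_address:
            address: 172.31.11.11 # original endpoint (assumed address)
            port_value: 8080
    - endpoint:
        address:
          socket_address:
            address: 172.31.11.12 # the newly added endpoint (assumed address)
            port_value: 8080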
- lds-cds-filesystem
cd servicemesh_in_practise/Dynamic-Configuration/lds-cds-filesystem
# Edit the docker-compose file
vim docker-compose.yaml
services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment: # add this environment variable
    - ENVOY_UID=0
...
      - webserver01-sidecar
      - webserver # add an alias; used later to verify the domain-name change in cds.yaml
...
      - webserver02-sidecar
      - webserver # add an alias; used later to verify the domain-name change in cds.yaml
# Start
docker-compose up
# Verify
curl 172.31.12.2
Add the environment variable in all three places.
Edit the config file to verify a listener port change.
docker exec -it ldscdsfilesystem_envoy_1 sh
cd /etc/envoy/conf.d
# Change the listener port from 80 to 10080
vi lds.yaml
resources:
- "@type": type.googleapis.com/envoy.config.listener.v3.Listener
  name: listener_http
  address:
    socket_address: { address: 0.0.0.0, port_value: 10080 }
mv lds.yaml bak && mv bak lds.yaml
curl 172.31.12.2:9901/listeners # check the listeners
# Verify
curl 172.31.12.2:10080
Edit the config file to verify removing a node.
vi cds.yaml
resources:
- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
  name: webcluster
  connect_timeout: 1s
  type: STRICT_DNS
  load_assignment:
    cluster_name: webcluster
    endpoints:
    - lb_endpoints:
      - endpoint:
          address:
            socket_address:
              address: webserver01
              port_value: 80
mv cds.yaml bak && mv bak cds.yaml
curl 172.31.12.2:9901/clusters # check the cluster endpoints
# Verify
curl 172.31.12.2:10080
Edit the domain name to verify DNS-based resolution.
vi cds.yaml
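The change is a one-word edit (a sketch, relying on the webserver alias added in docker-compose above): pointing the STRICT_DNS cluster at the shared name makes one DNS name resolve to both sidecars, so both endpoints come back.
            socket_address:
              address: webserver # was webserver01; the alias resolves to both sidecars
              port_value: 80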
mv cds.yaml bak && mv bak cds.yaml
curl 172.31.12.2:9901/clusters # check the cluster endpoints
# Verify
curl 172.31.12.2:10080
- lds-cds-grpc
cd servicemesh_in_practise/Dynamic-Configuration/lds-cds-grpc
# Edit the docker-compose file
vim docker-compose.yaml
services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment: # add this environment variable
    - ENVOY_UID=0
# Start
docker-compose up
curl 172.31.15.2:9901/clusters # view cluster info
curl 172.31.15.2:9901/listeners # view listener info
# Verify
curl 172.31.15.2
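For context, the Envoy bootstrap in this example wires LDS and CDS to the management server over gRPC roughly as follows (a sketch consistent with the config_dump output shown below; the xdsserver hostname and port 18000 are assumptions, the repo's file may differ):
dynamic_resources:
  lds_config:
    resource_api_version: V3
    api_config_source:
      api_type: GRPC
      transport_api_version: V3
      grpc_services:
      - envoy_grpc: { cluster_name: xds_cluster }
  cds_config:
    resource_api_version: V3
    api_config_source:
      api_type: GRPC
      transport_api_version: V3
      grpc_services:
      - envoy_grpc: { cluster_name: xds_cluster }
static_resources:
  clusters:
  - name: xds_cluster
    connect_timeout: 1s
    type: STRICT_DNS
    typed_extension_protocol_options:
      envoy.extensions.upstreams.http.v3.HttpProtocolOptions:
        "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions
        explicit_http_config:
          http2_protocol_options: {} # xDS uses gRPC, which requires HTTP/2
    load_assignment:
      cluster_name: xds_cluster
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address: { address: xdsserver, port_value: 18000 } # assumed address/port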
Edit the config file to verify a listener port change and an added node.
docker exec -it ldscdsgrpc_xdsserver_1 sh
cd /etc/envoy-xds-server/config
cat config.yaml-v2 > config.yaml # config changes take effect immediately, so avoid editing config.yaml in place: vi's autosave would make the xDS server push a half-edited config
curl 172.31.15.2:9901/clusters # view cluster info
curl 172.31.15.2:9901/listeners # view listener info
yum install jq -y # install jq
# From the config_dump output, use jq to filter out the dynamically discovered clusters
curl -s 172.31.15.2:9901/config_dump | jq '.configs[1].dynamic_active_clusters'
[
  {
    "version_info": "411",
    "cluster": {
      "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
      "name": "webcluster", # the cluster's name
      "type": "EDS",
      "eds_cluster_config": {
        "eds_config": {
          "api_config_source": {
            "api_type": "GRPC", # API protocol
            "grpc_services": [
              {
                "envoy_grpc": {
                  "cluster_name": "xds_cluster" # the upstream xDS server cluster through which webcluster is discovered
                }
              }
            ],
            "set_node_on_first_message_only": true,
            "transport_api_version": "V3" # API version
          },
          "resource_api_version": "V3"
        }
      },
      "connect_timeout": "5s",
      "dns_lookup_family": "V4_ONLY"
    },
    "last_updated": "20xx-04-25Txx:13:45.024Z"
  }
]
# View listener details
curl -s 172.31.15.2:9901/config_dump?resource=dynamic_listeners | jq '.configs[0].active_state.listener'
{
  "@type": "type.googleapis.com/envoy.config.listener.v3.Listener",
  "name": "listener_http",
  "address": {
    "socket_address": {
      "address": "0.0.0.0",
      "port_value": 10080
    }
  },
  "filter_chains": [
    {
      "filters": [
        {
          "name": "envoy.filters.network.http_connection_manager",
          "typed_config": {
            "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager",
            "stat_prefix": "http",
            "rds": {
              "config_source": {
                "api_config_source": {
                  "api_type": "GRPC",
                  "grpc_services": [
                    {
                      "envoy_grpc": {
                        "cluster_name": "xds_cluster"
                      }
                    }
                  ],
                  "set_node_on_first_message_only": true,
                  "transport_api_version": "V3"
                },
                "resource_api_version": "V3"
              },
              "route_config_name": "local_route"
            },
            "http_filters": [
              {
                "name": "envoy.filters.http.router"
              }
            ]
          }
        }
      ]
    }
  ]
}
# Verify
curl 172.31.15.2:10080
Edit the config file to verify renaming the listener.
vi config.yaml
name: myconfig
spec:
  listeners:
  - name: listener_http1 # change the name
    address: 0.0.0.0
    port: 10081 # change the port
curl 172.31.15.2:9901/listeners # view listener info
# Verify
curl 172.31.15.2:10080
curl 172.31.15.2:10081
Renaming the listener creates a brand-new listener; the original one still exists and remains reachable.
- Health checking (health-check)
cd servicemesh_in_practise/Cluster-Manager/health-check
# Edit the docker-compose file
vim docker-compose.yaml
services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment: # add this environment variable
    - ENVOY_UID=0
# Start
docker-compose up
curl 172.31.18.2:9901/clusters # view cluster info
curl 172.31.18.2:9901/listeners # view listener info
# Verify
curl 172.31.18.2
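For reference, active health checking is driven by a health_checks block on the cluster, along these lines (a sketch; the timing values and thresholds are assumptions, the repo's values may differ):
health_checks:
- timeout: 1s
  interval: 5s
  unhealthy_threshold: 2
  healthy_threshold: 2
  http_health_check:
    path: /livez # the endpoint probed below
    expected_statuses:
    - start: 200
      end: 399 # any 2xx/3xx response counts as healthy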
Set the livez check value to FAIL and verify.
curl -XPOST -d "livez=FAIL" 172.31.18.11/livez # change the livez value on the web service's sidecar
curl -I 172.31.18.11/livez # after the change, the response code is 506
HTTP/1.1 506 Variant Also Negotiates
content-type: text/html; charset=utf-8
content-length: 4
server: envoy
date: Tue, 26 Apr 20xx xx:54:29 GMT
x-envoy-upstream-service-time: 1
# Verify
curl 172.31.18.2
Set the livez check value back to OK and verify.
curl -XPOST -d "livez=OK" 172.31.18.11/livez
curl -I 172.31.18.11/livez
HTTP/1.1 200 OK
content-type: text/html; charset=utf-8
content-length: 2
server: envoy
date: Tue, 26 Apr 20xx xx:57:38 GMT
x-envoy-upstream-service-time: 1
# Verify
curl 172.31.18.2
- Outlier detection (outlier-detection)
cd servicemesh_in_practise/Cluster-Manager/outlier-detection
# Edit the docker-compose file
vim docker-compose.yaml
services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment: # add this environment variable
    - ENVOY_UID=0
# Start
docker-compose up
# Verify
curl 172.31.20.2
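Passive (outlier) detection comes from an outlier_detection block on the cluster, roughly as follows (a sketch; these thresholds are assumptions, the repo's values may differ):
outlier_detection:
  consecutive_5xx: 3 # eject a host after 3 consecutive 5xx responses
  interval: 10s # how often ejection analysis runs
  base_ejection_time: 30s # ejection duration, multiplied by the ejection count
  max_ejection_percent: 100 # allow ejecting every host if necessary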
Simulate a failure: set livez to FAIL and verify.
curl -XPOST -d 'livez=FAIL' 172.31.20.12/livez
curl -I 172.31.20.12/livez
# Verify
while true; do curl 172.31.20.2/livez; sleep 0.5; done
Recover from the failure and verify.
curl -XPOST -d 'livez=OK' 172.31.20.12/livez
curl -I 172.31.20.12/livez
# Verify
while true; do curl 172.31.20.2/livez; sleep 0.5; done
- least-requests
cd servicemesh_in_practise/Cluster-Manager/least-requests
# Edit the docker-compose file
vim docker-compose.yaml
services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment: # add this environment variable
    - ENVOY_UID=0
# Start
docker-compose up
# Verify
./send-request.sh 172.31.22.2 # send test requests with the helper script
Send 300 requests, and print the result. This will take a while.
Weight of all endpoints:
Red:Blue:Green = 1:3:5
Response from:
Red:Blue:Green = 35:92:173
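The 1:3:5 result reflects per-endpoint weights on a LEAST_REQUEST cluster: with unequal weights, Envoy picks endpoints in proportion to weight, biased toward those with fewer active requests. A sketch (the host names are assumptions):
lb_policy: LEAST_REQUEST
load_assignment:
  cluster_name: webcluster
  endpoints:
  - lb_endpoints:
    - endpoint:
        address:
          socket_address: { address: red, port_value: 80 }
      load_balancing_weight: 1
    - endpoint:
        address:
          socket_address: { address: blue, port_value: 80 }
      load_balancing_weight: 3
    - endpoint:
        address:
          socket_address: { address: green, port_value: 80 }
      load_balancing_weight: 5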
- ring-hash
cd servicemesh_in_practise/Cluster-Manager/ring-hash
# Edit the docker-compose file
vim docker-compose.yaml
services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment: # add this environment variable
    - ENVOY_UID=0
# Start
docker-compose up
# Verify
while true; do index=$[$RANDOM%3]; curl -H "User-Agent: Browser_${index}" 172.31.25.2/user-agent && curl -H "User-Agent: Browser_${index}" 172.31.25.2/hostname && echo; sleep .1; done # test in a loop
User-Agent: Browser_0
ServerName: green
User-Agent: Browser_0
ServerName: green
User-Agent: Browser_2
ServerName: red
User-Agent: Browser_0
ServerName: green
User-Agent: Browser_2
ServerName: red
User-Agent: Browser_0
ServerName: green
User-Agent: Browser_2
ServerName: red
User-Agent: Browser_1
ServerName: blue
Requests carrying the same browser ID consistently land on the same host.
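That stickiness comes from pairing a RING_HASH cluster with a route-level hash policy on the User-Agent header, roughly like this (a sketch; the ring sizes are assumptions):
# on the cluster
lb_policy: RING_HASH
ring_hash_lb_config:
  minimum_ring_size: 512
  maximum_ring_size: 1048576
# on the route: hash each request by its User-Agent header
route:
  cluster: webcluster
  hash_policy:
  - header:
      header_name: User-Agent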
- priority-levels
cd servicemesh_in_practise/Cluster-Manager/priority-levels
# Edit the docker-compose file
vim docker-compose.yaml
services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment: # add this environment variable
    - ENVOY_UID=0
# Start
docker-compose up
# Verify
while true; do curl 172.31.29.2; sleep .5;done # five nodes in all, but traffic goes only to the three configured with the higher priority
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: green, ServerIP: 172.31.29.13!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: red, ServerIP: 172.31.29.11!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.29.12!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: green, ServerIP: 172.31.29.13!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: green, ServerIP: 172.31.29.13!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: red, ServerIP: 172.31.29.11!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.29.12!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: red, ServerIP: 172.31.29.11!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: green, ServerIP: 172.31.29.13!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.29.12!
Simulate a failure and verify.
curl -XPOST -d 'livez=FAIL' 172.31.29.13/livez
while true; do curl 172.31.29.2; sleep .5;done # with the default overprovisioning factor of 1.4, about 6.7% of the traffic (100% - 2/3 × 140%) is scheduled onto the two low-priority nodes
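Priorities live in the cluster's load_assignment: each locality group carries a priority, and the overprovisioning factor sits in the assignment policy. A sketch (the low-priority host names and the explicit factor are assumptions; 140 is also Envoy's default):
load_assignment:
  cluster_name: webcluster
  policy:
    overprovisioning_factor: 140 # i.e. 1.4
  endpoints:
  - priority: 0 # preferred nodes: red, blue, green
    lb_endpoints:
    - endpoint:
        address:
          socket_address: { address: red, port_value: 80 }
    - endpoint:
        address:
          socket_address: { address: blue, port_value: 80 }
    - endpoint:
        address:
          socket_address: { address: green, port_value: 80 }
  - priority: 1 # low-priority spillover nodes (assumed names)
    lb_endpoints:
    - endpoint:
        address:
          socket_address: { address: gray, port_value: 80 }
    - endpoint:
        address:
          socket_address: { address: black, port_value: 80 }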
The v3 version of the priority-scheduling example has not been made to work yet.
- lb-subsets
Environment overview
endpoint | stage | version | type | xlarge
---|---|---|---|---
e1 | prod | 1.0 | std | true
e2 | prod | 1.0 | std |
e3 | prod | 1.1 | std |
e4 | prod | 1.1 | std |
e5 | prod | 1.0 | bigmem |
e6 | prod | 1.1 | bigmem |
e7 | dev | 1.2-pre | std |
Label classes
keys: [stage, type], subsets:
[prod, std] - e1, e2, e3, e4
[prod, bigmem] - e5, e6
[dev, std] - e7
[dev, bigmem] - none
keys: [stage, version]
[prod, 1.0] - e1, e2, e5
[prod, 1.1] - e3, e4, e6
[prod, 1.2-pre] - none
[dev, 1.0] - none
[dev, 1.1] - none
[dev, 1.2-pre] - e7
keys: [version]
[1.0] - e1, e2, e5
[1.1] - e3, e4, e6
[1.2-pre] - e7
keys: [xlarge, version]
[true, 1.0] - e1
[true, 1.1] - none
[true, 1.2-pre] - none
This yields the 10 non-empty subsets above, plus one default subset:
stage=prod, type=std, version=1.0 - e1, e2
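These subsets are declared on the cluster through lb_subset_config, one selector per key class plus the default subset, roughly as follows (a sketch matching the classes above; the fallback-policy choice is an assumption):
lb_subset_config:
  fallback_policy: DEFAULT_SUBSET # unmatched requests go to the default subset
  default_subset:
    stage: "prod"
    type: "std"
    version: "1.0"
  subset_selectors:
  - keys: ["stage", "type"]
  - keys: ["stage", "version"]
  - keys: ["version"]
  - keys: ["xlarge", "version"]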
cd servicemesh_in_practise/Cluster-Manager/lb-subsets
# Edit the docker-compose file
vim docker-compose.yaml
services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment: # add this environment variable
    - ENVOY_UID=0
# Replace the routes section with the following
routes:
- match:
    prefix: "/"
    headers:
    - name: x-custom-version
      exact_match: pre-release
    - name: x-environment-state
      exact_match: dev
  route:
    cluster: webcluster1
    metadata_match:
      filter_metadata:
        envoy.lb:
          version: "1.2-pre"
          stage: "dev"
- match:
    prefix: "/"
    headers:
    - name: x-custom-version
      exact_match: v1.0
    - name: x-environment-state
      exact_match: prod
  route:
    cluster: webcluster1
    metadata_match:
      filter_metadata:
        envoy.lb:
          version: "1.0"
          stage: "prod"
- match:
    prefix: "/"
    headers:
    - name: x-custom-version
      exact_match: v1.1
    - name: x-environment-state
      exact_match: prod
  route:
    cluster: webcluster1
    metadata_match:
      filter_metadata:
        envoy.lb:
          version: "1.1"
          stage: "prod"
- match:
    prefix: "/"
    headers:
    - name: x-hardware-test
      exact_match: memory
    - name: x-environment-state
      exact_match: prod
  route:
    cluster: webcluster1
    metadata_match:
      filter_metadata:
        envoy.lb:
          type: "bigmem"
          stage: "prod"
- match:
    prefix: "/"
    headers:
    - name: x-hardware-test
      exact_match: std
    - name: x-environment-state
      exact_match: prod
  route:
    cluster: webcluster1
    metadata_match:
      filter_metadata:
        envoy.lb:
          type: "std"
          stage: "prod"
- match:
    prefix: "/"
    headers:
    - name: x-hardware-test
      exact_match: std
    - name: x-environment-state
      exact_match: dev
  route:
    cluster: webcluster1
    metadata_match:
      filter_metadata:
        envoy.lb:
          type: "std"
          stage: "dev"
- match:
    prefix: "/"
    headers:
    - name: x-custom-version
      exact_match: v1.0
  route:
    cluster: webcluster1
    metadata_match:
      filter_metadata:
        envoy.lb:
          version: "1.0"
- match:
    prefix: "/"
    headers:
    - name: x-custom-version
      exact_match: v1.1
  route:
    cluster: webcluster1
    metadata_match:
      filter_metadata:
        envoy.lb:
          version: "1.1"
- match:
    prefix: "/"
    headers:
    - name: x-custom-version
      exact_match: pre-release
  route:
    cluster: webcluster1
    metadata_match:
      filter_metadata:
        envoy.lb:
          version: "1.2-pre"
- match:
    prefix: "/"
    headers:
    - name: x-custom-xlarge
      exact_match: isTrue
    - name: x-custom-version
      exact_match: pre-release
  route:
    cluster: webcluster1
    metadata_match:
      filter_metadata:
        envoy.lb:
          version: "1.0"
          xlarge: "true"
- match:
    prefix: "/"
  route:
    weighted_clusters:
      clusters:
      - name: webcluster1
        weight: 90
        metadata_match:
          filter_metadata:
            envoy.lb:
              version: "1.0"
      - name: webcluster1
        weight: 10
        metadata_match:
          filter_metadata:
            envoy.lb:
              version: "1.1"
    metadata_match:
      filter_metadata:
        envoy.lb:
          stage: "prod"
http_filters:
- name: envoy.filters.http.router
# Start
docker-compose up
# Verify
./test.sh 172.31.33.2 # sends 200 requests; given the 90/10 weighted split, the result is roughly 9:1
Requests: v1.0:v1.1 = 184:16
curl -H "x-custom-version: v1.0" -H "x-environment-state: prod" 172.31.33.2/hostname # the [prod, 1.0] subset
ServerName: e1
ServerName: e2
ServerName: e5
curl -H "x-custom-version: v1.1" -H "x-environment-state: prod" 172.31.33.2/hostname # 調用1.1,prod子集
curl -H "x-custom-version: pre-release" -H "x-environment-state: dev" 172.31.33.2/hostname # 調用1.2-pre,dev子集
ServerName: e7
curl -H "x-environment-state: prod" -H "x-hardware-test: memory" 172.31.33.2/hostname # 調用prod,bigmem子集
ServerName: e5
ServerName: e6
curl -H "x-environment-state: prod" -H "x-hardware-test: std" 172.31.33.2/hostname # 調用prod,std子集
ServerName: e1
ServerName: e2
ServerName: e3
ServerName: e4
curl -H "x-environment-state: dev" -H "x-hardware-test: std" 172.31.33.2/hostname # 調用dev,std子集
ServerName: e7
curl -H "x-custom-version: v1.0" 172.31.33.2/hostname # 調用1.0子集
ServerName: e1
ServerName: e2
ServerName: e5
curl -H "x-custom-version: v1.1" 172.31.33.2/hostname # 調用1.1子集
ServerName: e3
ServerName: e4
ServerName: e6
curl -H "x-custom-version: pre-release" 172.31.33.2/hostname # 調用1.2-pre子集
ServerName: e7
curl -H "x-custom-version: pre-release" -H "x-custom-xlarge: isTrue" 172.31.33.2/hostname # 調用true,1.0子集
ServerName: e1
curl 172.31.33.2/hostname # the default subset
ServerName: e1
ServerName: e2
- circuit-breaker
cd servicemesh_in_practise/Cluster-Manager/circuit-breaker
# Edit the docker-compose file
vim docker-compose.yaml
services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment: # add this environment variable
    - ENVOY_UID=0
# Start
docker-compose up
# Verify
./send-requests.sh http://172.31.35.2 300 # send 300 requests to cluster one; once the max-connections limit is exceeded, 503 responses appear
./send-requests.sh http://172.31.35.2/livez 300 # send 300 requests to cluster two; once the max-connections limit is exceeded, 503 responses appear
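Those 503s are produced by circuit_breakers thresholds on the clusters, configured along these lines (a sketch; the exact limits are assumptions, the repo's values may differ):
circuit_breakers:
  thresholds:
  - priority: DEFAULT
    max_connections: 1 # cap on concurrent upstream connections
    max_pending_requests: 1 # requests queued beyond this are rejected with 503
    max_retries: 3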
Simulate a failure and verify.
curl -XPOST -d 'livez=FAIL' 172.31.35.14/livez
./send-requests.sh http://172.31.35.2/livez 300 # these requests now trip the outlier_detection settings; the log shows that after three 506 errors the failed host is ejected from the cluster, and even more 503s appear