flink on k8s

按照最新的1.11.0版本官方文檔裝的,中間有些修改,裝完后運(yùn)行了下wordcount是可以運(yùn)行的,但是web界面任務(wù)狀態(tài)刷不出來,有些異常

官網(wǎng)地址:

https://ci.apache.org/projects/flink/flink-docs-stable/ops/deployment/kubernetes.html

kubectl apply 以下幾個(gè)文件

flink配置文件

flink-configuration-configmap.yaml

---
# ConfigMap holding the Flink configuration and the log4j configs.
# Mounted into both the JobManager and TaskManager pods at /opt/flink/conf.
apiVersion: v1
kind: ConfigMap
metadata:
  name: flink-config
  labels:
    app: flink
data:
  flink-conf.yaml: |+
    jobmanager.rpc.address: flink-jobmanager
    taskmanager.numberOfTaskSlots: 20
    blob.server.port: 6124
    jobmanager.rpc.port: 6123
    taskmanager.rpc.port: 6122
    queryable-state.proxy.ports: 6125
    jobmanager.memory.process.size: 1600m
    taskmanager.memory.process.size: 18000m
    parallelism.default: 2
    classloader.resolve-order: child-first
  log4j-console.properties: |+
    # This affects logging for both user code and Flink
    rootLogger.level = INFO
    rootLogger.appenderRef.console.ref = ConsoleAppender
    rootLogger.appenderRef.rolling.ref = RollingFileAppender

    # Uncomment this if you want to _only_ change Flink's logging
    #logger.flink.name = org.apache.flink
    #logger.flink.level = INFO

    # The following lines keep the log level of common libraries/connectors on
    # log level INFO. The root logger does not override this. You have to manually
    # change the log levels here.
    logger.akka.name = akka
    logger.akka.level = INFO
    logger.kafka.name= org.apache.kafka
    logger.kafka.level = INFO
    logger.hadoop.name = org.apache.hadoop
    logger.hadoop.level = INFO
    logger.zookeeper.name = org.apache.zookeeper
    logger.zookeeper.level = INFO

    # Log all infos to the console
    appender.console.name = ConsoleAppender
    appender.console.type = CONSOLE
    appender.console.layout.type = PatternLayout
    appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n

    # Log all infos in the given rolling file
    appender.rolling.name = RollingFileAppender
    appender.rolling.type = RollingFile
    appender.rolling.append = false
    appender.rolling.fileName = ${sys:log.file}
    appender.rolling.filePattern = ${sys:log.file}.%i
    appender.rolling.layout.type = PatternLayout
    appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
    appender.rolling.policies.type = Policies
    appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
    appender.rolling.policies.size.size=100MB
    appender.rolling.strategy.type = DefaultRolloverStrategy
    appender.rolling.strategy.max = 10

    # Suppress the irrelevant (wrong) warnings from the Netty channel handler
    logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
    logger.netty.level = OFF
  log4j-cli.properties: |+
    log4j.rootLogger=INFO, file

    # Log all infos in the given file
    log4j.appender.file=org.apache.log4j.FileAppender
    log4j.appender.file.file=${log.file}
    log4j.appender.file.append=false
    log4j.appender.file.layout=org.apache.log4j.PatternLayout
    log4j.appender.file.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n

    # Log output from org.apache.flink.yarn to the console. This is used by the
    # CliFrontend class when using a per-job YARN cluster.
    log4j.logger.org.apache.flink.yarn=INFO, console
    log4j.logger.org.apache.flink.yarn.cli.FlinkYarnSessionCli=INFO, console
    log4j.logger.org.apache.hadoop=INFO, console

    log4j.appender.console=org.apache.log4j.ConsoleAppender
    log4j.appender.console.layout=org.apache.log4j.PatternLayout
    log4j.appender.console.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n

    # suppress the warning that hadoop native libraries are not loaded (irrelevant for the client)
    log4j.logger.org.apache.hadoop.util.NativeCodeLoader=OFF

    # suppress the irrelevant (wrong) warnings from the netty channel handler
    log4j.logger.org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline=ERROR, file
  log4j.properties: |+
    log4j.rootLogger=INFO, file
    log4j.logger.akka=INFO
    log4j.logger.org.apache.kafka=INFO
    log4j.logger.org.apache.hadoop=INFO
    log4j.logger.org.apache.zookeeper=INFO
    log4j.appender.file=org.apache.log4j.FileAppender
    log4j.appender.file.file=${log.file}
    log4j.appender.file.layout=org.apache.log4j.PatternLayout
    log4j.appender.file.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
    log4j.logger.org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline=ERROR, file


jobmanager外部端口service

jobmanager-rest-service.yaml

---
# NodePort Service exposing the JobManager REST API / web UI
# outside the cluster on node port 30081.
apiVersion: v1
kind: Service
metadata:
  name: flink-jobmanager-rest
spec:
  type: NodePort
  ports:
    - name: rest
      port: 8081
      targetPort: 8081
      nodePort: 30081
  selector:
    app: flink
    component: jobmanager

jobmanager通信service

jobmanager-service.yaml

---
# ClusterIP Service for in-cluster JobManager communication:
# RPC, blob server, and the web UI. TaskManagers resolve the
# JobManager via this service name (jobmanager.rpc.address).
apiVersion: v1
kind: Service
metadata:
  name: flink-jobmanager
spec:
  type: ClusterIP
  ports:
    - name: rpc
      port: 6123
    - name: blob-server
      port: 6124
    - name: webui
      port: 8081
  selector:
    app: flink
    component: jobmanager

taskmanager外部端口service

taskmanager-query-stat-service.yaml

---
# NodePort Service exposing the TaskManager queryable-state proxy
# outside the cluster on node port 30025.
apiVersion: v1
kind: Service
metadata:
  name: flink-taskmanager-query-state
spec:
  type: NodePort
  ports:
    - name: query-state
      port: 6125
      targetPort: 6125
      nodePort: 30025
  selector:
    app: flink
    component: taskmanager

taskmanager會話

taskmanager-session-deployment.yaml

---
# TaskManager Deployment for a standalone session cluster (3 replicas).
# Configuration comes from the flink-config ConfigMap mounted over
# /opt/flink/conf/.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: flink-taskmanager
spec:
  replicas: 3
  selector:
    matchLabels:
      app: flink
      component: taskmanager
  template:
    metadata:
      labels:
        app: flink
        component: taskmanager
    spec:
      containers:
        - name: taskmanager
          # image: flink:1.7.2-scala_2.11  # official image, kept for reference
          image: "172.16.22.162:5000/myflink:v2"  # private registry image
          args: ["taskmanager"]
          ports:
            - containerPort: 6121
              name: data
            - containerPort: 6122
              name: rpc
            - containerPort: 6125
              name: query-state
          livenessProbe:
            tcpSocket:
              port: 6122  # TaskManager RPC port
            initialDelaySeconds: 30
            periodSeconds: 60
          volumeMounts:
            - name: flink-config-volume
              mountPath: /opt/flink/conf/
          securityContext:
            runAsUser: 9999  # refers to user _flink_ from official flink image, change if necessary
      volumes:
        - name: flink-config-volume
          configMap:
            name: flink-config
            items:
              - key: flink-conf.yaml
                path: flink-conf.yaml
              - key: log4j-console.properties
                path: log4j-console.properties
              - key: log4j-cli.properties
                path: log4j-cli.properties
              - key: log4j.properties
                path: log4j.properties

jobmanager會話

jobmanager-session-deployment.yaml

---
# JobManager Deployment for a standalone session cluster (single replica).
# Exposed in-cluster via the flink-jobmanager Service and externally via
# the flink-jobmanager-rest NodePort Service.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: flink-jobmanager
spec:
  replicas: 1
  selector:
    matchLabels:
      app: flink
      component: jobmanager
  template:
    metadata:
      labels:
        app: flink
        component: jobmanager
    spec:
      containers:
        - name: jobmanager
          # image: flink:1.7.2-scala_2.11  # official image, kept for reference
          image: "172.16.22.162:5000/myflink:v2"  # private registry image
          # Disabled experiment kept for reference; note that exec-form
          # command entries must be split per argument, e.g.
          # command: ["chown", "-R", "flink:flink", "/opt/flink"]
          args: ["jobmanager"]
          ports:
            - containerPort: 6123
              name: rpc
            - containerPort: 6124
              name: blob-server
            - containerPort: 8081
              name: webui
          livenessProbe:
            tcpSocket:
              port: 6123  # JobManager RPC port
            initialDelaySeconds: 30
            periodSeconds: 60
          volumeMounts:
            - name: flink-config-volume
              mountPath: /opt/flink/conf
          securityContext:
            runAsUser: 9999  # refers to user _flink_ from official flink image, change if necessary
      volumes:
        - name: flink-config-volume
          configMap:
            name: flink-config
            items:
              - key: flink-conf.yaml
                path: flink-conf.yaml
              - key: log4j-console.properties
                path: log4j-console.properties
              - key: log4j-cli.properties
                path: log4j-cli.properties
              - key: log4j.properties
                path: log4j.properties


kubectl apply -f flink-configuration-configmap.yaml

kubectl apply -f jobmanager-service.yaml

kubectl apply -f jobmanager-rest-service.yaml

kubectl apply -f taskmanager-query-stat-service.yaml

kubectl apply -f jobmanager-session-deployment.yaml

kubectl apply -f taskmanager-session-deployment.yaml

去nodeport端口30081訪問即可

最后編輯于
著作權(quán)歸作者所有,轉(zhuǎn)載或內(nèi)容合作請聯(lián)系作者
平臺聲明:文章內(nèi)容(如有圖片或視頻亦包括在內(nèi))由作者上傳并發(fā)布,文章內(nèi)容僅代表作者本人觀點(diǎn),簡書系信息發(fā)布平臺,僅提供信息存儲服務(wù)。