
OpenShift Common Operations Scripts

Tags: Containers

Scaling Up Compute Nodes

# /etc/ansible/hosts
[OSEv3:children]
masters
nodes
etcd
new_nodes

...

[new_nodes]
node04.internal.aws.testdrive.openshift.com openshift_node_labels="{'region': 'apps'}" openshift_hostname=node04.internal.aws.testdrive.openshift.com openshift_public_hostname=node04.580763383722.aws.testdrive.openshift.com
node05.internal.aws.testdrive.openshift.com openshift_node_labels="{'region': 'apps'}" openshift_hostname=node05.internal.aws.testdrive.openshift.com openshift_public_hostname=node05.580763383722.aws.testdrive.openshift.com
node06.internal.aws.testdrive.openshift.com openshift_node_labels="{'region': 'apps'}" openshift_hostname=node06.internal.aws.testdrive.openshift.com openshift_public_hostname=node06.580763383722.aws.testdrive.openshift.com

...
ansible-playbook /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-node/scaleup.yml
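Once the playbook finishes, the new nodes should register with the cluster and report Ready; a quick check:

oc get nodes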

OpenShift Metrics

...
[OSEv3:vars]
...
openshift_metrics_install_metrics=true
openshift_metrics_cassandra_storage_type=pv
openshift_metrics_cassandra_pvc_size=10Gi
openshift_metrics_hawkular_hostname=metrics.apps.580763383722.aws.testdrive.openshift.com
...
ansible-playbook /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-cluster/openshift-metrics.yml
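After the playbook completes, the metrics components (Heapster, Hawkular, Cassandra) can be checked; in OCP 3.x they normally run in the openshift-infra project:

oc get pods -n openshift-infra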

OpenShift Logging

...

[OSEv3:vars]
...
openshift_logging_install_logging=true
openshift_logging_namespace=logging
openshift_logging_es_pvc_size=10Gi
openshift_logging_kibana_hostname=kibana.apps.580763383722.aws.testdrive.openshift.com
openshift_logging_public_master_url=https://kibana.apps.580763383722.aws.testdrive.openshift.com
...
ansible-playbook /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-cluster/openshift-logging.yml
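The EFK pods land in the namespace set by openshift_logging_namespace above, so they can be verified with:

oc get pods -n logging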

OpenShift Multitenant Networking

os_sdn_network_plugin_name=redhat/openshift-ovs-multitenant
# net-proj.sh
#!/bin/bash
# create NetworkA, NetworkB projects
/usr/bin/oc new-project netproj-a
/usr/bin/oc new-project netproj-b
# deploy the DC definition into the projects
/usr/bin/oc create -f /opt/lab/support/ose.yaml -n netproj-a
/usr/bin/oc create -f /opt/lab/support/ose.yaml -n netproj-b
# ose.yaml
apiVersion: v1
kind: DeploymentConfig
metadata:
  name: ose
  labels:
    run: ose
spec:
  strategy:
    type: Rolling
    rollingParams:
      updatePeriodSeconds: 1
      intervalSeconds: 1
      timeoutSeconds: 600
      maxUnavailable: 25%
      maxSurge: 25%
    resources:
  triggers:
    -
      type: ConfigChange
  replicas: 1
  test: false
  selector:
    run: ose
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: ose
    spec:
      containers:
        -
          name: ose
          image: 'registry.access.redhat.com/openshift3/ose:v3.5'
          command:
            - bash
            - '-c'
            - 'while true; do sleep 60; done'
          resources:
          terminationMessagePath: /dev/termination-log
          imagePullPolicy: IfNotPresent
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext:
# podbip.sh
#!/bin/bash
/usr/bin/oc get pod -n netproj-b $(oc get pod -n netproj-b | awk '/ose-/ {print $1}') -o jsonpath='{.status.podIP}{"\n"}'

Join the netproj-a network to the netproj-b network

oc adm pod-network join-projects netproj-a --to=netproj-b
oc get netnamespace
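To test connectivity after the join, the variables used by the commands in this section can be filled in following the podbip.sh pattern above (a sketch; the pods belong to the ose DC created by net-proj.sh):

POD_A_NAME=$(oc get pod -n netproj-a | awk '/ose-/ {print $1}')
POD_B_IP=$(oc get pod -n netproj-b $(oc get pod -n netproj-b | awk '/ose-/ {print $1}') -o jsonpath='{.status.podIP}')
oc exec -n netproj-a $POD_A_NAME -- ping -c1 -W1 $POD_B_IP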

Isolate the netproj-a network again

oc adm pod-network isolate-projects netproj-a
oc get netnamespace
oc exec -n netproj-a $POD_A_NAME -- ping -c1 -W1 $POD_B_IP

Node Management

Take a node out of the cluster (mark it unschedulable)

oc adm manage-node node02.internal.aws.testdrive.openshift.com --schedulable=false

List the pods running on a given node

 oc adm manage-node node02.internal.aws.testdrive.openshift.com --list-pods

Evacuate the pods on a given node

Dry-run the evacuation

oc adm manage-node node02.internal.aws.testdrive.openshift.com --evacuate --dry-run

Evacuate

oc adm manage-node node02.internal.aws.testdrive.openshift.com --evacuate

Make the node schedulable again

oc adm manage-node node02.internal.aws.testdrive.openshift.com --schedulable=true

Create a volume

oc volume dc/file-uploader --add --name=my-shared-storage \
-t pvc --claim-mode=ReadWriteMany --claim-size=5Gi \
--claim-name=my-shared-storage --mount-path=/opt/app-root/src/uploaded
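The claim created by the command above can then be inspected:

oc get pvc my-shared-storage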

Increasing Storage Capacity in CNS

[...]
[cns]
node01.580763383722.aws.testdrive.openshift.com
node02.580763383722.aws.testdrive.openshift.com
node03.580763383722.aws.testdrive.openshift.com
node04.580763383722.aws.testdrive.openshift.com
node05.580763383722.aws.testdrive.openshift.com
node06.580763383722.aws.testdrive.openshift.com
[...]
ansible-playbook /opt/lab/support/configure-firewall.yaml
oc label node/node04.internal.aws.testdrive.openshift.com storagenode=glusterfs
oc label node/node05.internal.aws.testdrive.openshift.com storagenode=glusterfs
oc label node/node06.internal.aws.testdrive.openshift.com storagenode=glusterfs
export HEKETI_CLI_SERVER=http://heketi-container-native-storage.apps.580763383722.aws.testdrive.openshift.com
export HEKETI_CLI_USER=admin
export HEKETI_CLI_KEY=myS3cr3tpassw0rd
# /opt/lab/support/topology-extended.json
{
    "clusters": [
        {
            "nodes": [
                {
                    "node": {
                        "hostnames": {
                            "manage": ["node01.internal.aws.testdrive.openshift.com"],
                            "storage": ["10.0.1.30"]
                        },
                        "zone": 1
                    },
                    "devices": ["/dev/xvdd"]
                },
                {
                    "node": {
                        "hostnames": {
                            "manage": ["node02.internal.aws.testdrive.openshift.com"],
                            "storage": ["10.0.3.130"]
                        },
                        "zone": 2
                    },
                    "devices": ["/dev/xvdd"]
                },
                {
                    "node": {
                        "hostnames": {
                            "manage": ["node03.internal.aws.testdrive.openshift.com"],
                            "storage": ["10.0.4.150"]
                        },
                        "zone": 3
                    },
                    "devices": ["/dev/xvdd"]
                }
            ]
        },
        {
            "nodes": [
                {
                    "node": {
                        "hostnames": {
                            "manage": ["node04.internal.aws.testdrive.openshift.com"],
                            "storage": ["10.0.1.23"]
                        },
                        "zone": 1
                    },
                    "devices": ["/dev/xvdd"]
                },
                {
                    "node": {
                        "hostnames": {
                            "manage": ["node05.internal.aws.testdrive.openshift.com"],
                            "storage": ["10.0.3.141"]
                        },
                        "zone": 2
                    },
                    "devices": ["/dev/xvdd"]
                },
                {
                    "node": {
                        "hostnames": {
                            "manage": ["node06.internal.aws.testdrive.openshift.com"],
                            "storage": ["10.0.4.234"]
                        },
                        "zone": 3
                    },
                    "devices": ["/dev/xvdd"]
                }
            ]
        }
    ]
}
heketi-cli topology load --json=/opt/lab/support/topology-extended.json
heketi-cli topology info  ## get the Cluster ID
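The cluster IDs can also be listed directly with heketi-cli; the ID of the newly loaded cluster is the one to paste into the StorageClass below:

heketi-cli cluster list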
# /opt/lab/support/second-cns-storageclass.yaml
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
  name: cns-silver
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://heketi-container-native-storage.apps.580763383722.aws.testdrive.openshift.com"
  restauthenabled: "true"
  restuser: "admin"
  volumetype: "replicate:3"
  clusterid: "INSERT-CLUSTER-ID-HERE" 
  secretNamespace: "default"
  secretName: "cns-secret"
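After inserting the cluster ID, create the StorageClass:

oc create -f /opt/lab/support/second-cns-storageclass.yaml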

Add a disk to an existing node

# get the NODEID
heketi-cli node list | grep ca777ae0285ef6d8cd7237c862bd591c   # (CLUSTERID)

heketi-cli device add --name=/dev/xvde --node=33e0045354db4be29b18728cbe817605   # (NODEID)

Remove a faulty disk

heketi-cli node info 33e0045354db4be29b18728cbe817605   # (NODEID)

The output looks like this:

Node Id: 33e0045354db4be29b18728cbe817605
State: online
Cluster Id: ca777ae0285ef6d8cd7237c862bd591c
Zone: 1
Management Hostname: node04.internal.aws.testdrive.openshift.com
Storage Hostname: 10.0.1.23
Devices:
Id:01c94798bf6b1af87974573b420c4dff   Name:/dev/xvdd           State:online    Size (GiB):9       Used (GiB):1       Free (GiB):8
Id:da91a2f1c9f62d9916831de18cc09952   Name:/dev/xvde           State:online    Size (GiB):9       Used (GiB):1       Free (GiB):8

Remove the disk

heketi-cli device disable 01c94798bf6b1af87974573b420c4dff
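Disabling only takes the device out of service. To drop it from the topology completely, heketi-cli also offers remove/delete subcommands; a sketch using the same device ID (remove migrates the bricks off the device first):

heketi-cli device remove 01c94798bf6b1af87974573b420c4dff
heketi-cli device delete 01c94798bf6b1af87974573b420c4dff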

Add a volume to the registry component

oc volume dc/docker-registry --add --name=registry-storage -t pvc \
--claim-mode=ReadWriteMany --claim-size=5Gi \
--claim-name=registry-storage --overwrite

Change a DC's image

 oc patch dc nginx -p '{"spec":{"template":{"spec":{"containers":[{"name":"nginx","image":"harbor.apps.example.com/public/nginx:1.14"}]}}}}'
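Since the pod template changed, a new rollout starts (assuming the DC keeps its default ConfigChange trigger); it can be followed with:

oc rollout status dc/nginx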

Grant project A permission to pull project B's image streams

oc policy add-role-to-user system:image-puller system:serviceaccount:A:default -n B
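With the role in place, pods in project A running as the default service account can pull project B's images from the internal registry; a sketch (docker-registry.default.svc:5000 is the usual 3.x registry service address, and myis is a hypothetical image stream name):

oc run b-image-test -n A --image=docker-registry.default.svc:5000/B/myis:latest --restart=Never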

Grant Jenkins permission to manage project A's resources

oc policy add-role-to-user edit system:serviceaccount:jenkins:jenkins -n A

Manual etcd maintenance

export ETCDCTL_API=3
etcdctl --cacert=/etc/origin/master/master.etcd-ca.crt --cert=/etc/origin/master/master.etcd-client.crt --key=/etc/origin/master/master.etcd-client.key --endpoints=https://master1.os10.openshift.com:2379,https://master2.os10.openshift.com:2379,https://master3.os10.openshift.com:2379 endpoint health

ETCDCTL_API=3 etcdctl --cacert=/etc/origin/master/master.etcd-ca.crt --cert=/etc/origin/master/master.etcd-client.crt --key=/etc/origin/master/master.etcd-client.key --endpoints=https://master1.os10.openshift.com:2379,https://master2.os10.openshift.com:2379,https://master3.os10.openshift.com:2379 get / --prefix --keys-only

ETCDCTL_API=3 etcdctl --cacert=/etc/origin/master/master.etcd-ca.crt --cert=/etc/origin/master/master.etcd-client.crt --key=/etc/origin/master/master.etcd-client.key --endpoints=https://master1.os10.openshift.com:2379,https://master2.os10.openshift.com:2379,https://master3.os10.openshift.com:2379 del /kubernetes.io/pods/bookinfo/nginx-4-bkdb4

Run a task from an image (oc run)

--restart=Always: the default; creates a DeploymentConfig
--restart=OnFailure: creates a Job (in practice it turns out to be a bare Pod)
--restart=OnFailure --schedule="0/5 * * * *": creates a CronJob
--restart=Never: creates a standalone Pod

oc run nginx -it --rm  --image=nginx --restart=OnFailure  ls
oc run nginx -it --rm  --image=nginx --restart=OnFailure  bash
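For the scheduled case, a minimal sketch (the name, image and schedule are illustrative):

oc run nginx-cron --image=nginx --restart=OnFailure --schedule="0/5 * * * *" -- ls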

Clean up containers on a host

When the docker-storage storage-driver is devicemapper, the following error can show up: devmapper: Thin Pool has 162394 free data blocks which is less than minimum required 163840 free data blocks. Create more free space in thin pool or use dm.min_free_space option to change behavior. When that happens, the container storage on the host needs to be cleaned up.
The steps are as follows:

# clean up exited containers:
docker rm $(docker ps -q -f status=exited)
# clean up dangling volumes:
docker volume rm $(docker volume ls -qf dangling=true)
# clean up dangling images:
docker rmi $(docker images --filter "dangling=true" -q --no-trunc)
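To see how much thin-pool space is actually free before and after the cleanup, something like the following helps (assuming the devicemapper driver; the pool LV name depends on docker-storage-setup):

docker info | grep -i 'data space'
lvs -a | grep -i pool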

Reference: http://www.cnblogs.com/mhc-fly/p/9324425.html

Reserving memory and CPU on nodes

/etc/origin/node/node-config.yaml

kubeletArguments:
  system-reserved:
  - cpu=200m
  - memory=1G
  kube-reserved:
  - cpu=200m
  - memory=1G
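For the reservation to take effect, the node service usually has to be restarted afterwards (the unit is atomic-openshift-node on OCP 3.x, origin-node on OKD):

systemctl restart atomic-openshift-node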

Show only a DC's image name with oc get

[root@master]$ oc get dc test-app --template={{range.spec.template.spec.containers}}{{.image}}{{end}}
registry.example.com/test/test-app:1.13
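The same Go template works across every DC in a project (a sketch; println just separates the entries):

oc get dc --template='{{range .items}}{{range .spec.template.spec.containers}}{{.image}}{{println}}{{end}}{{end}}'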

OpenShift web console support for a private image registry

  • Create a certificate for the private image registry

[root@registry ~]# mkdir /etc/crts/ && cd /etc/crts
[root@registry ~]# openssl req \
   -newkey rsa:2048 -nodes -keyout example.com.key \
   -x509 -days 365 -out example.com.crt -subj \
   "/C=CN/ST=GD/L=SZ/O=Global Security/OU=IT Department/CN=*.example.com"
  • Copy the registry's CA file into /etc/pki/ca-trust/source/anchors/ on the registry server

  • Configure TLS in the registry; for docker-distribution this is /etc/docker-distribution/registry/config.yml:

    http:
       addr: :443
       tls:
           certificate: /etc/crts/example.com.crt
           key: /etc/crts/example.com.key
  • Restart docker-distribution

    [root@registry ~]# systemctl daemon-reload && systemctl restart docker-distribution && systemctl enable docker-distribution
  • Run update-ca-trust extract on the registry server

  • Copy the registry's CA file into /etc/pki/ca-trust/source/anchors/ on every OpenShift node

  • Run update-ca-trust extract on every OpenShift node, then verify the trust chain as sketched below
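A simple way to confirm a node now trusts the registry certificate (registry.example.com is a hypothetical hostname covered by the *.example.com certificate):

curl -v https://registry.example.com/v2/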

Inspect etcd data

etcdctl --cert-file=/etc/origin/master/master.etcd-client.crt --key-file /etc/origin/master/master.etcd-client.key --ca-file /etc/origin/master/master.etcd-ca.crt --endpoints="https://master1.os10.openshift.example.com:2379,https://master2.os10.openshift.example.com:2379,https://master3.os10.openshift.example.com:2379"

export ETCDCTL_API=3
etcdctl --cacert=/etc/origin/master/master.etcd-ca.crt --cert=/etc/origin/master/master.etcd-client.crt --key=/etc/origin/master/master.etcd-client.key --endpoints=https://master1.os10.openshift.example.com:2379,https://master2.os10.openshift.example.com:2379,https://master3.os10.openshift.example.com:2379 endpoint health

ETCDCTL_API=3 etcdctl --cacert=/etc/origin/master/master.etcd-ca.crt --cert=/etc/origin/master/master.etcd-client.crt --key=/etc/origin/master/master.etcd-client.key --endpoints=https://master1.os10.openshift.example.com:2379,https://master2.os10.openshift.example.com:2379,https://master3.os10.openshift.example.com:2379 get / --prefix --keys-only

Sum the CPU/memory limits of all pods in a project

## sum of all pods' limits.cpu
data=$(pods=`oc get pod|awk '{print $1}'|grep -v NAME`; for pod in $pods; do oc get pod $pod --template={{range.spec.containers}}{{.resources.limits.cpu}}{{println}}{{end}}; done); i=0; for j in $(echo $data); do i=$(($i+$j)); done; echo $i;
## sum of all pods' limits.memory (assumes the values are expressed in G/Gi)
data=$(pods=`oc get pod|awk '{print $1}'|grep -v NAME`; for pod in $pods; do oc get pod $pod --template={{range.spec.containers}}{{.resources.limits.memory}}{{println}}{{end}}; done); i=0; for j in $(echo $data); do mj=$(echo $j|cut -dG -f1); i=$(($i+$mj)); done; echo $i;

DNSMasq fails to start with the error: DBus error: Connection ":1.180" is not allowed to own the service "uk.org.thekelleys.dnsmasq"

$ cat /etc/dbus-1/system.d/dnsmasq.conf
<!DOCTYPE busconfig PUBLIC
 "-//freedesktop//DTD D-BUS Bus Configuration 1.0//EN"
 "http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
<busconfig>
    <policy user="root">
        <allow own="uk.org.thekelleys.dnsmasq"/>
        <allow send_destination="uk.org.thekelleys.dnsmasq"/>
    </policy>
    <policy context="default">
        <allow own="uk.org.thekelleys.dnsmasq"/>
        <allow send_destination="uk.org.thekelleys.dnsmasq"/>
    </policy>
</busconfig>
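After adjusting the D-Bus policy, reload D-Bus, restart dnsmasq, and check the log (standard systemd unit names assumed):

systemctl reload dbus
systemctl restart dnsmasq
journalctl -u dnsmasq -e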



Author: 潘晓华Michael
Source: https://www.jianshu.com/p/4510ed5bce8b

