Author: yinxin · 2018-10-25 11:41
Project Manager · a financial institution

OpenShift Common Operations Scripts


Scaling Up Compute Nodes

/etc/ansible/hosts

[OSEv3:children]
masters
nodes
etcd
new_nodes

...

[new_nodes]
node04.internal.aws.testdrive.openshift.com openshift_node_labels="{'region': 'apps'}" openshift_hostname=node04.internal.aws.testdrive.openshift.com openshift_public_hostname=node04.580763383722.aws.testdrive.openshift.com
node05.internal.aws.testdrive.openshift.com openshift_node_labels="{'region': 'apps'}" openshift_hostname=node05.internal.aws.testdrive.openshift.com openshift_public_hostname=node05.580763383722.aws.testdrive.openshift.com
node06.internal.aws.testdrive.openshift.com openshift_node_labels="{'region': 'apps'}" openshift_hostname=node06.internal.aws.testdrive.openshift.com openshift_public_hostname=node06.580763383722.aws.testdrive.openshift.com

...

ansible-playbook /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-node/scaleup.yml
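After the scale-up playbook finishes, it is worth confirming that the new nodes registered and are Ready, and then moving their entries from [new_nodes] to [nodes] in the inventory so later playbook runs treat them as existing nodes:

# the new nodes should show up as Ready, carrying the apps region label
oc get nodes --show-labels | grep region=apps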

OpenShift Metrics

...
[OSEv3:vars]
...
openshift_metrics_install_metrics=true
openshift_metrics_cassandra_storage_type=pv
openshift_metrics_cassandra_pvc_size=10Gi
openshift_metrics_hawkular_hostname=metrics.apps.580763383722.aws.testdrive.openshift.com
...

ansible-playbook /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-cluster/openshift-metrics.yml
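Once the playbook completes, the metrics components (Heapster, Hawkular Metrics, Cassandra) come up in the openshift-infra project, and the Hawkular route should answer at the hostname configured above:

# all metrics pods should reach Running
oc get pods -n openshift-infra
oc get route -n openshift-infra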

OpenShift Logging

...

[OSEv3:vars]
...
openshift_logging_install_logging=true
openshift_logging_namespace=logging
openshift_logging_es_pvc_size=10Gi
openshift_logging_kibana_hostname=kibana.apps.580763383722.aws.testdrive.openshift.com
openshift_logging_public_master_url=https://kibana.apps.580763383722.aws.testdrive.openshift.com
...

ansible-playbook /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-cluster/openshift-logging.yml
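Similarly, the EFK stack (Elasticsearch, Fluentd, Kibana) lands in the namespace set by openshift_logging_namespace:

oc get pods -n logging
oc get route -n logging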

OpenShift Multitenant Networking

Enable the ovs-multitenant SDN plugin in the inventory ([OSEv3:vars]):

os_sdn_network_plugin_name=redhat/openshift-ovs-multitenant

net-proj.sh

#!/bin/bash

# create the netproj-a and netproj-b projects
/usr/bin/oc new-project netproj-a
/usr/bin/oc new-project netproj-b

# deploy the DC definition into both projects
/usr/bin/oc create -f /opt/lab/support/ose.yaml -n netproj-a
/usr/bin/oc create -f /opt/lab/support/ose.yaml -n netproj-b

ose.yaml

apiVersion: v1
kind: DeploymentConfig
metadata:
  name: ose
  labels:
    run: ose
spec:
  strategy:
    type: Rolling
    rollingParams:
      updatePeriodSeconds: 1
      intervalSeconds: 1
      timeoutSeconds: 600
      maxUnavailable: 25%
      maxSurge: 25%
    resources: {}
  triggers:
    - type: ConfigChange
  replicas: 1
  test: false
  selector:
    run: ose
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: ose
    spec:
      containers:
        - name: ose
          image: 'registry.access.redhat.com/openshift3/ose:v3.5'
          command:
            - bash
            - '-c'
            - 'while true; do sleep 60; done'
          resources: {}
          terminationMessagePath: /dev/termination-log
          imagePullPolicy: IfNotPresent
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext: {}

podbip.sh

#!/bin/bash

# print the pod IP of the ose pod in the netproj-b project
/usr/bin/oc get pod -n netproj-b $(oc get pod -n netproj-b | awk '/ose-/ {print $1}') -o jsonpath='{.status.podIP}{"\n"}'

Join the netproj-a network to the netproj-b network:

oc adm pod-network join-projects netproj-a --to=netproj-b

Isolate the netproj-a network again:

oc adm pod-network isolate-projects netproj-a

Test connectivity from the netproj-a pod to the netproj-b pod IP (POD_A_NAME and POD_B_IP come from the lookups above); after isolating, the ping should fail:

oc exec -n netproj-a $POD_A_NAME -- ping -c1 -W1 $POD_B_IP
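Join/isolation state can also be read off the NETID that the ovs-multitenant plugin assigns to each project: joined projects share one NETID, isolated projects get their own.

oc get netnamespaces | grep netproj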

Node Management
Isolate a node from the cluster (mark it unschedulable):

oc adm manage-node node02.internal.aws.testdrive.openshift.com --schedulable=false

List the pods running on a given node:

oc adm manage-node node02.internal.aws.testdrive.openshift.com --list-pods

Evacuate the pods on a given node
Dry-run the evacuation:

oc adm manage-node node02.internal.aws.testdrive.openshift.com --evacuate --dry-run

Evacuate:

oc adm manage-node node02.internal.aws.testdrive.openshift.com --evacuate

Restore the node's schedulability:

oc adm manage-node node02.internal.aws.testdrive.openshift.com --schedulable=true
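Note: oc adm manage-node is the OpenShift 3.x tooling; on newer clusters the same workflow uses the standard Kubernetes verbs, shown here for reference:

oc adm cordon node02.internal.aws.testdrive.openshift.com
oc adm drain node02.internal.aws.testdrive.openshift.com --ignore-daemonsets
oc adm uncordon node02.internal.aws.testdrive.openshift.com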

Create a volume (attach a PVC to a dc)

oc volume dc/file-uploader --add --name=my-shared-storage \
-t pvc --claim-mode=ReadWriteMany --claim-size=5Gi \
--claim-name=my-shared-storage --mount-path=/opt/app-root/src/uploaded
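A ReadWriteMany claim only binds if the backing storage supports RWX access (for example the GlusterFS/CNS storage configured below); check the claim and the redeployment it triggers with:

oc get pvc my-shared-storage
oc rollout status dc/file-uploader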

Increasing Storage Capacity in CNS

[...]

[cns]
node01.580763383722.aws.testdrive.openshift.com
node02.580763383722.aws.testdrive.openshift.com
node03.580763383722.aws.testdrive.openshift.com
node04.580763383722.aws.testdrive.openshift.com
node05.580763383722.aws.testdrive.openshift.com
node06.580763383722.aws.testdrive.openshift.com

[...]

ansible-playbook /opt/lab/support/configure-firewall.yaml

oc label node/node04.internal.aws.testdrive.openshift.com storagenode=glusterfs
oc label node/node05.internal.aws.testdrive.openshift.com storagenode=glusterfs
oc label node/node06.internal.aws.testdrive.openshift.com storagenode=glusterfs

export HEKETI_CLI_SERVER=http://heketi-container-native-storage.apps.580763383722.aws.testdrive.openshift.com
export HEKETI_CLI_USER=admin
export HEKETI_CLI_KEY=myS3cr3tpassw0rd
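With the heketi environment variables exported, confirm the API is reachable before loading the new topology:

heketi-cli cluster list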

/opt/lab/support/topology-extended.json (the original three-node cluster plus a second cluster on the new nodes):

{
  "clusters": [
    {
      "nodes": [
        {
          "node": {
            "hostnames": {
              "manage": ["node01.internal.aws.testdrive.openshift.com"],
              "storage": ["10.0.1.30"]
            },
            "zone": 1
          },
          "devices": ["/dev/xvdd"]
        },
        {
          "node": {
            "hostnames": {
              "manage": ["node02.internal.aws.testdrive.openshift.com"],
              "storage": ["10.0.3.130"]
            },
            "zone": 2
          },
          "devices": ["/dev/xvdd"]
        },
        {
          "node": {
            "hostnames": {
              "manage": ["node03.internal.aws.testdrive.openshift.com"],
              "storage": ["10.0.4.150"]
            },
            "zone": 3
          },
          "devices": ["/dev/xvdd"]
        }
      ]
    },
    {
      "nodes": [
        {
          "node": {
            "hostnames": {
              "manage": ["node04.internal.aws.testdrive.openshift.com"],
              "storage": ["10.0.1.23"]
            },
            "zone": 1
          },
          "devices": ["/dev/xvdd"]
        },
        {
          "node": {
            "hostnames": {
              "manage": ["node05.internal.aws.testdrive.openshift.com"],
              "storage": ["10.0.3.141"]
            },
            "zone": 2
          },
          "devices": ["/dev/xvdd"]
        },
        {
          "node": {
            "hostnames": {
              "manage": ["node06.internal.aws.testdrive.openshift.com"],
              "storage": ["10.0.4.234"]
            },
            "zone": 3
          },
          "devices": ["/dev/xvdd"]
        }
      ]
    }
  ]
}

heketi-cli topology load --json=/opt/lab/support/topology-extended.json

heketi-cli topology info   # note the Cluster ID of the new (second) cluster

/opt/lab/support/second-cns-storageclass.yaml

apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
  name: cns-silver
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://heketi-container-native-storage.apps.580763383722.aws.testdrive.openshift.com"
  restauthenabled: "true"
  restuser: "admin"
  volumetype: "replicate:3"
  clusterid: "INSERT-CLUSTER-ID-HERE"
  secretNamespace: "default"
  secretName: "cns-secret"
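Substitute the cluster ID obtained from heketi-cli topology info for INSERT-CLUSTER-ID-HERE, then create and verify the storage class:

oc create -f /opt/lab/support/second-cns-storageclass.yaml
oc get storageclass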

Add a disk on an existing node

Get the NODEID:

heketi-cli node list | grep ca777ae0285ef6d8cd7237c862bd591c   # filter by the CLUSTERID

heketi-cli device add --name=/dev/xvde --node=33e0045354db4be29b18728cbe817605   # the NODEID

Remove a failing disk

heketi-cli node info 33e0045354db4be29b18728cbe817605   # the NODEID

The output looks like this:

Node Id: 33e0045354db4be29b18728cbe817605
State: online
Cluster Id: ca777ae0285ef6d8cd7237c862bd591c
Zone: 1
Management Hostname: node04.internal.aws.testdrive.openshift.com
Storage Hostname: 10.0.1.23
Devices:
Id:01c94798bf6b1af87974573b420c4dff Name:/dev/xvdd State:online Size (GiB):9 Used (GiB):1 Free (GiB):8
Id:da91a2f1c9f62d9916831de18cc09952 Name:/dev/xvde State:online Size (GiB):9 Used (GiB):1 Free (GiB):8

Remove the disk:

heketi-cli device disable 01c94798bf6b1af87974573b420c4dff
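Disabling only takes the device offline. To retire it completely, heketi also offers remove (which migrates the data off the device) followed by delete; a typical full sequence for the same device ID:

heketi-cli device disable 01c94798bf6b1af87974573b420c4dff
heketi-cli device remove 01c94798bf6b1af87974573b420c4dff
heketi-cli device delete 01c94798bf6b1af87974573b420c4dff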

Add a volume to the registry component

oc volume dc/docker-registry --add --name=registry-storage -t pvc \
--claim-mode=ReadWriteMany --claim-size=5Gi \
--claim-name=registry-storage --overwrite
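--overwrite replaces the registry's default emptyDir volume and triggers a new deployment; watch it converge with:

oc rollout status dc/docker-registry -n default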

Change the image of a dc

oc patch dc nginx -p '{"spec":{"template":{"spec":{"containers":[{"name":"nginx","image":"harbor.apps.example.com/public/nginx:1.14"}]}}}}'
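The same change can be made with oc set image, which avoids hand-writing the patch JSON:

oc set image dc/nginx nginx=harbor.apps.example.com/public/nginx:1.14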

Grant project A permission to pull images from project B's image streams

oc policy add-role-to-user system:image-puller system:serviceaccount:A:default -n B

Grant the Jenkins service account permission to manage project A's resources

oc policy add-role-to-user edit system:serviceaccount:jenkins:jenkins -n A

Manual etcd maintenance

export ETCDCTL_API=3

# check the health of every etcd endpoint
etcdctl --cacert=/etc/origin/master/master.etcd-ca.crt --cert=/etc/origin/master/master.etcd-client.crt --key=/etc/origin/master/master.etcd-client.key --endpoints=https://master1.os10.openshift.com:2379,https://master2.os10.openshift.com:2379,https://master3.os10.openshift.com:2379 endpoint health

# list every key OpenShift stores in etcd
ETCDCTL_API=3 etcdctl --cacert=/etc/origin/master/master.etcd-ca.crt --cert=/etc/origin/master/master.etcd-client.crt --key=/etc/origin/master/master.etcd-client.key --endpoints=https://master1.os10.openshift.com:2379,https://master2.os10.openshift.com:2379,https://master3.os10.openshift.com:2379 get / --prefix --keys-only

# delete the key of a stuck pod directly from etcd (a last resort)
ETCDCTL_API=3 etcdctl --cacert=/etc/origin/master/master.etcd-ca.crt --cert=/etc/origin/master/master.etcd-client.crt --key=/etc/origin/master/master.etcd-client.key --endpoints=https://master1.os10.openshift.com:2379,https://master2.os10.openshift.com:2379,https://master3.os10.openshift.com:2379 del /kubernetes.io/pods/bookinfo/nginx-4-bkdb4
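Before deleting keys by hand, it is prudent to snapshot etcd first; with the v3 API that is a single command (run against one endpoint; the target path here is just an example):

ETCDCTL_API=3 etcdctl --cacert=/etc/origin/master/master.etcd-ca.crt --cert=/etc/origin/master/master.etcd-client.crt --key=/etc/origin/master/master.etcd-client.key --endpoints=https://master1.os10.openshift.com:2379 snapshot save /tmp/etcd-snapshot.db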

Run a one-off task from an image

--restart=Always: the default; creates a deploymentconfig
--restart=OnFailure: creates a Job (in practice it shows up as a bare Pod)
--restart=OnFailure --schedule="0/5 * * * *": creates a CronJob
--restart=Never: creates a standalone Pod

oc run nginx -it --rm --image=nginx --restart=OnFailure ls
oc run nginx -it --rm --image=nginx --restart=OnFailure bash
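For the CronJob variant, a minimal sketch that fires every five minutes (the job name, image, and command are arbitrary examples):

oc run date-printer --image=nginx --restart=OnFailure --schedule="0/5 * * * *" -- bash -c date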
