We are trying to migrate from k8s 1.1 to 1.2.4, and the code we have for creating watches on replication controllers no longer seems to work: the watches are created, but we never receive 'Added' or 'Modified' events for them. Do fabric8 and kubernetes 1.2.4 not support watches on replication controllers? (Code that worked under 1.1 no longer works.)

I have created the smallest reproducible test case I can and included it below. For the convenience of anyone interested in looking at this, I am also including the scripts I use to start single-node k8s 1.1 and k8s 1.2.4 clusters.

Please note the following about the test:

0. We use fabric8 1.3.91 to connect to k8s 1.2.4 and fabric8 1.2.2 to connect to k8s 1.1. 
1. There are slight changes in the fabric8 API that will require you to 
    tweak the REPRODUCIBLE TEST CASE according to the version you run on 
    (a consolidated sketch of the newer-fabric8 tweaks follows after this list). 

     Please run the k8s 1.1 scenario first. 
     As-shipped, the test program will compile against that version. 
     After that, please tweak as follows: 

     comment out the following for new fabric8 
      import io.fabric8.kubernetes.client.DefaultKubernetesClient.ConfigBuilder 

     replace masterUrl with: withMasterUrl 

     uncomment the onClose method of new Watcher { ... } 


2. you should start the appropriate single-node k8s script before you run the test program. 

3. make sure you create the namespace 'junk6', which you can do by saving 
    the lines below in a file ns.yaml, then typing kubectl create -f ns.yaml 

      kind: Namespace 
      apiVersion: v1 
      metadata: 
        name: junk6 
        labels: 
          name: junk6 

4. For k8s 1.1 you will see a log message containing the string REPCONTROLLER, and the message 
    'future is done'.  You will not see these when you run under k8s 1.2.4 because it seems 
    the watch event is never received. 

5. The name of the rep controller is spark master, but the image is redis. Please ignore that. 
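For reference, points 0 and 1 amount to the following on the newer-fabric8 (1.3.91) side. This is only a consolidated sketch of the tweaks described above, not part of the test program itself; it assumes the newer client's Config.builder()/withMasterUrl style (the same calls used in the answer below) and a Watcher that must also implement onClose. The object name NewerFabric8Variant is purely illustrative.

import io.fabric8.kubernetes.api.model.ReplicationController
import io.fabric8.kubernetes.client.Watcher.Action
import io.fabric8.kubernetes.client.{Config, DefaultKubernetesClient, KubernetesClientException, Watcher}

object NewerFabric8Variant {

  // Connection setup: Config.builder() + withMasterUrl replace new ConfigBuilder() + masterUrl.
  def getConnection(k8sUrl: String): DefaultKubernetesClient = {
    val config = Config.builder().
      withMasterUrl(k8sUrl).
      build()
    new DefaultKubernetesClient(config)
  }

  // The newer Watcher interface also requires onClose to be implemented.
  def rcWatcher(name: String): Watcher[ReplicationController] = new Watcher[ReplicationController]() {
    override def eventReceived(action: Action, watchedRc: ReplicationController): Unit =
      println(s"event recv'd for REPCONTROLLER $name. action=$action")
    override def onClose(cause: KubernetesClientException): Unit =
      println(s"watch for $name closed")
  }
}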

REPRODUCIBLE TEST CASE

package com.foo.blah 

import com.fasterxml.jackson.databind.ObjectMapper 
import com.typesafe.scalalogging.StrictLogging 
import io.fabric8.kubernetes.api.model.ReplicationController 
import io.fabric8.kubernetes.client.DefaultKubernetesClient.ConfigBuilder 

//import io.fabric8.kubernetes.client.DefaultKubernetesClient.ConfigBuilder /* comment this out for new fabric8 */ 
import io.fabric8.kubernetes.client.Watcher.Action 
import io.fabric8.kubernetes.client._ 

import scala.collection.JavaConverters._ 
import scala.concurrent.{Future, Promise} 
import scala.util.Try 

// To run: 
//   make sure you have a namespace called junk6 ! 

object BugReport extends App with StrictLogging { 

    class RepControllerAndWatchCreator(val rc: io.fabric8.kubernetes.api.model.ReplicationController, 
            namespace: String) { 

    def instantiate(kube: KubernetesClient, name: String): Future[Unit] = { 
     { 
     val promise = Promise[Unit]() 
     logger.debug(s"setting up 'create complete' watch for component $name ") 
     kube.replicationControllers().inNamespace(namespace).create(rc) /* create the rc ! */ 
     val rcWatcher = getRcWatch(name, namespace, promise) 
     val rcWatch = kube.replicationControllers().inNamespace(namespace).withName(name).watch(rcWatcher) 
     logger.debug(s"after rc watch - name $name") 
     promise.future 
     } 
    } 

    private[this] def getRcWatch(name: String, 
           nameSpace: String, 
           promise: Promise[Unit]): Watcher[ReplicationController] = { 
     logger.debug(s"setting up 'create complete' watch for component $name ns=$namespace") 

     new Watcher[ReplicationController]() { 
     def eventReceived(action: Action, watchedRc: ReplicationController) { 
      logger.debug(s"event recv'd for REPCONTROLLER $name. action=$action [ $watchedRc ] ") 
      promise.success(()) 
     } 
     /*  Uncomment this for newer version of fabric8 API. 

     override def onClose(cause: KubernetesClientException): Unit = { 
      if (!promise.isCompleted) { 
      logger.trace("Watcher is close but promise is not completed.") 
      } 
     } 

     */ 
     } 
    } 

    private[this] def isRcComplete(rc: ReplicationController) = { 
     val retval = rc.getSpec.getReplicas == rc.getStatus.getReplicas 
     logger.debug(s"isRcComplete [ ${rc.getMetadata.getName} ] = $retval") 
     retval 
    } 
    } 


    val k8sUrl = "http://localhost:8080" 
    val namespaceName = "junk6" 

    def go(): Unit = { 
    import scala.concurrent.ExecutionContext.Implicits.global 
    val kube: KubernetesClient = getConnection 
    val rc: ReplicationController = getRc 

    val result: Future[Unit] = new RepControllerAndWatchCreator(rc, namespaceName).instantiate(kube, "spark-master-rc") 
    result onComplete { (fut: Try[Unit]) => 
     println(s"future is done: $fut") 
    } 

    Thread.sleep(500 * 1000) 
    } 

    def getRc: ReplicationController = { 
    val jsonTemplate = 
     """ 
    |{ 
    | "kind": "ReplicationController", 
    | "apiVersion": "v1", 
    | "metadata": { 
    | "name": "spark-master-rc", 
    | "labels": { 
    |  "name": "spark-master" 
    | } 
    | }, 
    | "spec": { 
    | "replicas": 1, 
    | "selector": { 
    |  "name": "spark-master" 
    | }, 
    | "template": { 
    |  "metadata": { 
    |  "labels": { 
    |   "name": "spark-master" 
    |  } 
    |  }, 
    |  "spec": { 
    |  "containers": [ 
    |   { 
    |   "name": "spark-master", 
    |   "image": "redis", 
    |   "imagePullPolicy": "Always", 
    |   "ports": [ 
    |    { 
    |    "containerPort": 7077 
    |    }, 
    |    { 
    |    "containerPort": 8080 
    |    } 
    |   ], 
    |   "resources": { 
    |    "requests": { 
    |    "cpu": "2000m", 
    |    "memory": "4Gi" 
    |    }, 
    |    "limits": { 
    |    "cpu": "2000m", 
    |    "memory": "4Gi" 
    |    } 
    |   } 
    |   } 
    |  ] 
    |  } 
    | } 
    | } 
    |} 
     """. 
    stripMargin 
    System.out.println("json:" + jsonTemplate); 
    new ObjectMapper().readValue(jsonTemplate, classOf[ReplicationController]) 
    } 

    def getConnection = { 
    //val configBuilder = new ConfigBuilder()  /* For newer fabric8, replace with: Config.builder() */ 
    val configBuilder = new ConfigBuilder()  /* For newer fabric8, replace with: Config.builder() */ 
    val config = 
     configBuilder. 
     //masterUrl(k8sUrl).     /* For newer fabric8, replace with: withMasterUrl */ 
     //withMasterUrl(k8sUrl).     /* For older fabric8, replace with: masterUrl */ 
     masterUrl(k8sUrl).     /* For newer fabric8, replace with: withMasterUrl */ 
     build() 
    new DefaultKubernetesClient(config) 
    } 


    go() 
} 

STARTUP SCRIPT FOR K8S 1.1

#!/usr/bin/env bash 

# magic selinux context set command is required. for details, see: http://stackoverflow.com/questions/34777111/cannot-create-a-shared-volume-mount-via-emptydir-on-single-node-kubernetes-on 
# 
sudo chcon -Rt svirt_sandbox_file_t /var/lib/kubelet 


docker run --net=host -d gcr.io/google_containers/etcd:2.0.12 /usr/local/bin/etcd --addr=127.0.0.1:4001 --bind-addr=0.0.0.0:4001 --data-dir=/var/etcd/data 


docker run \ 
    --volume=/:/rootfs:ro \ 
    --volume=/sys:/sys:ro \ 
    --volume=/dev:/dev \ 
    --vol 

STARTUP SCRIPT FOR K8S 1.2.4

#!/usr/bin/env bash 

# magic selinux context set command is required. for details, see: http://stackoverflow.com/questions/34777111/cannot-create-a-shared-volume-mount-via-emptydir-on-single-node-kubernetes-on 
# 
sudo chcon -Rt svirt_sandbox_file_t /var/lib/kubelet 

#docker run --net=host -d gcr.io/google_containers/etcd:2.0.12 /usr/local/bin/etcd --addr=127.0.0.1:4001 --bind-addr=0.0.0.0:4001 --data-dir=/var/etcd/data 

# Update k8s local cluster to latest stable version according to "Running Kubernetes Locally via Docker" 
# http://kubernetes.io/docs/getting-started-guides/docker/ 
# export K8S_VERSION=$(curl -sS https://storage.googleapis.com/kubernetes-release/release/stable.txt) 
export K8S_VERSION=v1.2.4 
export ARCH=amd64 

docker run \ 
    --volume=/:/rootfs:ro \ 
    --volume=/sys:/sys:ro \ 
    --volume=/dev:/dev \ 
    --volume=/var/lib/docker/:/var/lib/docker:ro \ 
    --volume=/var/lib/kubelet/:/var/lib/kubelet:rw \ 
    --volume=/var/run:/var/run:rw \ 
    --net=host \ 
    --pid=host \ 
    --privileged=true \ 
    -d \ 
    gcr.io/google_containers/hyperkube-${ARCH}:${K8S_VERSION} \ 
    /hyperkube kubelet \ 
    --containerized \ 
    --hostname-override="127.0.0.1" \ 
    --address="0.0.0.0" \ 
    --api-servers=http://localhost:8080 \ 
    --config=/etc/kubernetes/manifests \ 
    --allow-privileged --v=2 

#docker run -d --net=host --privileged gcr.io/google_containers/hyperkube:v1.0.1 /hyperkube proxy --master=http://127.0.0.1:8080 --v=2 

sleep 5 # give everything time to launch 

ANSWER


So the answer, from my colleague Vijay, is expressed in the test program below.

The key issue (also mentioned in this bug report to the fabric8 people) is the order of creating the object and creating the watch on that object. Thanks, Vijay!

To summarize the bug report:

if you switch the order of these statements

client.replicationControllers().inNamespace(namespace).withLabel("l", "v").watch(watcher); 
createRc(client, namespace, podName, image); 

to this:

createRc(client, namespace, podName, image); 
client.replicationControllers().inNamespace(namespace).withLabel("l", "v").watch(watcher); 


the program stops working. As far as I can tell from the testing I did, switching the order would be fine under 1.2.2.
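Applied to the Scala test case above, the fix amounts to registering the watch before calling create() inside instantiate. Below is a minimal sketch of that reordering; the class name ReorderedRcCreator is hypothetical, and it assumes the newer fabric8 API (withName(...).watch(...) plus the onClose callback), i.e. exactly the calls already used in the original test.

import io.fabric8.kubernetes.api.model.ReplicationController
import io.fabric8.kubernetes.client.Watcher.Action
import io.fabric8.kubernetes.client.{KubernetesClient, KubernetesClientException, Watcher}

import scala.concurrent.{Future, Promise}

// Sketch only: same shape as RepControllerAndWatchCreator.instantiate above,
// but with the watch registered BEFORE the replication controller is created.
class ReorderedRcCreator(rc: ReplicationController, namespace: String) {

  def instantiate(kube: KubernetesClient, name: String): Future[Unit] = {
    val promise = Promise[Unit]()

    val rcWatcher = new Watcher[ReplicationController]() {
      override def eventReceived(action: Action, watchedRc: ReplicationController): Unit = {
        // Because the watch already exists, the ADDED event produced by create() below is not missed.
        if (!promise.isCompleted) promise.success(())
      }
      override def onClose(cause: KubernetesClientException): Unit = ()
    }

    // 1. register the watch first ...
    kube.replicationControllers().inNamespace(namespace).withName(name).watch(rcWatcher)
    // 2. ... then create the replication controller
    kube.replicationControllers().inNamespace(namespace).create(rc)

    promise.future
  }
}

(As in the original test, the Watch handle returned by watch() is never closed here; a real implementation would close it once the promise completes.)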

**Vijay's Solution**

import com.fasterxml.jackson.databind.ObjectMapper; 
import com.typesafe.scalalogging.StrictLogging; 
import io.fabric8.kubernetes.api.model.Quantity; 
import io.fabric8.kubernetes.api.model.ReplicationController; 
import io.fabric8.kubernetes.client.Watcher.Action; 
import io.fabric8.kubernetes.client.*; 

import java.util.HashMap; 
import java.util.Map; 


public class Vijay { 


    public static DefaultKubernetesClient getConnection() { 
    ConfigBuilder configBuilder = Config.builder(); 
    Config config = 
     configBuilder. 
     withMasterUrl("http://localhost:8080"). 
     build(); 
    return new DefaultKubernetesClient(config); 
    } 


    public static void main(String[] args) throws Exception { 
    DefaultKubernetesClient client = getConnection(); 

    String namespace = "junk6"; 
    String podName = "prom"; 
    String image = "nginx"; 

    Watcher<ReplicationController> watcher = new Watcher<ReplicationController>() { 

     @Override 
     public void onClose(KubernetesClientException cause) { 
     // TODO Auto-generated method stub 

     } 

     @Override 
     public void eventReceived(Action action, ReplicationController resource) { 
     System.out.println(action + ":" + resource); 

     } 
    }; 


    client.replicationControllers().inNamespace(namespace).withLabel("l", "v").watch(watcher); 

    createRc(client, namespace, podName, image); 

    } 

    private static void createRc(DefaultKubernetesClient client, String namespace, String podName, String image) { 
    try { 
     Map<String, String> labels = new HashMap<String, String>(); 
     labels.put("l", "v"); 
     ReplicationController rc = client.replicationControllers().inNamespace(namespace) 
      .createNew() 
      .withNewMetadata() 
      .withName(podName) 
      .addToLabels(labels) 
      .endMetadata() 
      .withNewSpec() 
      .withReplicas(1) 
      .withSelector(labels) 
      .withNewTemplate() 
      .withNewMetadata() 
      .addToLabels(labels) 
      .endMetadata() 
      .withNewSpec() 
      .addNewContainer() 
      .withName(podName) 
      .withImage(image) 
      .withImagePullPolicy("Always") 
      .withNewResources() 
      .addToLimits("cpu", new Quantity("100m")) 
      .addToLimits("memory", new Quantity("100Mi")) 
      .endResources() 
      .endContainer() 
      .endSpec() 
      .endTemplate() 
      .endSpec() 
      .done(); 
    } catch (Exception e) { 
     e.printStackTrace(); 
    } 
    } 

} 

This should be fixed –
