Mock Ticket Template

Identify an Untested Feature Using APISnoop

According to this APISnoop query, there are still some Node status endpoints which remain untested.

SELECT
  operation_id,
  -- k8s_action,
  path,
  description
  FROM untested_stable_core_endpoints
  where path not like '%volume%'
  and path not like 'delete%'
  and path not like 'create%'
  and operation_id ilike '%NodeStatus'
 ORDER BY operation_id desc
 -- LIMIT 25
       ;
     operation_id      |            path             |                  description                  
-----------------------+-----------------------------+-----------------------------------------------
 readCoreV1NodeStatus  | /api/v1/nodes/{name}/status | read status of the specified Node
 patchCoreV1NodeStatus | /api/v1/nodes/{name}/status | partially update status of the specified Node
(2 rows)

Use API Reference to Lightly Document the Feature
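
Both untested endpoints operate on the status subresource of a core/v1 Node: readCoreV1NodeStatus (GET /api/v1/nodes/{name}/status) reads the status of the specified Node, and patchCoreV1NodeStatus (PATCH /api/v1/nodes/{name}/status) partially updates that status, for example via a strategic merge patch.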

The mock test

Test outline

  1. List all Nodes, find the latest created Node
  2. Patch the NodeStatus Ready condition
  3. Get the NodeStatus to ensure Ready is False

Example in Go

package main

import (
  "encoding/json"
  "fmt"
  "flag"
  "os"

  v1 "k8s.io/api/core/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "k8s.io/client-go/kubernetes"
  "k8s.io/client-go/dynamic"
  "k8s.io/apimachinery/pkg/runtime/schema"
  "k8s.io/apimachinery/pkg/types"
  "k8s.io/client-go/tools/clientcmd"
)

func main() {
  // uses the current context in kubeconfig
  kubeconfig := flag.String("kubeconfig", fmt.Sprintf("%v/%v/%v", os.Getenv("HOME"), ".kube", "config"), "(optional) absolute path to the kubeconfig file")
  flag.Parse()
  config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
  if err != nil {
      fmt.Println(err)
      return
  }
  // make our work easier to find in the audit_event queries
  config.UserAgent = "live-test-writing"
  // create the typed and dynamic clientsets
  ClientSet, err := kubernetes.NewForConfig(config)
  if err != nil {
      fmt.Println(err)
      return
  }
  DynamicClientSet, err := dynamic.NewForConfig(config)
  if err != nil {
      fmt.Println(err)
      return
  }
  nodeResource := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "nodes"}

  var testSelectedNode v1.Node
  var testSelectedNodeCreationTimestamp int64

  // access the API to list Nodes
  nodes, err := ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
  if err != nil {
      fmt.Println("[error] failed to list Nodes") 
      return
  }

  fmt.Println("[status] finding latest created node")
  for _, node := range nodes.Items {
      nodeCreationTimestamp := node.ObjectMeta.CreationTimestamp.Unix()
      if testSelectedNodeCreationTimestamp == 0 || nodeCreationTimestamp > testSelectedNodeCreationTimestamp {
          testSelectedNodeCreationTimestamp = nodeCreationTimestamp
          testSelectedNode = node
      }
  }

  // flip the Ready condition of the selected Node to False
  updatedNodeStatusConditions := testSelectedNode.Status.Conditions
  foundReadyCondition := false
  for pos, cond := range updatedNodeStatusConditions {
      if cond.Type == "Ready" {
          updatedNodeStatusConditions[pos].Status = "False"
          foundReadyCondition = true
          break
      }
  }
  if !foundReadyCondition {
      fmt.Println("[error] failed to find Ready NodeStatus Condition")
      return
  }

  // build a strategic merge patch containing only the updated status conditions
  nodeStatusPatchPayload, err := json.Marshal(v1.Node{
      Status: v1.NodeStatus{
          Conditions: updatedNodeStatusConditions,
      },
  })
  if err != nil {
      fmt.Println(err)
      return
  }
  _, err = DynamicClientSet.Resource(nodeResource).Patch(testSelectedNode.ObjectMeta.Name, types.StrategicMergePatchType, nodeStatusPatchPayload, metav1.PatchOptions{}, "status")
  if err != nil {
     fmt.Println(err)
     return
  }
  fmt.Println("[status] patched NodeStatus Ready Condition to be False")

  nodeStatusPatchUnstructured, err := DynamicClientSet.Resource(nodeResource).Get(testSelectedNode.ObjectMeta.Name, metav1.GetOptions{}, "status")
  if err != nil {
     fmt.Println(err)
     return
  }
  fmt.Println("[status] fetched patched NodeStatus")
  // convert the unstructured response into a typed v1.Node
  var nodeStatus v1.Node
  nodeStatusUjson, err := json.Marshal(nodeStatusPatchUnstructured)
  if err != nil {
      fmt.Println(err)
      return
  }
  if err := json.Unmarshal(nodeStatusUjson, &nodeStatus); err != nil {
      fmt.Println(err)
      return
  }

  foundReadyConditionFalse := false
  for _, cond := range nodeStatus.Status.Conditions {
      if cond.Type == "Ready" && cond.Status == "False" {
          foundReadyConditionFalse = true
          break
      }
  }
  if !foundReadyConditionFalse {
      fmt.Println("[error] failed to find Ready NodeStatus Condition with Status False")
      return
  }
  fmt.Println("[status] Node was patched successfuly")
  // write test here
  fmt.Println("[status] complete")

}
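
Once these calls are verified, the same logic would be folded into a proper e2e spec in place of the // write test here placeholder. Below is a minimal sketch of how that could look, assuming the Kubernetes e2e framework and Ginkgo, and matching the context-free client-go signatures used in the prototype above; the package name, spec wording, and helper usage are illustrative assumptions, not the actual merged test.

package e2enode

import (
  "encoding/json"

  "github.com/onsi/ginkgo"
  v1 "k8s.io/api/core/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "k8s.io/apimachinery/pkg/runtime/schema"
  "k8s.io/apimachinery/pkg/types"
  "k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.Describe("[sig-node] NodeStatus", func() {
  f := framework.NewDefaultFramework("node-status")

  ginkgo.It("should patch and read the status of a Node", func() {
      nodeResource := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "nodes"}

      // pick a Node from the cluster
      nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
      framework.ExpectNoError(err, "failed to list Nodes")
      node := nodes.Items[0]

      // flip the Ready condition to False, as in the prototype above
      conditions := node.Status.Conditions
      for pos, cond := range conditions {
          if cond.Type == "Ready" {
              conditions[pos].Status = "False"
              break
          }
      }
      payload, err := json.Marshal(v1.Node{Status: v1.NodeStatus{Conditions: conditions}})
      framework.ExpectNoError(err, "failed to marshal NodeStatus patch")

      // hits patchCoreV1NodeStatus
      _, err = f.DynamicClient.Resource(nodeResource).Patch(node.Name, types.StrategicMergePatchType, payload, metav1.PatchOptions{}, "status")
      framework.ExpectNoError(err, "failed to patch status of Node %s", node.Name)

      // hits readCoreV1NodeStatus
      _, err = f.DynamicClient.Resource(nodeResource).Get(node.Name, metav1.GetOptions{}, "status")
      framework.ExpectNoError(err, "failed to read status of Node %s", node.Name)
  })
})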

Verify Increase in Coverage with APISnoop

Discover useragents:

select distinct useragent
  from audit_event
  where bucket = 'apisnoop'
  and useragent not like 'kube%'
  and useragent not like 'coredns%'
  and useragent not like 'kindnetd%'
  and useragent like 'live%';
     useragent     
-------------------
 live-test-writing
(1 row)

List endpoints hit by the test:

select * from endpoints_hit_by_new_test where useragent like 'live%'; 
     useragent     |     operation_id      | hit_by_ete | hit_by_new_test 
-------------------+-----------------------+------------+-----------------
 live-test-writing | listCoreV1Node        | t          |               1
 live-test-writing | patchCoreV1NodeStatus | f          |               2
 live-test-writing | readCoreV1NodeStatus  | f          |               1
(3 rows)

Display endpoint coverage change:

select * from projected_change_in_coverage;
   category    | total_endpoints | old_coverage | new_coverage | change_in_number 
---------------+-----------------+--------------+--------------+------------------
 test_coverage |             438 |          190 |          192 |                2
(1 row)

Final notes

If a test with these calls gets merged, **test coverage will go up by 2 points** (from 190 to 192 tested endpoints).

This test is also being written with the goal of eventual conformance promotion.


/sig testing

/sig architecture

/area conformance