$ kubectl create deploy nginx --image=nginx
$ kubectl expose deploy nginx --port=80 --target-port=80 --cluster-ip='fd00::1'
$ kubectl get services nginx NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE nginx ClusterIP fd00::1 <none> 80/TCP 15s
$ kubectl run multitool --image=praqma/network-multitool pod/multitool created
$ kubectl exec -it multitool -- bash
bash-5.1# curl -I -k http://[fd00::1]:80 HTTP/1.1 200 OK Server: nginx/1.21.6 Date: Fri, 08 Apr 2022 20:55:20 GMT Content-Type: text/html Content-Length: 615 Last-Modified: Tue, 25 Jan 2022 15:03:52 GMT Connection: keep-alive ETag: "61f01158-267" Accept-Ranges: bytes
bash-5.1# nslookup google.com Server: 10.96.0.10 Address: 10.96.0.10#53 Non-authoritative answer: Name: google.com Address: 172.217.162.110 Name: google.com Address: 2800:3f0:4001:80f::200e
bash-5.1# nslookup kubernetes Server: 10.96.0.10 Address: 10.96.0.10#53 Name: kubernetes.default.svc.cluster.local Address: 10.96.0.1
$ cat /etc/cni/net.d/10-calico.conflist { "name": "k8s-pod-network", "cniVersion": "0.3.1", "plugins": [ { "type": "calico", "datastore_type": "kubernetes", "mtu": 0, "nodename_file_optional": false, "log_level": "Info", "log_file_path": "/var/log/calico/cni/cni.log", "ipam": { "type": "calico-ipam", "assign_ipv4" : "true", "assign_ipv6" : "true"}, "container_settings": { "allow_ip_forwarding": false }, "policy": { "type": "k8s" }, "kubernetes": { "k8s_api_root":"https://10.96.0.1:443", "kubeconfig": "/etc/cni/net.d/calico-kubeconfig" } }, { "type": "bandwidth", "capabilities": {"bandwidth": true} }, {"type": "portmap", "snat": true, "capabilities": {"portMappings": true}} ] }
$ kubectl get installation -o yaml apiVersion: v1 items: - apiVersion: operator.tigera.io/v1 kind: Installation metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operator.tigera.io/v1","kind":"Installation","metadata":{"annotations":{},"name":"default"},"spec":{"calicoNetwork":{"ipPools":[{"blockSize":26,"cidr":"10.85.0.0/16","encapsulation":"VXLANCrossSubnet","natOutgoing":"Enabled","nodeSelector":"all()"},{"blockSize":122,"cidr":"1100:200::/64","encapsulation":"None","natOutgoing":"Enabled","nodeSelector":"all()"}]}}} creationTimestamp: "2022-04-08T19:54:47Z" generation: 2 name: default resourceVersion: "1248" uid: 0ba3ed49-2649-4853-9745-09728868694c spec: calicoNetwork: bgp: Enabled hostPorts: Enabled ipPools: - blockSize: 26 cidr: 10.85.0.0/16 encapsulation: VXLANCrossSubnet natOutgoing: Enabled nodeSelector: all() - blockSize: 122 cidr: 1100:200::/64 encapsulation: None natOutgoing: Enabled nodeSelector: all() linuxDataplane: Iptables multiInterfaceMode: None nodeAddressAutodetectionV4: firstFound: true nodeAddressAutodetectionV6: firstFound: true cni: ipam: type: Calico type: Calico controlPlaneReplicas: 2 flexVolumePath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/ nodeUpdateStrategy: rollingUpdate: maxUnavailable: 1 type: RollingUpdate nonPrivileged: Disabled variant: Calico status: computed: calicoNetwork: bgp: Enabled hostPorts: Enabled ipPools: - blockSize: 26 cidr: 10.85.0.0/16 encapsulation: VXLANCrossSubnet natOutgoing: Enabled nodeSelector: all() - blockSize: 122 cidr: 1100:200::/64 encapsulation: None natOutgoing: Enabled nodeSelector: all() linuxDataplane: Iptables multiInterfaceMode: None nodeAddressAutodetectionV4: firstFound: true nodeAddressAutodetectionV6: firstFound: true cni: ipam: type: Calico type: Calico controlPlaneReplicas: 2 flexVolumePath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/ nodeUpdateStrategy: rollingUpdate: maxUnavailable: 1 type: RollingUpdate nonPrivileged: Disabled 
variant: Calico mtu: 1450 variant: Calico kind: List metadata: resourceVersion: "" selfLink: ""
REF: https://projectcalico.docs.tigera.io/maintenance/troubleshoot/commands
--- apiVersion: v1 kind: Pod metadata: name: ping spec: containers: - name: ping-container image: alpine:latest command: ["/bin/ping", "8.8.8.8"] securityContext: capabilities: add: - NET_RAW drop: - ALL
$ kubectl apply -f ping.yml pod/ping created
$ kubectl logs -f pod/ping PING 8.8.8.8 (8.8.8.8): 56 data bytes
--- apiVersion: v1 kind: Pod metadata: name: traceroute spec: containers: - name: traceroute-container image: alpine:latest command: ["/usr/bin/traceroute6", "2804:694:4c00:4001::13"] securityContext: capabilities: add: - NET_RAW drop: - ALL
$ kubectl apply -f traceroute.yml pod/traceroute created
$ kubectl logs -f pod/traceroute traceroute to 2804:694:4c00:4001::13 (2804:694:4c00:4001::13), 30 hops max, 72 byte packets 1 2804:694:4c00:4007::100 (2804:694:4c00:4007::100) 0.013 ms 0.014 ms 0.009 ms
kubectl logs <POD> --all-containers
kubectl logs <POD> -c <CONTAINER>
kubectl get events -n <NAMESPACE>
kubectl get events --sort-by=.metadata.creationTimestamp -n <NAMESPACE>
kubectl logs --previous <POD> -n <NAMESPACE>