diff --git a/README.md b/README.md
index 351dff3169..b4c9348e30 100644
--- a/README.md
+++ b/README.md
@@ -1056,6 +1056,98 @@ k9s:
 ---
 
+## Custom Workload View
+
+You can customize the workload view to include CRDs or any other resources you want to see in this view.
+
+To do so, add a new `workloadGVRs` field to your config, following this pattern:
+```
+k9s:
+  workloadGVRs:
+    - name: "v1/pods"
+    - name: "test.com/v1alpha1/myCRD"
+      status:
+        cellName: "State"
+      readiness:
+        cellName: "Current"
+        # cellExtraName, when set, is appended as `cellName/cellExtraName`
+        cellExtraName: "Desired"
+      validity:
+        replicas:
+          cellCurrentName: "Current"
+          cellDesiredName: "Desired"
+        matchs:
+          - cellName: "State"
+            cellValue: "Ready"
+    - name: "external-secrets.io/v1beta1/externalsecrets"
+      status:
+        na: true
+      validity:
+        matchs:
+          - cellName: Ready
+            cellValue: "True"
+          - cellName: Status
+            cellValue: SecretSynced
+```
+The first entry (`v1/pods`) is recognized by k9s, which applies its default values for readiness, validity, and status.
+
+The second entry (`test.com/v1alpha1/myCRD`) is an unknown GVR; the configuration above determines how it is shown in the workload view.
+
+The third entry (`external-secrets.io/v1beta1/externalsecrets`) is also an unknown GVR, but since its readiness is not set, the default readiness values are used. Its status is flagged as not applicable (`na: true`) since, for example, secrets do not need a status.
+
+For an unknown GVR, the following defaults are applied to each section that is neither set nor flagged as not applicable:
+```
+  status:
+    cellName: "Status"
+  validity:
+    matchs:
+      - cellName: "Ready"
+        cellValue: "True"
+  readiness:
+    cellName: "Ready"
+```
+
+The GVRs known to k9s are:
+```
+  - v1/pods
+  - apps/v1/deployments
+  - apps/v1/replicasets
+  - v1/serviceaccounts
+  - v1/persistentvolumeclaims
+  - scheduling.k8s.io/v1/priorityclasses
+  - v1/configmaps
+  - v1/secrets
+  - v1/services
+  - apps/v1/daemonsets
+  - apps/v1/statefulSets
+```
+
+The full configuration structure is:
+```
+  workloadGVRs:
+    - name: string
+      status:
+        cellName: string
+        na: bool
+      readiness:
+        cellName: string
+        cellExtraName: string
+        na: bool
+      validity:
+        matchs:
+          - cellName: string
+            cellValue: string
+          - cellName: string
+            cellValue: string
+          ...
+        replicas:
+          cellCurrentName: string
+          cellDesiredName: string
+          cellAllName: string
+        na: bool
+```
+
+---
+
 ## Contributors
 
 Without the contributions from these fine folks, this project would be a total dud!
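To make the defaulting rules concrete: only a section left entirely unset inherits defaults, while a section explicitly flagged `na: true` is kept as-is. Here is a minimal, self-contained Go sketch of that rule, using simplified stand-in types (`Status` and `applyStatusDefault` are illustrative names; the real logic is `WorkloadGVR.ApplyDefault` in `internal/config/workload.go` further down in this diff):

```go
package main

import "fmt"

// Simplified stand-ins for the GVRStatus/WorkloadGVR types this PR adds.
type Status struct {
	NA       bool
	CellName string
}

type WorkloadGVR struct {
	Name   string
	Status *Status
}

// applyStatusDefault mirrors the defaulting rule for one section:
// only a nil section is filled in; an explicit section (even na: true) wins.
func applyStatusDefault(w *WorkloadGVR, fallback Status) {
	if w.Status == nil {
		w.Status = &fallback
	}
}

func main() {
	crd := WorkloadGVR{Name: "test.com/v1alpha1/myCRD"} // status unset -> gets the default
	es := WorkloadGVR{                                  // status declared n/a -> left alone
		Name:   "external-secrets.io/v1beta1/externalsecrets",
		Status: &Status{NA: true},
	}

	for _, w := range []*WorkloadGVR{&crd, &es} {
		applyStatusDefault(w, Status{CellName: "Status"})
		fmt.Printf("%-45s na=%-5v cell=%q\n", w.Name, w.Status.NA, w.Status.CellName)
	}
}
```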
diff --git a/internal/config/json/schemas/k9s.json b/internal/config/json/schemas/k9s.json
index e217bbec29..e14def8b9b 100644
--- a/internal/config/json/schemas/k9s.json
+++ b/internal/config/json/schemas/k9s.json
@@ -125,6 +125,54 @@
           }
         }
       }
+    },
+    "workloadGVRs": {
+      "type": "array",
+      "items": {
+        "type": "object",
+        "properties": {
+          "name": {"type": "string"},
+          "status": {
+            "type": "object",
+            "properties": {
+              "cellName": {"type": "string"},
+              "na": {"type": "boolean"}
+            }
+          },
+          "readiness": {
+            "type": "object",
+            "properties": {
+              "cellName": {"type": "string"},
+              "cellExtraName": {"type": "string"},
+              "na": {"type": "boolean"}
+            }
+          },
+          "validity": {
+            "type": "object",
+            "properties": {
+              "matchs": {
+                "type": "array",
+                "items": {
+                  "type": "object",
+                  "properties": {
+                    "cellName": {"type": "string"},
+                    "cellValue": {"type": "string"}
+                  }
+                }
+              },
+              "replicas": {
+                "type": "object",
+                "properties": {
+                  "cellCurrentName": {"type": "string"},
+                  "cellDesiredName": {"type": "string"},
+                  "cellAllName": {"type": "string"}
+                }
+              },
+              "na": {"type": "boolean"}
+            }
+          }
+        }
+      }
+    }
   }
 }
diff --git a/internal/config/k9s.go b/internal/config/k9s.go
index 953fb7ca3b..ca3d781bd4 100644
--- a/internal/config/k9s.go
+++ b/internal/config/k9s.go
@@ -18,19 +18,20 @@ import (
 
 // K9s tracks K9s configuration options.
 type K9s struct {
-	LiveViewAutoRefresh bool       `json:"liveViewAutoRefresh" yaml:"liveViewAutoRefresh"`
-	ScreenDumpDir       string     `json:"screenDumpDir" yaml:"screenDumpDir,omitempty"`
-	RefreshRate         int        `json:"refreshRate" yaml:"refreshRate"`
-	MaxConnRetry        int        `json:"maxConnRetry" yaml:"maxConnRetry"`
-	ReadOnly            bool       `json:"readOnly" yaml:"readOnly"`
-	NoExitOnCtrlC       bool       `json:"noExitOnCtrlC" yaml:"noExitOnCtrlC"`
-	UI                  UI         `json:"ui" yaml:"ui"`
-	SkipLatestRevCheck  bool       `json:"skipLatestRevCheck" yaml:"skipLatestRevCheck"`
-	DisablePodCounting  bool       `json:"disablePodCounting" yaml:"disablePodCounting"`
-	ShellPod            ShellPod   `json:"shellPod" yaml:"shellPod"`
-	ImageScans          ImageScans `json:"imageScans" yaml:"imageScans"`
-	Logger              Logger     `json:"logger" yaml:"logger"`
-	Thresholds          Threshold  `json:"thresholds" yaml:"thresholds"`
+	LiveViewAutoRefresh bool          `json:"liveViewAutoRefresh" yaml:"liveViewAutoRefresh"`
+	ScreenDumpDir       string        `json:"screenDumpDir" yaml:"screenDumpDir,omitempty"`
+	RefreshRate         int           `json:"refreshRate" yaml:"refreshRate"`
+	MaxConnRetry        int           `json:"maxConnRetry" yaml:"maxConnRetry"`
+	ReadOnly            bool          `json:"readOnly" yaml:"readOnly"`
+	NoExitOnCtrlC       bool          `json:"noExitOnCtrlC" yaml:"noExitOnCtrlC"`
+	UI                  UI            `json:"ui" yaml:"ui"`
+	SkipLatestRevCheck  bool          `json:"skipLatestRevCheck" yaml:"skipLatestRevCheck"`
+	DisablePodCounting  bool          `json:"disablePodCounting" yaml:"disablePodCounting"`
+	ShellPod            ShellPod      `json:"shellPod" yaml:"shellPod"`
+	ImageScans          ImageScans    `json:"imageScans" yaml:"imageScans"`
+	Logger              Logger        `json:"logger" yaml:"logger"`
+	Thresholds          Threshold     `json:"thresholds" yaml:"thresholds"`
+	WorkloadGVRs        []WorkloadGVR `json:"workloadGVRs,omitempty" yaml:"workloadGVRs,omitempty"`
 	manualRefreshRate   int
 	manualHeadless      *bool
 	manualLogoless      *bool
@@ -56,6 +57,7 @@ func NewK9s(conn client.Connection, ks data.KubeSettings) *K9s {
 		Thresholds: NewThreshold(),
 		ShellPod:   NewShellPod(),
 		ImageScans: NewImageScans(),
+		WorkloadGVRs: NewWorkloadGVRs(),
 		dir:        data.NewDir(AppContextsDir),
 		conn:       conn,
 		ks:         ks,
@@ -108,6 +110,9 @@ func (k *K9s) Merge(k1 *K9s) {
 	if k1.Thresholds != nil {
 		k.Thresholds = k1.Thresholds
 	}
+	if k1.WorkloadGVRs != nil {
+		k.WorkloadGVRs = k1.WorkloadGVRs
+	}
 }
 
 // AppScreenDumpDir fetch screen dumps dir.
diff --git a/internal/config/k9s_test.go b/internal/config/k9s_test.go
index d74c59c39d..ab681ca53a 100644
--- a/internal/config/k9s_test.go
+++ b/internal/config/k9s_test.go
@@ -98,11 +98,13 @@ func TestK9sMerge(t *testing.T) {
 				ImageScans: config.ImageScans{},
 				Logger:     config.Logger{},
 				Thresholds: nil,
+				WorkloadGVRs: nil,
 			},
 			k2: &config.K9s{
 				LiveViewAutoRefresh: true,
 				MaxConnRetry:        100,
 				ShellPod:            config.NewShellPod(),
+				WorkloadGVRs:        nil,
 			},
 			ek: &config.K9s{
 				LiveViewAutoRefresh: true,
@@ -118,6 +120,7 @@ func TestK9sMerge(t *testing.T) {
 				ImageScans: config.ImageScans{},
 				Logger:     config.Logger{},
 				Thresholds: nil,
+				WorkloadGVRs: nil,
 			},
 		},
 	}
diff --git a/internal/config/testdata/configs/default.yaml b/internal/config/testdata/configs/default.yaml
index abf8432ba4..575d14a3d8 100644
--- a/internal/config/testdata/configs/default.yaml
+++ b/internal/config/testdata/configs/default.yaml
@@ -39,3 +39,74 @@ k9s:
     memory:
       critical: 90
       warn: 70
+  workloadGVRs:
+    - name: apps/v1/daemonsets
+      readiness:
+        na: false
+        cellName: Ready
+        cellExtraName: Desired
+      validity:
+        na: false
+        replicas:
+          cellCurrentName: Ready
+          cellDesiredName: Desired
+          cellAllName: ""
+    - name: apps/v1/deployments
+      readiness:
+        na: false
+        cellName: Ready
+        cellExtraName: ""
+      validity:
+        na: false
+        replicas:
+          cellCurrentName: ""
+          cellDesiredName: ""
+          cellAllName: Ready
+    - name: apps/v1/replicasets
+      readiness:
+        na: false
+        cellName: Current
+        cellExtraName: Desired
+      validity:
+        na: false
+        replicas:
+          cellCurrentName: Current
+          cellDesiredName: Desired
+          cellAllName: ""
+    - name: apps/v1/statefulSets
+      status:
+        na: false
+        cellName: Ready
+      readiness:
+        na: false
+        cellName: Ready
+        cellExtraName: ""
+      validity:
+        na: false
+        replicas:
+          cellCurrentName: ""
+          cellDesiredName: ""
+          cellAllName: Ready
+    - name: scheduling.k8s.io/v1/priorityclasses
+    - name: v1/configmaps
+    - name: v1/persistentvolumeclaims
+    - name: v1/pods
+      status:
+        na: false
+        cellName: Status
+      readiness:
+        na: false
+        cellName: Ready
+        cellExtraName: ""
+      validity:
+        na: false
+        matchs:
+          - cellName: Status
+            cellValue: Running
+        replicas:
+          cellCurrentName: ""
+          cellDesiredName: ""
+          cellAllName: Ready
+    - name: v1/secrets
+    - name: v1/serviceaccounts
+    - name: v1/services
diff --git a/internal/config/testdata/configs/expected.yaml b/internal/config/testdata/configs/expected.yaml
index e85a32f160..f028af37ef 100644
--- a/internal/config/testdata/configs/expected.yaml
+++ b/internal/config/testdata/configs/expected.yaml
@@ -39,3 +39,74 @@ k9s:
     memory:
       critical: 90
       warn: 70
+  workloadGVRs:
+    - name: apps/v1/daemonsets
+      readiness:
+        na: false
+        cellName: Ready
+        cellExtraName: Desired
+      validity:
+        na: false
+        replicas:
+          cellCurrentName: Ready
+          cellDesiredName: Desired
+          cellAllName: ""
+    - name: apps/v1/deployments
+      readiness:
+        na: false
+        cellName: Ready
+        cellExtraName: ""
+      validity:
+        na: false
+        replicas:
+          cellCurrentName: ""
+          cellDesiredName: ""
+          cellAllName: Ready
+    - name: apps/v1/replicasets
+      readiness:
+        na: false
+        cellName: Current
+        cellExtraName: Desired
+      validity:
+        na: false
+        replicas:
+          cellCurrentName: Current
+          cellDesiredName: Desired
+          cellAllName: ""
+    - name: apps/v1/statefulSets
+      status:
+        na: false
+        cellName: Ready
+      readiness:
+        na: false
+        cellName: Ready
+        cellExtraName: ""
+      validity:
+        na: false
+        replicas:
+          cellCurrentName: ""
+          cellDesiredName: ""
+          cellAllName: Ready
+    - name: scheduling.k8s.io/v1/priorityclasses
+    - name: v1/configmaps
+    - name: v1/persistentvolumeclaims
+    - name: v1/pods
+      status:
+        na: false
+        cellName: Status
+      readiness:
+        na: false
+        cellName: Ready
+        cellExtraName: ""
+      validity:
+        na: false
+        matchs:
+          - cellName: Status
+            cellValue: Running
+        replicas:
+          cellCurrentName: ""
+          cellDesiredName: ""
+          cellAllName: Ready
+    - name: v1/secrets
+    - name: v1/serviceaccounts
+    - name: v1/services
diff --git a/internal/config/testdata/configs/k9s.yaml b/internal/config/testdata/configs/k9s.yaml
index 8f3546d357..4cc0515450 100644
--- a/internal/config/testdata/configs/k9s.yaml
+++ b/internal/config/testdata/configs/k9s.yaml
@@ -39,3 +39,74 @@ k9s:
     memory:
       critical: 90
       warn: 70
+  workloadGVRs:
+    - name: apps/v1/daemonsets
+      readiness:
+        na: false
+        cellName: Ready
+        cellExtraName: Desired
+      validity:
+        na: false
+        replicas:
+          cellCurrentName: Ready
+          cellDesiredName: Desired
+          cellAllName: ""
+    - name: apps/v1/deployments
+      readiness:
+        na: false
+        cellName: Ready
+        cellExtraName: ""
+      validity:
+        na: false
+        replicas:
+          cellCurrentName: ""
+          cellDesiredName: ""
+          cellAllName: Ready
+    - name: apps/v1/replicasets
+      readiness:
+        na: false
+        cellName: Current
+        cellExtraName: Desired
+      validity:
+        na: false
+        replicas:
+          cellCurrentName: Current
+          cellDesiredName: Desired
+          cellAllName: ""
+    - name: apps/v1/statefulSets
+      status:
+        na: false
+        cellName: Ready
+      readiness:
+        na: false
+        cellName: Ready
+        cellExtraName: ""
+      validity:
+        na: false
+        replicas:
+          cellCurrentName: ""
+          cellDesiredName: ""
+          cellAllName: Ready
+    - name: scheduling.k8s.io/v1/priorityclasses
+    - name: v1/configmaps
+    - name: v1/persistentvolumeclaims
+    - name: v1/pods
+      status:
+        na: false
+        cellName: Status
+      readiness:
+        na: false
+        cellName: Ready
+        cellExtraName: ""
+      validity:
+        na: false
+        matchs:
+          - cellName: Status
+            cellValue: Running
+        replicas:
+          cellCurrentName: ""
+          cellDesiredName: ""
+          cellAllName: Ready
+    - name: v1/secrets
+    - name: v1/serviceaccounts
+    - name: v1/services
diff --git a/internal/config/workload.go b/internal/config/workload.go
new file mode 100644
index 0000000000..0e11baa064
--- /dev/null
+++ b/internal/config/workload.go
@@ -0,0 +1,149 @@
+package config
+
+import (
+	"sort"
+
+	"github.com/derailed/k9s/internal/client"
+)
+
+var (
+	// defaultGvr represents the default values used when a custom GVR is set without status, validity, or readiness.
+	defaultGvr = WorkloadGVR{
+		Status:    &GVRStatus{CellName: "Status"},
+		Validity:  &GVRValidity{Matchs: []Match{{CellName: "Ready", Value: "True"}}},
+		Readiness: &GVRReadiness{CellName: "Ready"},
+	}
+
+	// defaultConfigGVRs represents the default configurations for the GVRs known to k9s.
+	defaultConfigGVRs = map[string]WorkloadGVR{
+		"apps/v1/deployments": {
+			Name:      "apps/v1/deployments",
+			Readiness: &GVRReadiness{CellName: "Ready"},
+			Validity: &GVRValidity{
+				Replicas: Replicas{CellAllName: "Ready"},
+			},
+		},
+		"apps/v1/daemonsets": {
+			Name:      "apps/v1/daemonsets",
+			Readiness: &GVRReadiness{CellName: "Ready", CellExtraName: "Desired"},
+			Validity: &GVRValidity{
+				Replicas: Replicas{CellDesiredName: "Desired", CellCurrentName: "Ready"},
+			},
+		},
+		"apps/v1/replicasets": {
+			Name:      "apps/v1/replicasets",
+			Readiness: &GVRReadiness{CellName: "Current", CellExtraName: "Desired"},
+			Validity: &GVRValidity{
+				Replicas: Replicas{CellDesiredName: "Desired", CellCurrentName: "Current"},
+			},
+		},
+		"apps/v1/statefulSets": {
+			Name:      "apps/v1/statefulSets",
+			Status:    &GVRStatus{CellName: "Ready"},
+			Readiness: &GVRReadiness{CellName: "Ready"},
+			Validity: &GVRValidity{
+				Replicas: Replicas{CellAllName: "Ready"},
+			},
+		},
+		"scheduling.k8s.io/v1/priorityclasses": {Name: "scheduling.k8s.io/v1/priorityclasses"},
+		"v1/configmaps":                        {Name: "v1/configmaps"},
+		"v1/persistentvolumeclaims":            {Name: "v1/persistentvolumeclaims"},
+		"v1/pods": {
+			Name:      "v1/pods",
+			Status:    &GVRStatus{CellName: "Status"},
+			Readiness: &GVRReadiness{CellName: "Ready"},
+			Validity: &GVRValidity{
+				Matchs: []Match{
+					{CellName: "Status", Value: "Running"},
+				},
+				Replicas: Replicas{CellAllName: "Ready"},
+			},
+		},
+		"v1/secrets":         {Name: "v1/secrets"},
+		"v1/serviceaccounts": {Name: "v1/serviceaccounts"},
+		"v1/services":        {Name: "v1/services"},
+	}
+)
+
+type CellName string
+
+type GVRStatus struct {
+	NA       bool     `json:"na" yaml:"na"`
+	CellName CellName `json:"cellName" yaml:"cellName"`
+}
+
+type GVRReadiness struct {
+	NA            bool     `json:"na" yaml:"na"`
+	CellName      CellName `json:"cellName" yaml:"cellName"`
+	CellExtraName CellName `json:"cellExtraName" yaml:"cellExtraName"`
+}
+
+type Match struct {
+	CellName CellName `json:"cellName" yaml:"cellName"`
+	Value    string   `json:"cellValue" yaml:"cellValue"`
+}
+
+type Replicas struct {
+	CellCurrentName CellName `json:"cellCurrentName" yaml:"cellCurrentName"`
+	CellDesiredName CellName `json:"cellDesiredName" yaml:"cellDesiredName"`
+	CellAllName     CellName `json:"cellAllName" yaml:"cellAllName"`
+}
+
+type GVRValidity struct {
+	NA       bool     `json:"na" yaml:"na"`
+	Matchs   []Match  `json:"matchs,omitempty" yaml:"matchs,omitempty"`
+	Replicas Replicas `json:"replicas" yaml:"replicas"`
+}
+
+type WorkloadGVR struct {
+	Name      string        `json:"name" yaml:"name"`
+	Status    *GVRStatus    `json:"status,omitempty" yaml:"status,omitempty"`
+	Readiness *GVRReadiness `json:"readiness,omitempty" yaml:"readiness,omitempty"`
+	Validity  *GVRValidity  `json:"validity,omitempty" yaml:"validity,omitempty"`
+}
+
+// NewWorkloadGVRs returns the default GVRs to use if no custom config is set.
+func NewWorkloadGVRs() []WorkloadGVR {
+	defaultWorkloadGVRs := make([]WorkloadGVR, 0, len(defaultConfigGVRs))
+	for _, gvr := range defaultConfigGVRs {
+		defaultWorkloadGVRs = append(defaultWorkloadGVRs, gvr)
+	}
+
+	// Sort to keep the result deterministic (map iteration order is random).
+	sort.Slice(defaultWorkloadGVRs, func(i, j int) bool {
+		return defaultWorkloadGVRs[i].Name < defaultWorkloadGVRs[j].Name
+	})
+
+	return defaultWorkloadGVRs
+}
+
+// GetGVR returns the GVR defined by the WorkloadGVR's name.
+func (wgvr WorkloadGVR) GetGVR() client.GVR {
+	return client.NewGVR(wgvr.Name)
+}
+
+// ApplyDefault completes the GVR with missing values.
+// If the name matches a known GVR, the corresponding default values are applied;
+// otherwise any readiness, status, or validity left unset falls back to the generic defaults.
+func (wkgvr *WorkloadGVR) ApplyDefault() {
+	if existingGvr, ok := defaultConfigGVRs[wkgvr.Name]; ok {
+		wkgvr.applyDefaultValues(existingGvr)
+	} else {
+		wkgvr.applyDefaultValues(defaultGvr)
+	}
+}
+
+func (wkgvr *WorkloadGVR) applyDefaultValues(defaultGVR WorkloadGVR) {
+	if wkgvr.Status == nil {
+		wkgvr.Status = defaultGVR.Status
+	}
+
+	if wkgvr.Readiness == nil {
+		wkgvr.Readiness = defaultGVR.Readiness
+	}
+
+	if wkgvr.Validity == nil {
+		wkgvr.Validity = defaultGVR.Validity
+	}
+}
diff --git a/internal/dao/workload.go b/internal/dao/workload.go
index 604c6ca9ad..f943185742 100644
--- a/internal/dao/workload.go
+++ b/internal/dao/workload.go
@@ -13,6 +13,7 @@ import (
 
 	"github.com/derailed/k9s/internal"
 	"github.com/derailed/k9s/internal/client"
+	"github.com/derailed/k9s/internal/config"
 	"github.com/derailed/k9s/internal/render"
 	"github.com/rs/zerolog/log"
 	"k8s.io/apimachinery/pkg/api/meta"
@@ -21,23 +22,22 @@ import (
 )
 
 const (
-	StatusOK       = "OK"
 	DegradedStatus = "DEGRADED"
+	NotAvailable   = "n/a"
 )
 
 var (
-	SaGVR   = client.NewGVR("v1/serviceaccounts")
-	PvcGVR  = client.NewGVR("v1/persistentvolumeclaims")
-	PcGVR   = client.NewGVR("scheduling.k8s.io/v1/priorityclasses")
-	CmGVR   = client.NewGVR("v1/configmaps")
-	SecGVR  = client.NewGVR("v1/secrets")
-	PodGVR  = client.NewGVR("v1/pods")
-	SvcGVR  = client.NewGVR("v1/services")
-	DsGVR   = client.NewGVR("apps/v1/daemonsets")
-	StsGVR  = client.NewGVR("apps/v1/statefulSets")
-	DpGVR   = client.NewGVR("apps/v1/deployments")
-	RsGVR   = client.NewGVR("apps/v1/replicasets")
-	resList = []client.GVR{PodGVR, SvcGVR, DsGVR, StsGVR, DpGVR, RsGVR}
+	SaGVR  = client.NewGVR("v1/serviceaccounts")
+	PvcGVR = client.NewGVR("v1/persistentvolumeclaims")
+	PcGVR  = client.NewGVR("scheduling.k8s.io/v1/priorityclasses")
+	CmGVR  = client.NewGVR("v1/configmaps")
+	SecGVR = client.NewGVR("v1/secrets")
+	PodGVR = client.NewGVR("v1/pods")
+	SvcGVR = client.NewGVR("v1/services")
+	DsGVR  = client.NewGVR("apps/v1/daemonsets")
+	StsGVR = client.NewGVR("apps/v1/statefulSets")
+	DpGVR  = client.NewGVR("apps/v1/deployments")
+	RsGVR  = client.NewGVR("apps/v1/replicasets")
 )
 
 // Workload tracks a select set of resources in a given namespace.
@@ -80,6 +80,62 @@ func (w *Workload) Delete(ctx context.Context, path string, propagation *metav1.
 	return dial.Namespace(ns).Delete(ctx, n, opts)
 }
 
+// List fetches workloads.
+func (a *Workload) List(ctx context.Context, ns string) ([]runtime.Object, error) {
+	oo := make([]runtime.Object, 0, 100)
+
+	workloadGVRs, _ := ctx.Value(internal.KeyWorkloadGVRs).([]config.WorkloadGVR)
+	for i := range workloadGVRs {
+		// Complete the GVR config with default values before rendering.
+		workloadGVRs[i].ApplyDefault()
+
+		table, err := a.fetch(ctx, workloadGVRs[i].GetGVR(), ns)
+		if err != nil {
+			log.Warn().Msgf("could not fetch gvr %s: %q", workloadGVRs[i].Name, err)
+			continue
+		}
+
+		for _, r := range table.Rows {
+			ns, ts := a.getNamespaceAndTimestamp(r)
+
+			oo = append(oo, &render.WorkloadRes{Row: metav1.TableRow{Cells: []interface{}{
+				workloadGVRs[i].GetGVR().String(),
+				ns,
+				r.Cells[indexOf("Name", table.ColumnDefinitions)],
+				a.getStatus(workloadGVRs[i], table.ColumnDefinitions, r.Cells),
+				a.getReadiness(workloadGVRs[i], table.ColumnDefinitions, r.Cells),
+				a.getValidity(workloadGVRs[i], table.ColumnDefinitions, r.Cells),
+				ts,
+			}}})
+		}
+	}
+
+	return oo, nil
+}
+
+// getNamespaceAndTimestamp retrieves the namespace and creation timestamp of a given resource.
+func (a *Workload) getNamespaceAndTimestamp(r metav1.TableRow) (string, metav1.Time) {
+	var (
+		ns string
+		ts metav1.Time
+	)
+
+	if obj := r.Object.Object; obj != nil {
+		if m, err := meta.Accessor(obj); err == nil {
+			ns = m.GetNamespace()
+			ts = m.GetCreationTimestamp()
+		}
+	} else {
+		var m metav1.PartialObjectMetadata
+		if err := json.Unmarshal(r.Object.Raw, &m); err == nil {
+			ns = m.GetNamespace()
+			ts = m.CreationTimestamp
+		}
+	}
+
+	return ns, ts
+}
+
 func (a *Workload) fetch(ctx context.Context, gvr client.GVR, ns string) (*metav1.Table, error) {
 	a.Table.gvr = gvr
 	oo, err := a.Table.List(ctx, ns)
@@ -97,106 +153,136 @@ func (a *Workload) fetch(ctx context.Context, gvr client.GVR, ns string) (*metav1.Table, error) {
 	return tt, nil
 }
 
-// List fetch workloads.
-func (a *Workload) List(ctx context.Context, ns string) ([]runtime.Object, error) {
-	oo := make([]runtime.Object, 0, 100)
-	for _, gvr := range resList {
-		table, err := a.fetch(ctx, gvr, ns)
-		if err != nil {
-			return nil, err
-		}
-		var (
-			ns string
-			ts metav1.Time
-		)
-		for _, r := range table.Rows {
-			if obj := r.Object.Object; obj != nil {
-				if m, err := meta.Accessor(obj); err == nil {
-					ns = m.GetNamespace()
-					ts = m.GetCreationTimestamp()
-				}
-			} else {
-				var m metav1.PartialObjectMetadata
-				if err := json.Unmarshal(r.Object.Raw, &m); err == nil {
-					ns = m.GetNamespace()
-					ts = m.CreationTimestamp
-				}
-			}
-			stat := status(gvr, r, table.ColumnDefinitions)
-			oo = append(oo, &render.WorkloadRes{Row: metav1.TableRow{Cells: []interface{}{
-				gvr.String(),
-				ns,
-				r.Cells[indexOf("Name", table.ColumnDefinitions)],
-				stat,
-				readiness(gvr, r, table.ColumnDefinitions),
-				validity(stat),
-				ts,
-			}}})
-		}
-	}
-
-	return oo, nil
-}
-
-// Helpers...
-
-func readiness(gvr client.GVR, r metav1.TableRow, h []metav1.TableColumnDefinition) string {
-	switch gvr {
-	case PodGVR, DpGVR, StsGVR:
-		return r.Cells[indexOf("Ready", h)].(string)
-	case RsGVR, DsGVR:
-		c := r.Cells[indexOf("Ready", h)].(int64)
-		d := r.Cells[indexOf("Desired", h)].(int64)
-		return fmt.Sprintf("%d/%d", c, d)
-	case SvcGVR:
-		return ""
-	}
-
-	return render.NAValue
-}
-
-func status(gvr client.GVR, r metav1.TableRow, h []metav1.TableColumnDefinition) string {
-	switch gvr {
-	case PodGVR:
-		if status := r.Cells[indexOf("Status", h)]; status == render.PhaseCompleted {
-			return StatusOK
-		} else if !isReady(r.Cells[indexOf("Ready", h)].(string)) || status != render.PhaseRunning {
-			return DegradedStatus
-		}
-	case DpGVR, StsGVR:
-		if !isReady(r.Cells[indexOf("Ready", h)].(string)) {
-			return DegradedStatus
-		}
-	case RsGVR, DsGVR:
-		rd, ok1 := r.Cells[indexOf("Ready", h)].(int64)
-		de, ok2 := r.Cells[indexOf("Desired", h)].(int64)
-		if ok1 && ok2 {
-			if !isReady(fmt.Sprintf("%d/%d", rd, de)) {
-				return DegradedStatus
-			}
-			break
-		}
-		rds, oks1 := r.Cells[indexOf("Ready", h)].(string)
-		des, oks2 := r.Cells[indexOf("Desired", h)].(string)
-		if oks1 && oks2 {
-			if !isReady(fmt.Sprintf("%s/%s", rds, des)) {
-				return DegradedStatus
-			}
-		}
-	case SvcGVR:
-	default:
-		return render.MissingValue
-	}
-
-	return StatusOK
-}
-
-func validity(status string) string {
-	if status != "DEGRADED" {
-		return ""
-	}
-
-	return status
-}
+// getStatus retrieves the status of the resource based on its configuration.
+func (wk *Workload) getStatus(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string {
+	status := NotAvailable
+
+	if wkgvr.Status == nil || wkgvr.Status.NA {
+		return status
+	}
+
+	if statusIndex := indexOf(string(wkgvr.Status.CellName), cd); statusIndex != -1 {
+		status = valueToString(cells[statusIndex])
+	}
+
+	return status
+}
+
+// getReadiness retrieves the readiness of the resource based on its configuration.
+func (wk *Workload) getReadiness(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string {
+	ready := NotAvailable
+
+	if wkgvr.Readiness == nil || wkgvr.Readiness.NA {
+		return ready
+	}
+
+	if readyIndex := indexOf(string(wkgvr.Readiness.CellName), cd); readyIndex != -1 {
+		ready = valueToString(cells[readyIndex])
+	}
+
+	if extraReadyIndex := indexOf(string(wkgvr.Readiness.CellExtraName), cd); extraReadyIndex != -1 {
+		ready = fmt.Sprintf("%s/%s", ready, valueToString(cells[extraReadyIndex]))
+	}
+
+	return ready
+}
+
+// getValidity retrieves the validity of the resource based on its configuration.
+func (wk *Workload) getValidity(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string {
+	if wkgvr.Validity == nil || wkgvr.Validity.NA {
+		return ""
+	}
+
+	if getMatchesValidity(wkgvr, cd, cells) == DegradedStatus {
+		return DegradedStatus
+	}
+
+	if getReplicasValidity(wkgvr, cd, cells) == DegradedStatus {
+		return DegradedStatus
+	}
+
+	return ""
+}
+
+// getMatchesValidity returns DEGRADED unless every configured match is fulfilled.
+func getMatchesValidity(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string {
+	for _, m := range wkgvr.Validity.Matchs {
+		v := ""
+		if matchCellNameIndex := indexOf(string(m.CellName), cd); matchCellNameIndex != -1 {
+			v = valueToString(cells[matchCellNameIndex])
+		}
+
+		if v != m.Value {
+			return DegradedStatus
+		}
+	}
+
+	return ""
+}
+
+// getReplicasValidity returns the replicas validity, derived either from a single cell or from a current/desired pair.
+func getReplicasValidity(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string {
+	if getReplicasGrouped(wkgvr, cd, cells) == DegradedStatus {
+		return DegradedStatus
+	}
+
+	if getReplicasSeparated(wkgvr, cd, cells) == DegradedStatus {
+		return DegradedStatus
+	}
+
+	return ""
+}
+
+// getReplicasGrouped returns the replicas validity read from a single cell (e.g. "2/3").
+func getReplicasGrouped(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string {
+	if wkgvr.Validity.Replicas.CellAllName == "" {
+		return ""
+	}
+
+	allCellNameIndex := indexOf(string(wkgvr.Validity.Replicas.CellAllName), cd)
+	if allCellNameIndex < 0 {
+		return ""
+	}
+
+	if !isReady(valueToString(cells[allCellNameIndex])) {
+		return DegradedStatus
+	}
+
+	return ""
+}
+
+// getReplicasSeparated returns the replicas validity read from two cells (current and desired).
+func getReplicasSeparated(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string {
+	if wkgvr.Validity.Replicas.CellCurrentName == "" || wkgvr.Validity.Replicas.CellDesiredName == "" {
+		return ""
+	}
+
+	currentIndex := indexOf(string(wkgvr.Validity.Replicas.CellCurrentName), cd)
+	desiredIndex := indexOf(string(wkgvr.Validity.Replicas.CellDesiredName), cd)
+	if currentIndex < 0 || desiredIndex < 0 {
+		return ""
+	}
+
+	if !isReady(fmt.Sprintf("%s/%s", valueToString(cells[currentIndex]), valueToString(cells[desiredIndex]))) {
+		return DegradedStatus
+	}
+
+	return ""
+}
+
+func valueToString(v interface{}) string {
+	if sv, ok := v.(string); ok {
+		return sv
+	}
+
+	if iv, ok := v.(int64); ok {
+		return strconv.Itoa(int(iv))
+	}
+
+	return ""
+}
 
 func isReady(s string) bool {
@@ -222,6 +308,10 @@ func isReady(s string) bool {
 }
 
 func indexOf(n string, defs []metav1.TableColumnDefinition) int {
+	if n == "" {
+		return -1
+	}
+
 	for i, d := range defs {
 		if d.Name == n {
 			return i
diff --git a/internal/keys.go b/internal/keys.go
index d18bc11d36..1728dc66ac 100644
--- a/internal/keys.go
+++ b/internal/keys.go
@@ -36,4 +36,5 @@ const (
 	KeyWait          ContextKey = "wait"
 	KeyPodCounting   ContextKey = "podCounting"
 	KeyEnableImgScan ContextKey = "vulScan"
+	KeyWorkloadGVRs  ContextKey = "workloadGVRs"
 )
diff --git a/internal/view/workload.go b/internal/view/workload.go
index 78bba38fa2..20857fdcda 100644
--- a/internal/view/workload.go
+++ b/internal/view/workload.go
@@ -29,6 +29,7 @@ func NewWorkload(gvr client.GVR) ResourceViewer {
 	w := Workload{
 		ResourceViewer: NewBrowser(gvr),
 	}
+	w.SetContextFn(w.workloadContext)
 	w.GetTable().SetEnterFn(w.showRes)
 	w.AddBindKeysFn(w.bindKeys)
 	w.GetTable().SetSortCol("KIND", true)
@@ -36,6 +37,11 @@ func NewWorkload(gvr client.GVR) ResourceViewer {
 	return &w
 }
 
+// workloadContext stashes the configured workloadGVRs into the view context for the workload dao to read back.
+func (w *Workload) workloadContext(ctx context.Context) context.Context {
+	return context.WithValue(ctx, internal.KeyWorkloadGVRs, w.App().Config.K9s.WorkloadGVRs)
+}
+
 func (w *Workload) bindDangerousKeys(aa *ui.KeyActions) {
 	aa.Bulk(ui.KeyMap{
 		ui.KeyE: ui.NewKeyActionWithOpts("Edit", w.editCmd,
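The dao helpers above all reduce to one primitive: locate a named column in the server-side Table, normalize the cell (which arrives as a string or an int64), and compare or format it. Below is a self-contained sketch of that primitive, assuming plain string slices in place of `metav1.TableColumnDefinition` and a minimal current/desired parser standing in for the real `isReady` (whose body lies outside this diff):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// indexOf returns the position of a named column, or -1 when absent or unnamed.
func indexOf(name string, cols []string) int {
	if name == "" {
		return -1
	}
	for i, c := range cols {
		if c == name {
			return i
		}
	}
	return -1
}

// valueToString normalizes table cells, which arrive as string or int64.
func valueToString(v interface{}) string {
	switch t := v.(type) {
	case string:
		return t
	case int64:
		return strconv.FormatInt(t, 10)
	}
	return ""
}

// isReady is an assumed stand-in for the real parser: "2/3" -> false, "3/3" -> true.
func isReady(s string) bool {
	cur, des, ok := strings.Cut(s, "/")
	if !ok {
		return false
	}
	return cur == des
}

func main() {
	cols := []string{"Name", "Ready", "Desired"}
	cells := []interface{}{"fred", int64(2), int64(3)}

	// Both columns are known to exist here, so indexing directly is safe.
	ready := valueToString(cells[indexOf("Ready", cols)])
	desired := valueToString(cells[indexOf("Desired", cols)])
	fmt.Println(ready+"/"+desired, isReady(ready+"/"+desired)) // 2/3 false
}
```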
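Finally, the wiring between view and dao relies on a typed context key. A minimal sketch of that pattern under hypothetical names (`keyWorkloadGVRs`, `withWorkloadGVRs`, `list` are illustrative; the actual pieces are `internal.KeyWorkloadGVRs`, `Workload.workloadContext`, and the `ctx.Value` lookup at the top of `Workload.List`):

```go
package main

import (
	"context"
	"fmt"
)

// An unexported key type avoids collisions with other context values.
type contextKey string

const keyWorkloadGVRs contextKey = "workloadGVRs"

type workloadGVR struct{ Name string }

// The view side: stash the configured GVRs into the request context.
func withWorkloadGVRs(ctx context.Context, gvrs []workloadGVR) context.Context {
	return context.WithValue(ctx, keyWorkloadGVRs, gvrs)
}

// The dao side: a missing or mistyped value yields a nil slice, so the
// workload list simply comes back empty instead of panicking.
func list(ctx context.Context) {
	gvrs, _ := ctx.Value(keyWorkloadGVRs).([]workloadGVR)
	for _, g := range gvrs {
		fmt.Println("fetching", g.Name)
	}
}

func main() {
	ctx := withWorkloadGVRs(context.Background(), []workloadGVR{{Name: "v1/pods"}})
	list(ctx) // fetching v1/pods
}
```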