From 8553b052b5b305549909a3168140077e6101342b Mon Sep 17 00:00:00 2001 From: Thomas Lacroix Date: Wed, 4 Dec 2024 18:58:20 -0500 Subject: [PATCH 01/10] feat: Adding new config for workload view --- internal/config/k9s.go | 52 ++++++------ internal/config/workload.go | 141 ++++++++++++++++++++++++++++++++ internal/dao/workload.go | 156 +++++++++++++++++++++--------------- internal/keys.go | 57 ++++++------- internal/view/workload.go | 6 ++ 5 files changed, 298 insertions(+), 114 deletions(-) create mode 100644 internal/config/workload.go diff --git a/internal/config/k9s.go b/internal/config/k9s.go index 953fb7ca3b..812a628592 100644 --- a/internal/config/k9s.go +++ b/internal/config/k9s.go @@ -18,19 +18,20 @@ import ( // K9s tracks K9s configuration options. type K9s struct { - LiveViewAutoRefresh bool `json:"liveViewAutoRefresh" yaml:"liveViewAutoRefresh"` - ScreenDumpDir string `json:"screenDumpDir" yaml:"screenDumpDir,omitempty"` - RefreshRate int `json:"refreshRate" yaml:"refreshRate"` - MaxConnRetry int `json:"maxConnRetry" yaml:"maxConnRetry"` - ReadOnly bool `json:"readOnly" yaml:"readOnly"` - NoExitOnCtrlC bool `json:"noExitOnCtrlC" yaml:"noExitOnCtrlC"` - UI UI `json:"ui" yaml:"ui"` - SkipLatestRevCheck bool `json:"skipLatestRevCheck" yaml:"skipLatestRevCheck"` - DisablePodCounting bool `json:"disablePodCounting" yaml:"disablePodCounting"` - ShellPod ShellPod `json:"shellPod" yaml:"shellPod"` - ImageScans ImageScans `json:"imageScans" yaml:"imageScans"` - Logger Logger `json:"logger" yaml:"logger"` - Thresholds Threshold `json:"thresholds" yaml:"thresholds"` + LiveViewAutoRefresh bool `json:"liveViewAutoRefresh" yaml:"liveViewAutoRefresh"` + ScreenDumpDir string `json:"screenDumpDir" yaml:"screenDumpDir,omitempty"` + RefreshRate int `json:"refreshRate" yaml:"refreshRate"` + MaxConnRetry int `json:"maxConnRetry" yaml:"maxConnRetry"` + ReadOnly bool `json:"readOnly" yaml:"readOnly"` + NoExitOnCtrlC bool `json:"noExitOnCtrlC" yaml:"noExitOnCtrlC"` + UI UI 
`json:"ui" yaml:"ui"` + SkipLatestRevCheck bool `json:"skipLatestRevCheck" yaml:"skipLatestRevCheck"` + DisablePodCounting bool `json:"disablePodCounting" yaml:"disablePodCounting"` + ShellPod ShellPod `json:"shellPod" yaml:"shellPod"` + ImageScans ImageScans `json:"imageScans" yaml:"imageScans"` + Logger Logger `json:"logger" yaml:"logger"` + Thresholds Threshold `json:"thresholds" yaml:"thresholds"` + CustomWorkloadGVRs []WorkloadGVR `json:"customWorkloadGVRs,omitempty" yaml:"customWorkloadGVRs,omitempty"` manualRefreshRate int manualHeadless *bool manualLogoless *bool @@ -49,16 +50,17 @@ type K9s struct { // NewK9s create a new K9s configuration. func NewK9s(conn client.Connection, ks data.KubeSettings) *K9s { return &K9s{ - RefreshRate: defaultRefreshRate, - MaxConnRetry: defaultMaxConnRetry, - ScreenDumpDir: AppDumpsDir, - Logger: NewLogger(), - Thresholds: NewThreshold(), - ShellPod: NewShellPod(), - ImageScans: NewImageScans(), - dir: data.NewDir(AppContextsDir), - conn: conn, - ks: ks, + RefreshRate: defaultRefreshRate, + MaxConnRetry: defaultMaxConnRetry, + ScreenDumpDir: AppDumpsDir, + Logger: NewLogger(), + Thresholds: NewThreshold(), + ShellPod: NewShellPod(), + ImageScans: NewImageScans(), + CustomWorkloadGVRs: NewDefaultWorkloadGVRs(), + dir: data.NewDir(AppContextsDir), + conn: conn, + ks: ks, } } @@ -108,6 +110,10 @@ func (k *K9s) Merge(k1 *K9s) { if k1.Thresholds != nil { k.Thresholds = k1.Thresholds } + // TODO: comment, we don't want to update the list if not set, this should use the default values instead + if k1.CustomWorkloadGVRs != nil { + k.CustomWorkloadGVRs = k1.CustomWorkloadGVRs + } } // AppScreenDumpDir fetch screen dumps dir. 
diff --git a/internal/config/workload.go b/internal/config/workload.go new file mode 100644 index 0000000000..8d8a146a7a --- /dev/null +++ b/internal/config/workload.go @@ -0,0 +1,141 @@ +package config + +import "github.com/derailed/k9s/internal/client" + +var ( + // TODO: Remove that and add it to the doc (with basic example) + // yaml + // customWorkloadGVRs: + // - Name: "" + // Status: + // CellName: "" + // Readiness: + // CellName: "" + // ExtraCellName: "" + // Validity: + // Replicas: + // AllCellName: "" + // CurrentCellName: "" + // DesiredCellName: "" + // Matchs: + // - CellName: "" + // Value : "" + // - CellName: "" + // Value: "" + // ... + + defaultGVRs = map[string]WorkloadGVR{ + "v1/pods": { + Name: "v1/pods", + Status: &WKStatus{CellName: "Status"}, + Readiness: &Readiness{CellName: "Ready"}, + Validity: &Validity{ + Matchs: []Match{ + {CellName: "Status", Value: "Running"}, + }, + Replicas: Replicas{AllCellName: "Ready"}, + }, + }, + "apps/v1/replicasets": { + Name: "apps/v1/replicasets", + Readiness: &Readiness{CellName: "Current", ExtraCellName: "Desired"}, + Validity: &Validity{ + Replicas: Replicas{DesiredCellName: "Desired", CurrentCellName: "Current"}, + }, + }, + "v1/serviceaccounts": {Name: "v1/serviceaccounts"}, + "v1/persistentvolumeclaims": {Name: "v1/persistentvolumeclaims"}, + "scheduling.k8s.io/v1/priorityclasses": {Name: "scheduling.k8s.io/v1/priorityclasses"}, + "v1/configmaps": {Name: "v1/configmaps"}, + "v1/secrets": {Name: "v1/secrets"}, + "v1/services": {Name: "v1/services"}, + "apps/v1/daemonsets": { + Name: "apps/v1/daemonsets", + Readiness: &Readiness{CellName: "Ready", ExtraCellName: "Desired"}, + Validity: &Validity{ + Replicas: Replicas{DesiredCellName: "Desired", CurrentCellName: "Ready"}, + }, + }, + "apps/v1/statefulSets": { + Name: "apps/v1/statefulSets", + Status: &WKStatus{CellName: "Ready"}, + Readiness: &Readiness{CellName: "Ready"}, + Validity: &Validity{ + Replicas: Replicas{AllCellName: "Ready"}, + }, + }, + 
"apps/v1/deployments": { + Name: "apps/v1/deployments", + Readiness: &Readiness{CellName: "Ready"}, + Validity: &Validity{ + Replicas: Replicas{AllCellName: "Ready"}, + }, + }, + } +) + +// TODO: Rename all fields with better names + +type CellName string + +type WKStatus struct { + CellName CellName `json:"name" yaml:"name"` +} + +type Readiness struct { + CellName CellName `json:"name" yaml:"name"` + ExtraCellName CellName `json:"extra_cell_name" yaml:"extra_cell_name"` +} + +type Match struct { + CellName CellName `json:"name" yaml:"name"` + Value string `json:"value" yaml:"value"` +} + +type Replicas struct { + CurrentCellName CellName `json:"currentName" yaml:"currentName"` + DesiredCellName CellName `json:"desiredName" yaml:"desiredName"` + AllCellName CellName `json:"allName" yaml:"allName"` +} + +type Validity struct { + Matchs []Match `json:"matchs,omitempty" yaml:"matchs,omitempty"` + Replicas Replicas `json:"replicas" yaml:"replicas"` +} + +type WorkloadGVR struct { + Name string `json:"name" yaml:"name"` + Status *WKStatus `json:"status,omitempty" yaml:"status,omitempty"` + Readiness *Readiness `json:"readiness,omitempty" yaml:"readiness,omitempty"` + Validity *Validity `json:"validity,omitempty" yaml:"validity,omitempty"` +} + +// TODO: Find a better name, this only create the default gvr values +func NewDefaultWorkloadGVRs() []WorkloadGVR { + defaultWorkloadGVRs := make([]WorkloadGVR, 0) + for _, gvr := range defaultGVRs { + defaultWorkloadGVRs = append(defaultWorkloadGVRs, gvr) + } + + return defaultWorkloadGVRs +} + +// TODO: Add comment +func (wgvr WorkloadGVR) GetGVR() client.GVR { + return client.NewGVR(wgvr.Name) +} + +// TODO: Add comment, this is applying default value for GVR set partially +func (wkgvr *WorkloadGVR) ApplyDefault() { + if existingGvr, ok := defaultGVRs[wkgvr.Name]; ok { + if wkgvr.Status == nil { + wkgvr.Status = existingGvr.Status + } + if wkgvr.Readiness == nil { + wkgvr.Readiness = existingGvr.Readiness + } + if 
wkgvr.Validity == nil { + wkgvr.Validity = existingGvr.Validity + } + } +} diff --git a/internal/dao/workload.go b/internal/dao/workload.go index 604c6ca9ad..3d468846b4 100644 --- a/internal/dao/workload.go +++ b/internal/dao/workload.go @@ -13,6 +13,7 @@ import ( "github.com/derailed/k9s/internal" "github.com/derailed/k9s/internal/client" + "github.com/derailed/k9s/internal/config" "github.com/derailed/k9s/internal/render" "github.com/rs/zerolog/log" "k8s.io/apimachinery/pkg/api/meta" @@ -21,23 +22,22 @@ import ( ) const ( - StatusOK = "OK" DegradedStatus = "DEGRADED" + NotAvailable = "n/a" ) var ( - SaGVR = client.NewGVR("v1/serviceaccounts") - PvcGVR = client.NewGVR("v1/persistentvolumeclaims") - PcGVR = client.NewGVR("scheduling.k8s.io/v1/priorityclasses") - CmGVR = client.NewGVR("v1/configmaps") - SecGVR = client.NewGVR("v1/secrets") - PodGVR = client.NewGVR("v1/pods") - SvcGVR = client.NewGVR("v1/services") - DsGVR = client.NewGVR("apps/v1/daemonsets") - StsGVR = client.NewGVR("apps/v1/statefulSets") - DpGVR = client.NewGVR("apps/v1/deployments") - RsGVR = client.NewGVR("apps/v1/replicasets") - resList = []client.GVR{PodGVR, SvcGVR, DsGVR, StsGVR, DpGVR, RsGVR} + SaGVR = client.NewGVR("v1/serviceaccounts") + PvcGVR = client.NewGVR("v1/persistentvolumeclaims") + PcGVR = client.NewGVR("scheduling.k8s.io/v1/priorityclasses") + CmGVR = client.NewGVR("v1/configmaps") + SecGVR = client.NewGVR("v1/secrets") + PodGVR = client.NewGVR("v1/pods") + SvcGVR = client.NewGVR("v1/services") + DsGVR = client.NewGVR("apps/v1/daemonsets") + StsGVR = client.NewGVR("apps/v1/statefulSets") + DpGVR = client.NewGVR("apps/v1/deployments") + RsGVR = client.NewGVR("apps/v1/replicasets") ) // Workload tracks a select set of resources in a given namespace. @@ -100,10 +100,15 @@ func (a *Workload) fetch(ctx context.Context, gvr client.GVR, ns string) (*metav // List fetch workloads. 
func (a *Workload) List(ctx context.Context, ns string) ([]runtime.Object, error) { oo := make([]runtime.Object, 0, 100) - for _, gvr := range resList { - table, err := a.fetch(ctx, gvr, ns) + + workloadGVRs, _ := ctx.Value(internal.KeyCustomWorkloadGVRs).([]config.WorkloadGVR) + for _, wkgvr := range workloadGVRs { + wkgvr.ApplyDefault() + + table, err := a.fetch(ctx, wkgvr.GetGVR(), ns) if err != nil { - return nil, err + // TODO: Add log, skipping in case the resource doesn't exists on the cluster + continue } var ( ns string @@ -122,14 +127,14 @@ func (a *Workload) List(ctx context.Context, ns string) ([]runtime.Object, error ts = m.CreationTimestamp } } - stat := status(gvr, r, table.ColumnDefinitions) + oo = append(oo, &render.WorkloadRes{Row: metav1.TableRow{Cells: []interface{}{ - gvr.String(), + wkgvr.GetGVR().String(), ns, r.Cells[indexOf("Name", table.ColumnDefinitions)], - stat, - readiness(gvr, r, table.ColumnDefinitions), - validity(stat), + a.getStatus(wkgvr, table.ColumnDefinitions, r.Cells), + a.getReadiness(wkgvr, table.ColumnDefinitions, r.Cells), + a.getValidity(wkgvr, table.ColumnDefinitions, r.Cells), ts, }}}) } @@ -138,65 +143,86 @@ func (a *Workload) List(ctx context.Context, ns string) ([]runtime.Object, error return oo, nil } -// Helpers... 
+// TODO: getStatus add comment to explain how it retrieve / try to get the status +func (wk *Workload) getStatus(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string { + status := NotAvailable -func readiness(gvr client.GVR, r metav1.TableRow, h []metav1.TableColumnDefinition) string { - switch gvr { - case PodGVR, DpGVR, StsGVR: - return r.Cells[indexOf("Ready", h)].(string) - case RsGVR, DsGVR: - c := r.Cells[indexOf("Ready", h)].(int64) - d := r.Cells[indexOf("Desired", h)].(int64) - return fmt.Sprintf("%d/%d", c, d) - case SvcGVR: - return "" + if wkgvr.Status != nil { + if statusIndex := indexOf(string(wkgvr.Status.CellName), cd); statusIndex != -1 { + status = valueToString(cells[statusIndex]) + } } - return render.NAValue + return status } -func status(gvr client.GVR, r metav1.TableRow, h []metav1.TableColumnDefinition) string { - switch gvr { - case PodGVR: - if status := r.Cells[indexOf("Status", h)]; status == render.PhaseCompleted { - return StatusOK - } else if !isReady(r.Cells[indexOf("Ready", h)].(string)) || status != render.PhaseRunning { - return DegradedStatus +// TODO: getReadiness add comment to explain how it retrieve / try to get the readiness +func (wk *Workload) getReadiness(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string { + ready := NotAvailable + + if wkgvr.Readiness != nil { + if readyIndex := indexOf(string(wkgvr.Readiness.CellName), cd); readyIndex != -1 { + ready = valueToString(cells[readyIndex]) } - case DpGVR, StsGVR: - if !isReady(r.Cells[indexOf("Ready", h)].(string)) { - return DegradedStatus + + if extrReadyIndex := indexOf(string(wkgvr.Readiness.ExtraCellName), cd); extrReadyIndex != -1 { + ready = fmt.Sprintf("%s/%s", ready, valueToString(cells[extrReadyIndex])) + } + } + + return ready +} + +// TODO: getValidity add comment to explain how it retrieve / try to get the validity (to show them as error when doing ctrl+z) +func (wk *Workload) 
getValidity(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string { + var validity string + + if wkgvr.Validity != nil { + if wkgvr.Validity.Matchs != nil { + for _, m := range wkgvr.Validity.Matchs { + v := "" + if matchCellNameIndex := indexOf(string(m.CellName), cd); matchCellNameIndex != -1 { + v = valueToString(cells[matchCellNameIndex]) + } + + if v != m.Value { + validity = DegradedStatus + } + } } - case RsGVR, DsGVR: - rd, ok1 := r.Cells[indexOf("Ready", h)].(int64) - de, ok2 := r.Cells[indexOf("Desired", h)].(int64) - if ok1 && ok2 { - if !isReady(fmt.Sprintf("%d/%d", rd, de)) { - return DegradedStatus + + if wkgvr.Validity.Replicas.AllCellName != "" { + if allCellNameIndex := indexOf(string(wkgvr.Validity.Replicas.AllCellName), cd); allCellNameIndex != -1 { + if !isReady(valueToString(cells[allCellNameIndex])) { + validity = DegradedStatus + } } - break } - rds, oks1 := r.Cells[indexOf("Ready", h)].(string) - des, oks2 := r.Cells[indexOf("Desired", h)].(string) - if oks1 && oks2 { - if !isReady(fmt.Sprintf("%s/%s", rds, des)) { - return DegradedStatus + + if wkgvr.Validity.Replicas.CurrentCellName != "" && wkgvr.Validity.Replicas.DesiredCellName != "" { + currentIndex := indexOf(string(wkgvr.Validity.Replicas.CurrentCellName), cd) + desiredIndex := indexOf(string(wkgvr.Validity.Replicas.DesiredCellName), cd) + if currentIndex != -1 && desiredIndex != -1 { + if !isReady(fmt.Sprintf("%s/%s", valueToString(cells[desiredIndex]), valueToString(cells[currentIndex]))) { + validity = DegradedStatus + } } } - case SvcGVR: - default: - return render.MissingValue } - return StatusOK + return validity } -func validity(status string) string { - if status != "DEGRADED" { - return "" +func valueToString(v interface{}) string { + if sv, ok := v.(string); ok { + return sv } - return status + if iv, ok := v.(int64); ok { + return strconv.Itoa(int(iv)) + } + + return "" } func isReady(s string) bool { @@ -222,6 +248,10 @@ func isReady(s 
string) bool { } func indexOf(n string, defs []metav1.TableColumnDefinition) int { + if n == "" { + return -1 + } + for i, d := range defs { if d.Name == n { return i diff --git a/internal/keys.go b/internal/keys.go index d18bc11d36..222a75e0b6 100644 --- a/internal/keys.go +++ b/internal/keys.go @@ -8,32 +8,33 @@ type ContextKey string // A collection of context keys. const ( - KeyFactory ContextKey = "factory" - KeyLabels ContextKey = "labels" - KeyFields ContextKey = "fields" - KeyTable ContextKey = "table" - KeyDir ContextKey = "dir" - KeyPath ContextKey = "path" - KeySubject ContextKey = "subject" - KeyGVR ContextKey = "gvr" - KeyFQN ContextKey = "fqn" - KeyForwards ContextKey = "forwards" - KeyContainers ContextKey = "containers" - KeyBenchCfg ContextKey = "benchcfg" - KeyAliases ContextKey = "aliases" - KeyUID ContextKey = "uid" - KeySubjectKind ContextKey = "subjectKind" - KeySubjectName ContextKey = "subjectName" - KeyNamespace ContextKey = "namespace" - KeyCluster ContextKey = "cluster" - KeyApp ContextKey = "app" - KeyStyles ContextKey = "styles" - KeyMetrics ContextKey = "metrics" - KeyHasMetrics ContextKey = "has-metrics" - KeyToast ContextKey = "toast" - KeyWithMetrics ContextKey = "withMetrics" - KeyViewConfig ContextKey = "viewConfig" - KeyWait ContextKey = "wait" - KeyPodCounting ContextKey = "podCounting" - KeyEnableImgScan ContextKey = "vulScan" + KeyFactory ContextKey = "factory" + KeyLabels ContextKey = "labels" + KeyFields ContextKey = "fields" + KeyTable ContextKey = "table" + KeyDir ContextKey = "dir" + KeyPath ContextKey = "path" + KeySubject ContextKey = "subject" + KeyGVR ContextKey = "gvr" + KeyFQN ContextKey = "fqn" + KeyForwards ContextKey = "forwards" + KeyContainers ContextKey = "containers" + KeyBenchCfg ContextKey = "benchcfg" + KeyAliases ContextKey = "aliases" + KeyUID ContextKey = "uid" + KeySubjectKind ContextKey = "subjectKind" + KeySubjectName ContextKey = "subjectName" + KeyNamespace ContextKey = "namespace" + KeyCluster 
ContextKey = "cluster" + KeyApp ContextKey = "app" + KeyStyles ContextKey = "styles" + KeyMetrics ContextKey = "metrics" + KeyHasMetrics ContextKey = "has-metrics" + KeyToast ContextKey = "toast" + KeyWithMetrics ContextKey = "withMetrics" + KeyViewConfig ContextKey = "viewConfig" + KeyWait ContextKey = "wait" + KeyPodCounting ContextKey = "podCounting" + KeyEnableImgScan ContextKey = "vulScan" + KeyCustomWorkloadGVRs ContextKey = "customWorkloadGVRs" ) diff --git a/internal/view/workload.go b/internal/view/workload.go index 78bba38fa2..61ccd7b2ff 100644 --- a/internal/view/workload.go +++ b/internal/view/workload.go @@ -32,10 +32,16 @@ func NewWorkload(gvr client.GVR) ResourceViewer { w.GetTable().SetEnterFn(w.showRes) w.AddBindKeysFn(w.bindKeys) w.GetTable().SetSortCol("KIND", true) + w.SetContextFn(w.workloadContext) return &w } +// TODO: workloadContext Add comment, this is actually setting the config workloadGVRs from the config +func (n *Workload) workloadContext(ctx context.Context) context.Context { + return context.WithValue(ctx, internal.KeyCustomWorkloadGVRs, n.App().Config.K9s.CustomWorkloadGVRs) +} + func (w *Workload) bindDangerousKeys(aa *ui.KeyActions) { aa.Bulk(ui.KeyMap{ ui.KeyE: ui.NewKeyActionWithOpts("Edit", w.editCmd, From ff516ca0def84c84eefa0ef1105e67bf2ea2058d Mon Sep 17 00:00:00 2001 From: Thomas Lacroix Date: Wed, 11 Dec 2024 18:11:10 -0500 Subject: [PATCH 02/10] feat: Add config and add some documentation --- README.md | 92 +++++++++++++++++++++++ internal/config/k9s.go | 56 +++++++------- internal/config/workload.go | 144 ++++++++++++++++++------------------ internal/dao/workload.go | 78 +++++++++++-------- internal/keys.go | 58 +++++++-------- internal/view/workload.go | 6 +- 6 files changed, 271 insertions(+), 163 deletions(-) diff --git a/README.md b/README.md index 351dff3169..685ace5cf4 100644 --- a/README.md +++ b/README.md @@ -1056,6 +1056,98 @@ k9s: --- +## Custom Workload View + +You can customize the workload view with CRDs or 
any resources you want to see on this view. + +To do so, you will need to update your config to add a new field `workloadGVRs` following this pattern: +``` +k9s: + workloadGVRs: + - name: "v1/pods" + - name: "test.com/v1alpha1/myCRD" + status: + cellName: "State" + readiness: + cellName: "Current" + # The cellExtraName will be added as `cellName/cellExtraName` + cellExtraName: "Desired" + validity: + replicas: + cellCurrentName: "Current" + cellDesiredName: "Desired" + matchs: + - cellName: "State" + cellValue: "Ready" + - name: "external-secrets.io/v1beta1/externalsecrets" + status: + na: true + validity: + matchs: + - cellName: Ready + cellValue: True + - cellName: Status + cellValue: SecretSynced +``` +The first one (`v1/pods`) will be recognized by k9s, which will set its default values for the readiness, validity and status. + +The second one (`test.com/v1alpha1/myCRD`) is an unknown GVR; it will use this configuration to be shown on the workload view. + +The third one (`external-secrets.io/v1beta1/externalsecrets`) is an unknown GVR; it will use this configuration to be shown on the workload view, but as the readiness is not set, it will use the default values for it. 
About the status, it is set as `na: true`, meaning not applicable (for example, secrets do not need a status). + +The default values applied for an unknown GVR, if they are not set and not flagged as not applicable, are: +``` + status: + cellName: "Status" + validity: + matchs: + - cellName: "Ready" + cellValue: "True" + readiness: + cellName: "Ready" +``` + +The known GVRs from k9s are: +``` + - v1/pods + - apps/v1/replicasets + - v1/serviceaccounts + - v1/persistentvolumeclaims + - scheduling.k8s.io/v1/priorityclasses + - v1/configmaps + - v1/secrets + - v1/services + - apps/v1/daemonsets + - apps/v1/statefulSets +``` + +The full structure of the configuration is: +``` + workloadGVRs: + - name: string + status: + cellName: string + na: bool + readiness: + cellName: string + cellExtraName: string + na: bool + validity: + matchs: + - cellName: string + cellValue: string + - cellName: string + cellValue: string + ... + replicas: + cellCurrentName: string + cellDesiredName: string + cellAllName: string + na: bool +``` + +--- + ## Contributors Without the contributions from these fine folks, this project would be a total dud! diff --git a/internal/config/k9s.go b/internal/config/k9s.go index 812a628592..f6877cdf42 100644 --- a/internal/config/k9s.go +++ b/internal/config/k9s.go @@ -18,20 +18,21 @@ import ( // K9s tracks K9s configuration options. 
type K9s struct { - LiveViewAutoRefresh bool `json:"liveViewAutoRefresh" yaml:"liveViewAutoRefresh"` - ScreenDumpDir string `json:"screenDumpDir" yaml:"screenDumpDir,omitempty"` - RefreshRate int `json:"refreshRate" yaml:"refreshRate"` - MaxConnRetry int `json:"maxConnRetry" yaml:"maxConnRetry"` - ReadOnly bool `json:"readOnly" yaml:"readOnly"` - NoExitOnCtrlC bool `json:"noExitOnCtrlC" yaml:"noExitOnCtrlC"` - UI UI `json:"ui" yaml:"ui"` - SkipLatestRevCheck bool `json:"skipLatestRevCheck" yaml:"skipLatestRevCheck"` - DisablePodCounting bool `json:"disablePodCounting" yaml:"disablePodCounting"` - ShellPod ShellPod `json:"shellPod" yaml:"shellPod"` - ImageScans ImageScans `json:"imageScans" yaml:"imageScans"` - Logger Logger `json:"logger" yaml:"logger"` - Thresholds Threshold `json:"thresholds" yaml:"thresholds"` - CustomWorkloadGVRs []WorkloadGVR `json:"customWorkloadGVRs,omitempty" yaml:"customWorkloadGVRs,omitempty"` + LiveViewAutoRefresh bool `json:"liveViewAutoRefresh" yaml:"liveViewAutoRefresh"` + ScreenDumpDir string `json:"screenDumpDir" yaml:"screenDumpDir,omitempty"` + RefreshRate int `json:"refreshRate" yaml:"refreshRate"` + MaxConnRetry int `json:"maxConnRetry" yaml:"maxConnRetry"` + ReadOnly bool `json:"readOnly" yaml:"readOnly"` + NoExitOnCtrlC bool `json:"noExitOnCtrlC" yaml:"noExitOnCtrlC"` + UI UI `json:"ui" yaml:"ui"` + SkipLatestRevCheck bool `json:"skipLatestRevCheck" yaml:"skipLatestRevCheck"` + DisablePodCounting bool `json:"disablePodCounting" yaml:"disablePodCounting"` + ShellPod ShellPod `json:"shellPod" yaml:"shellPod"` + ImageScans ImageScans `json:"imageScans" yaml:"imageScans"` + Logger Logger `json:"logger" yaml:"logger"` + Thresholds Threshold `json:"thresholds" yaml:"thresholds"` + // TODO: Add spec to k9s.json + WorkloadGVRs []WorkloadGVR `yaml:"workloadGVRs,omitempty"` manualRefreshRate int manualHeadless *bool manualLogoless *bool @@ -50,17 +51,17 @@ type K9s struct { // NewK9s create a new K9s configuration. 
func NewK9s(conn client.Connection, ks data.KubeSettings) *K9s { return &K9s{ - RefreshRate: defaultRefreshRate, - MaxConnRetry: defaultMaxConnRetry, - ScreenDumpDir: AppDumpsDir, - Logger: NewLogger(), - Thresholds: NewThreshold(), - ShellPod: NewShellPod(), - ImageScans: NewImageScans(), - CustomWorkloadGVRs: NewDefaultWorkloadGVRs(), - dir: data.NewDir(AppContextsDir), - conn: conn, - ks: ks, + RefreshRate: defaultRefreshRate, + MaxConnRetry: defaultMaxConnRetry, + ScreenDumpDir: AppDumpsDir, + Logger: NewLogger(), + Thresholds: NewThreshold(), + ShellPod: NewShellPod(), + ImageScans: NewImageScans(), + WorkloadGVRs: NewWorkloadGVRs(), + dir: data.NewDir(AppContextsDir), + conn: conn, + ks: ks, } } @@ -110,9 +111,8 @@ func (k *K9s) Merge(k1 *K9s) { if k1.Thresholds != nil { k.Thresholds = k1.Thresholds } - // TODO: comment, we don't want to update the list if not set, this should use the default values instead - if k1.CustomWorkloadGVRs != nil { - k.CustomWorkloadGVRs = k1.CustomWorkloadGVRs + if k1.WorkloadGVRs != nil { + k.WorkloadGVRs = k1.WorkloadGVRs } } diff --git a/internal/config/workload.go b/internal/config/workload.go index 8d8a146a7a..acae84cb12 100644 --- a/internal/config/workload.go +++ b/internal/config/workload.go @@ -1,46 +1,36 @@ package config -import "github.com/derailed/k9s/internal/client" +import ( + "github.com/derailed/k9s/internal/client" + "github.com/rs/zerolog/log" +) var ( - // TODO: Remove that and add it to the doc (with basic example) - // yaml - // customWorkloadGVRs: - // - Name: "" - // Status: - // CellName: "" - // Readiness: - // CellName: "" - // ExtraCellName: "" - // Validity: - // Replicas: - // AllCellName: "" - // CurrentCellName: "" - // DesiredCellName: "" - // Matchs: - // - CellName: "" - // Value : "" - // - CellName: "" - // Value: "" - // ... 
+ // defaultGvr represent the default values uses if a custom gvr is set without status, validity or readiness + defaultGvr = WorkloadGVR{ + Status: &GVRStatus{CellName: "Status"}, + Validity: &GVRValidity{Matchs: []Match{{CellName: "Ready", Value: "True"}}}, + Readiness: &GVRReadiness{CellName: "Ready"}, + } - defaultGVRs = map[string]WorkloadGVR{ + // defaultConfigGVRs represents the default configurations + defaultConfigGVRs = map[string]WorkloadGVR{ "v1/pods": { Name: "v1/pods", - Status: &WKStatus{CellName: "Status"}, - Readiness: &Readiness{CellName: "Ready"}, - Validity: &Validity{ + Status: &GVRStatus{CellName: "Status"}, + Readiness: &GVRReadiness{CellName: "Ready"}, + Validity: &GVRValidity{ Matchs: []Match{ {CellName: "Status", Value: "Running"}, }, - Replicas: Replicas{AllCellName: "Ready"}, + Replicas: Replicas{CellAllName: "Ready"}, }, }, "apps/v1/replicasets": { Name: "apps/v1/replicasets", - Readiness: &Readiness{CellName: "Current", ExtraCellName: "Desired"}, - Validity: &Validity{ - Replicas: Replicas{DesiredCellName: "Desired", CurrentCellName: "Current"}, + Readiness: &GVRReadiness{CellName: "Current", CellExtraName: "Desired"}, + Validity: &GVRValidity{ + Replicas: Replicas{CellDesiredName: "Desired", CellCurrentName: "Current"}, }, }, "v1/serviceaccounts": {Name: "v1/serviceaccounts"}, @@ -51,91 +41,103 @@ var ( "v1/services": {Name: "v1/services"}, "apps/v1/daemonsets": { Name: "apps/v1/daemonsets", - Readiness: &Readiness{CellName: "Ready", ExtraCellName: "Desired"}, - Validity: &Validity{ - Replicas: Replicas{DesiredCellName: "Desired", CurrentCellName: "Ready"}, + Readiness: &GVRReadiness{CellName: "Ready", CellExtraName: "Desired"}, + Validity: &GVRValidity{ + Replicas: Replicas{CellDesiredName: "Desired", CellCurrentName: "Ready"}, }, }, "apps/v1/statefulSets": { Name: "apps/v1/statefulSets", - Status: &WKStatus{CellName: "Ready"}, - Readiness: &Readiness{CellName: "Ready"}, - Validity: &Validity{ - Replicas: Replicas{AllCellName: 
"Ready"}, + Status: &GVRStatus{CellName: "Ready"}, + Readiness: &GVRReadiness{CellName: "Ready"}, + Validity: &GVRValidity{ + Replicas: Replicas{CellAllName: "Ready"}, }, }, "apps/v1/deployments": { Name: "apps/v1/deployments", - Readiness: &Readiness{CellName: "Ready"}, - Validity: &Validity{ - Replicas: Replicas{AllCellName: "Ready"}, + Readiness: &GVRReadiness{CellName: "Ready"}, + Validity: &GVRValidity{ + Replicas: Replicas{CellAllName: "Ready"}, }, }, } ) -// TODO: Rename all fields with better names - type CellName string -type WKStatus struct { - CellName CellName `json:"name" yaml:"name"` +type GVRStatus struct { + NA bool `json:"na" yaml:"na"` + CellName CellName `json:"cellName" yaml:"cellName"` } -type Readiness struct { - CellName CellName `json:"name" yaml:"name"` - ExtraCellName CellName `json:"extra_cell_name" yaml:"extra_cell_name"` +type GVRReadiness struct { + NA bool `json:"na" yaml:"na"` + CellName CellName `json:"cellName" yaml:"cellName"` + CellExtraName CellName `json:"cellExtraName" yaml:"cellExtraName"` } type Match struct { - CellName CellName `json:"name" yaml:"name"` - Value string `json:"value" yaml:"value"` + CellName CellName `json:"cellName" yaml:"cellName"` + Value string `json:"cellValue" yaml:"cellValue"` } type Replicas struct { - CurrentCellName CellName `json:"currentName" yaml:"currentName"` - DesiredCellName CellName `json:"desiredName" yaml:"desiredName"` - AllCellName CellName `json:"allName" yaml:"allName"` + CellCurrentName CellName `json:"cellCurrentName" yaml:"cellCurrentName"` + CellDesiredName CellName `json:"cellDesiredName" yaml:"cellDesiredName"` + CellAllName CellName `json:"cellAllName" yaml:"cellAllName"` } -type Validity struct { +type GVRValidity struct { + NA bool `json:"na" yaml:"na"` Matchs []Match `json:"matchs,omitempty" yaml:"matchs,omitempty"` Replicas Replicas `json:"replicas" yaml:"replicas"` } type WorkloadGVR struct { - Name string `json:"name" yaml:"name"` - Status *WKStatus 
`json:"status,omitempty" yaml:"status,omitempty"` - Readiness *Readiness `json:"readiness,omitempty" yaml:"readiness,omitempty"` - Validity *Validity `json:"validity,omitempty" yaml:"validity,omitempty"` + Name string `json:"name" yaml:"name"` + Status *GVRStatus `json:"status,omitempty" yaml:"status,omitempty"` + Readiness *GVRReadiness `json:"readiness,omitempty" yaml:"readiness,omitempty"` + Validity *GVRValidity `json:"validity,omitempty" yaml:"validity,omitempty"` } -// TODO: Find a better name, this only create the default gvr values -func NewDefaultWorkloadGVRs() []WorkloadGVR { +// NewWorkloadGVRs returns the default GVRs to use if no custom config is set +func NewWorkloadGVRs() []WorkloadGVR { defaultWorkloadGVRs := make([]WorkloadGVR, 0) - for _, gvr := range defaultGVRs { + for _, gvr := range defaultConfigGVRs { defaultWorkloadGVRs = append(defaultWorkloadGVRs, gvr) } return defaultWorkloadGVRs } -// TODO: Add comment +// GetGVR will return the GVR defined by the WorkloadGVR's name func (wgvr WorkloadGVR) GetGVR() client.GVR { return client.NewGVR(wgvr.Name) } -// TODO: Add comment, this is applying default value for GVR set partially +// ApplyDefault will complete the GVR with missing values +// If it's an existing GVR's name, it will apply their corresponding default values +// If it's an unknown resources without readiness, status or validity it will use the default ones func (wkgvr *WorkloadGVR) ApplyDefault() { - if existingGvr, ok := defaultGVRs[wkgvr.Name]; ok { - if wkgvr.Status == nil { - wkgvr.Status = existingGvr.Status - } - if wkgvr.Readiness == nil { - wkgvr.Readiness = existingGvr.Readiness - } - if wkgvr.Validity == nil { - wkgvr.Validity = existingGvr.Validity - } + // Apply default values + existingGvr, ok := defaultConfigGVRs[wkgvr.Name] + if ok { + wkgvr.applyDefaultValues(existingGvr) + } else { + wkgvr.applyDefaultValues(defaultGvr) + } +} + +func (wkgvr *WorkloadGVR) applyDefaultValues(defaultGVR WorkloadGVR) { + if wkgvr.Status 
== nil { + wkgvr.Status = defaultGVR.Status + } + if wkgvr.Readiness == nil { + wkgvr.Readiness = defaultGVR.Readiness + } + if wkgvr.Validity == nil { + wkgvr.Validity = defaultGVR.Validity } + log.Warn().Msgf("Validity after: %s", wkgvr.Validity) } diff --git a/internal/dao/workload.go b/internal/dao/workload.go index 3d468846b4..36db8f6185 100644 --- a/internal/dao/workload.go +++ b/internal/dao/workload.go @@ -80,36 +80,21 @@ func (w *Workload) Delete(ctx context.Context, path string, propagation *metav1. return dial.Namespace(ns).Delete(ctx, n, opts) } -func (a *Workload) fetch(ctx context.Context, gvr client.GVR, ns string) (*metav1.Table, error) { - a.Table.gvr = gvr - oo, err := a.Table.List(ctx, ns) - if err != nil { - return nil, err - } - if len(oo) == 0 { - return nil, fmt.Errorf("no table found for gvr: %s", gvr) - } - tt, ok := oo[0].(*metav1.Table) - if !ok { - return nil, errors.New("not a metav1.Table") - } - - return tt, nil -} - // List fetch workloads. func (a *Workload) List(ctx context.Context, ns string) ([]runtime.Object, error) { oo := make([]runtime.Object, 0, 100) - workloadGVRs, _ := ctx.Value(internal.KeyCustomWorkloadGVRs).([]config.WorkloadGVR) - for _, wkgvr := range workloadGVRs { - wkgvr.ApplyDefault() + workloadGVRs, _ := ctx.Value(internal.KeyWorkloadGVRs).([]config.WorkloadGVR) + for i, wkgvr := range workloadGVRs { + // Apply default values + workloadGVRs[i].ApplyDefault() - table, err := a.fetch(ctx, wkgvr.GetGVR(), ns) + table, err := a.fetch(ctx, workloadGVRs[i].GetGVR(), ns) if err != nil { - // TODO: Add log, skipping in case the resource doesn't exists on the cluster + log.Warn().Msgf("could not fetch gvr %s: %q", workloadGVRs[i].Name, err) continue } + var ( ns string ts metav1.Time @@ -129,7 +114,7 @@ func (a *Workload) List(ctx context.Context, ns string) ([]runtime.Object, error } oo = append(oo, &render.WorkloadRes{Row: metav1.TableRow{Cells: []interface{}{ - wkgvr.GetGVR().String(), + 
workloadGVRs[i].GetGVR().String(), ns, r.Cells[indexOf("Name", table.ColumnDefinitions)], a.getStatus(wkgvr, table.ColumnDefinitions, r.Cells), @@ -143,11 +128,32 @@ func (a *Workload) List(ctx context.Context, ns string) ([]runtime.Object, error return oo, nil } -// TODO: getStatus add comment to explain how it retrieve / try to get the status +func (a *Workload) fetch(ctx context.Context, gvr client.GVR, ns string) (*metav1.Table, error) { + a.Table.gvr = gvr + oo, err := a.Table.List(ctx, ns) + if err != nil { + return nil, err + } + if len(oo) == 0 { + return nil, fmt.Errorf("no table found for gvr: %s", gvr) + } + tt, ok := oo[0].(*metav1.Table) + if !ok { + return nil, errors.New("not a metav1.Table") + } + + return tt, nil +} + +// getStatus will retrieve the status of the resource depending of it's configuration func (wk *Workload) getStatus(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string { status := NotAvailable if wkgvr.Status != nil { + if wkgvr.Status.NA { + return status + } + if statusIndex := indexOf(string(wkgvr.Status.CellName), cd); statusIndex != -1 { status = valueToString(cells[statusIndex]) } @@ -156,16 +162,20 @@ func (wk *Workload) getStatus(wkgvr config.WorkloadGVR, cd []metav1.TableColumnD return status } -// TODO: getReadiness add comment to explain how it retrieve / try to get the readiness +// getReadiness will retrieve the readiness of the resource depending of it's configuration func (wk *Workload) getReadiness(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string { ready := NotAvailable if wkgvr.Readiness != nil { + if wkgvr.Readiness.NA { + return ready + } + if readyIndex := indexOf(string(wkgvr.Readiness.CellName), cd); readyIndex != -1 { ready = valueToString(cells[readyIndex]) } - if extrReadyIndex := indexOf(string(wkgvr.Readiness.ExtraCellName), cd); extrReadyIndex != -1 { + if extrReadyIndex := indexOf(string(wkgvr.Readiness.CellExtraName), cd); 
extrReadyIndex != -1 { ready = fmt.Sprintf("%s/%s", ready, valueToString(cells[extrReadyIndex])) } } @@ -173,11 +183,15 @@ func (wk *Workload) getReadiness(wkgvr config.WorkloadGVR, cd []metav1.TableColu return ready } -// TODO: getValidity add comment to explain how it retrieve / try to get the validity (to show them as error when doing ctrl+z) +// getValidity will retrieve the validity of the resource depending of it's configuration func (wk *Workload) getValidity(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string { var validity string if wkgvr.Validity != nil { + if wkgvr.Validity.NA { + return validity + } + if wkgvr.Validity.Matchs != nil { for _, m := range wkgvr.Validity.Matchs { v := "" @@ -191,17 +205,17 @@ func (wk *Workload) getValidity(wkgvr config.WorkloadGVR, cd []metav1.TableColum } } - if wkgvr.Validity.Replicas.AllCellName != "" { - if allCellNameIndex := indexOf(string(wkgvr.Validity.Replicas.AllCellName), cd); allCellNameIndex != -1 { + if wkgvr.Validity.Replicas.CellAllName != "" { + if allCellNameIndex := indexOf(string(wkgvr.Validity.Replicas.CellAllName), cd); allCellNameIndex != -1 { if !isReady(valueToString(cells[allCellNameIndex])) { validity = DegradedStatus } } } - if wkgvr.Validity.Replicas.CurrentCellName != "" && wkgvr.Validity.Replicas.DesiredCellName != "" { - currentIndex := indexOf(string(wkgvr.Validity.Replicas.CurrentCellName), cd) - desiredIndex := indexOf(string(wkgvr.Validity.Replicas.DesiredCellName), cd) + if wkgvr.Validity.Replicas.CellCurrentName != "" && wkgvr.Validity.Replicas.CellDesiredName != "" { + currentIndex := indexOf(string(wkgvr.Validity.Replicas.CellCurrentName), cd) + desiredIndex := indexOf(string(wkgvr.Validity.Replicas.CellDesiredName), cd) if currentIndex != -1 && desiredIndex != -1 { if !isReady(fmt.Sprintf("%s/%s", valueToString(cells[desiredIndex]), valueToString(cells[currentIndex]))) { validity = DegradedStatus diff --git a/internal/keys.go b/internal/keys.go 
index 222a75e0b6..1728dc66ac 100644 --- a/internal/keys.go +++ b/internal/keys.go @@ -8,33 +8,33 @@ type ContextKey string // A collection of context keys. const ( - KeyFactory ContextKey = "factory" - KeyLabels ContextKey = "labels" - KeyFields ContextKey = "fields" - KeyTable ContextKey = "table" - KeyDir ContextKey = "dir" - KeyPath ContextKey = "path" - KeySubject ContextKey = "subject" - KeyGVR ContextKey = "gvr" - KeyFQN ContextKey = "fqn" - KeyForwards ContextKey = "forwards" - KeyContainers ContextKey = "containers" - KeyBenchCfg ContextKey = "benchcfg" - KeyAliases ContextKey = "aliases" - KeyUID ContextKey = "uid" - KeySubjectKind ContextKey = "subjectKind" - KeySubjectName ContextKey = "subjectName" - KeyNamespace ContextKey = "namespace" - KeyCluster ContextKey = "cluster" - KeyApp ContextKey = "app" - KeyStyles ContextKey = "styles" - KeyMetrics ContextKey = "metrics" - KeyHasMetrics ContextKey = "has-metrics" - KeyToast ContextKey = "toast" - KeyWithMetrics ContextKey = "withMetrics" - KeyViewConfig ContextKey = "viewConfig" - KeyWait ContextKey = "wait" - KeyPodCounting ContextKey = "podCounting" - KeyEnableImgScan ContextKey = "vulScan" - KeyCustomWorkloadGVRs ContextKey = "customWorkloadGVRs" + KeyFactory ContextKey = "factory" + KeyLabels ContextKey = "labels" + KeyFields ContextKey = "fields" + KeyTable ContextKey = "table" + KeyDir ContextKey = "dir" + KeyPath ContextKey = "path" + KeySubject ContextKey = "subject" + KeyGVR ContextKey = "gvr" + KeyFQN ContextKey = "fqn" + KeyForwards ContextKey = "forwards" + KeyContainers ContextKey = "containers" + KeyBenchCfg ContextKey = "benchcfg" + KeyAliases ContextKey = "aliases" + KeyUID ContextKey = "uid" + KeySubjectKind ContextKey = "subjectKind" + KeySubjectName ContextKey = "subjectName" + KeyNamespace ContextKey = "namespace" + KeyCluster ContextKey = "cluster" + KeyApp ContextKey = "app" + KeyStyles ContextKey = "styles" + KeyMetrics ContextKey = "metrics" + KeyHasMetrics ContextKey = 
"has-metrics" + KeyToast ContextKey = "toast" + KeyWithMetrics ContextKey = "withMetrics" + KeyViewConfig ContextKey = "viewConfig" + KeyWait ContextKey = "wait" + KeyPodCounting ContextKey = "podCounting" + KeyEnableImgScan ContextKey = "vulScan" + KeyWorkloadGVRs ContextKey = "workloadGVRs" ) diff --git a/internal/view/workload.go b/internal/view/workload.go index 61ccd7b2ff..20857fdcda 100644 --- a/internal/view/workload.go +++ b/internal/view/workload.go @@ -29,17 +29,17 @@ func NewWorkload(gvr client.GVR) ResourceViewer { w := Workload{ ResourceViewer: NewBrowser(gvr), } + w.SetContextFn(w.workloadContext) w.GetTable().SetEnterFn(w.showRes) w.AddBindKeysFn(w.bindKeys) w.GetTable().SetSortCol("KIND", true) - w.SetContextFn(w.workloadContext) return &w } -// TODO: workloadContext Add comment, this is actually setting the config workloadGVRs from the config +// workloadContext will set the configuration's values of the workloadGVRs in the context to be used in the dao/workload func (n *Workload) workloadContext(ctx context.Context) context.Context { - return context.WithValue(ctx, internal.KeyCustomWorkloadGVRs, n.App().Config.K9s.CustomWorkloadGVRs) + return context.WithValue(ctx, internal.KeyWorkloadGVRs, n.App().Config.K9s.WorkloadGVRs) } func (w *Workload) bindDangerousKeys(aa *ui.KeyActions) { From a39390c4eb3c196cf83c86baf2291b7961e2e024 Mon Sep 17 00:00:00 2001 From: Thomas Lacroix Date: Wed, 11 Dec 2024 18:23:47 -0500 Subject: [PATCH 03/10] feat: Refactor getValidity feat: Refactor getValidity feat: refactor the get replicas complexity --- internal/dao/workload.go | 130 ++++++++++++++++++++++++--------------- 1 file changed, 82 insertions(+), 48 deletions(-) diff --git a/internal/dao/workload.go b/internal/dao/workload.go index 36db8f6185..263d6118f1 100644 --- a/internal/dao/workload.go +++ b/internal/dao/workload.go @@ -149,14 +149,13 @@ func (a *Workload) fetch(ctx context.Context, gvr client.GVR, ns string) (*metav func (wk *Workload) getStatus(wkgvr 
config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string { status := NotAvailable - if wkgvr.Status != nil { - if wkgvr.Status.NA { - return status - } + if wkgvr.Status == nil || wkgvr.Status.NA { + return status + } + + if statusIndex := indexOf(string(wkgvr.Status.CellName), cd); statusIndex != -1 { + status = valueToString(cells[statusIndex]) - if statusIndex := indexOf(string(wkgvr.Status.CellName), cd); statusIndex != -1 { - status = valueToString(cells[statusIndex]) - } } return status @@ -166,18 +165,16 @@ func (wk *Workload) getStatus(wkgvr config.WorkloadGVR, cd []metav1.TableColumnD func (wk *Workload) getReadiness(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string { ready := NotAvailable - if wkgvr.Readiness != nil { - if wkgvr.Readiness.NA { - return ready - } + if wkgvr.Readiness == nil || wkgvr.Readiness.NA { + return ready + } - if readyIndex := indexOf(string(wkgvr.Readiness.CellName), cd); readyIndex != -1 { - ready = valueToString(cells[readyIndex]) - } + if readyIndex := indexOf(string(wkgvr.Readiness.CellName), cd); readyIndex != -1 { + ready = valueToString(cells[readyIndex]) + } - if extrReadyIndex := indexOf(string(wkgvr.Readiness.CellExtraName), cd); extrReadyIndex != -1 { - ready = fmt.Sprintf("%s/%s", ready, valueToString(cells[extrReadyIndex])) - } + if extrReadyIndex := indexOf(string(wkgvr.Readiness.CellExtraName), cd); extrReadyIndex != -1 { + ready = fmt.Sprintf("%s/%s", ready, valueToString(cells[extrReadyIndex])) } return ready @@ -185,46 +182,83 @@ func (wk *Workload) getReadiness(wkgvr config.WorkloadGVR, cd []metav1.TableColu // getValidity will retrieve the validity of the resource depending of it's configuration func (wk *Workload) getValidity(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string { - var validity string + if wkgvr.Validity == nil || wkgvr.Validity.NA { + return "" + } - if wkgvr.Validity != nil { - if 
wkgvr.Validity.NA { - return validity - } + if validity := getMatchesValidity(wkgvr, cd, cells); validity == DegradedStatus { + return DegradedStatus + } - if wkgvr.Validity.Matchs != nil { - for _, m := range wkgvr.Validity.Matchs { - v := "" - if matchCellNameIndex := indexOf(string(m.CellName), cd); matchCellNameIndex != -1 { - v = valueToString(cells[matchCellNameIndex]) - } + if validity := getReplicasValidity(wkgvr, cd, cells); validity == DegradedStatus { + return DegradedStatus + } - if v != m.Value { - validity = DegradedStatus - } - } - } + return "" +} - if wkgvr.Validity.Replicas.CellAllName != "" { - if allCellNameIndex := indexOf(string(wkgvr.Validity.Replicas.CellAllName), cd); allCellNameIndex != -1 { - if !isReady(valueToString(cells[allCellNameIndex])) { - validity = DegradedStatus - } - } +func getMatchesValidity(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string { + for _, m := range wkgvr.Validity.Matchs { + v := "" + if matchCellNameIndex := indexOf(string(m.CellName), cd); matchCellNameIndex != -1 { + v = valueToString(cells[matchCellNameIndex]) } - if wkgvr.Validity.Replicas.CellCurrentName != "" && wkgvr.Validity.Replicas.CellDesiredName != "" { - currentIndex := indexOf(string(wkgvr.Validity.Replicas.CellCurrentName), cd) - desiredIndex := indexOf(string(wkgvr.Validity.Replicas.CellDesiredName), cd) - if currentIndex != -1 && desiredIndex != -1 { - if !isReady(fmt.Sprintf("%s/%s", valueToString(cells[desiredIndex]), valueToString(cells[currentIndex]))) { - validity = DegradedStatus - } - } + if v != m.Value { + return DegradedStatus } + } - return validity + return "" +} + +func getReplicasValidity(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string { + if getReplicasGrouped(wkgvr, cd, cells) == DegradedStatus { + return DegradedStatus + } + + if getReplicasSeparated(wkgvr, cd, cells) == DegradedStatus { + return DegradedStatus + } + + return "" +} + +func 
getReplicasGrouped(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string { + if wkgvr.Validity.Replicas.CellAllName == "" { + return "" + } + + allCellNameIndex := indexOf(string(wkgvr.Validity.Replicas.CellAllName), cd) + if allCellNameIndex < 0 { + return "" + } + + if !isReady(valueToString(cells[allCellNameIndex])) { + return DegradedStatus + } + + return "" +} + +func getReplicasSeparated(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string { + if wkgvr.Validity.Replicas.CellCurrentName == "" || wkgvr.Validity.Replicas.CellDesiredName == "" { + return "" + } + + currentIndex := indexOf(string(wkgvr.Validity.Replicas.CellCurrentName), cd) + desiredIndex := indexOf(string(wkgvr.Validity.Replicas.CellDesiredName), cd) + + if currentIndex < 0 || desiredIndex < 0 { + return "" + } + + if !isReady(fmt.Sprintf("%s/%s", valueToString(cells[desiredIndex]), valueToString(cells[currentIndex]))) { + return DegradedStatus + } + + return "" } func valueToString(v interface{}) string { From 2ca9d8463f03bf0481a355aaebe4500c7d53834b Mon Sep 17 00:00:00 2001 From: Thomas Lacroix Date: Thu, 12 Dec 2024 13:05:44 -0500 Subject: [PATCH 04/10] feat: trigger pipeline --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 685ace5cf4..b4c9348e30 100644 --- a/README.md +++ b/README.md @@ -1093,7 +1093,7 @@ The first one (`v1/pods`) will be recognized by k9s and will set it's default va The second one (`test.com/v1alpha1/myCRD`) will be an unknown GVR, it will use this configuration to be shown on the workload view. -The third one (`external-secrets.io/v1beta1/externalsecrets`) will be an unknown GVR, it will use this configuration to be shown on the workload view, but as the readiness is not set, it will use the default values it. 
About the status, it's set as `na: true` not applicable (for example the secrets does not need a status) +The third one (`external-secrets.io/v1beta1/externalsecrets`) will be an unknown GVR, it will use this configuration to be shown on the workload view, but as the readiness is not set, it will use the default values for it. About the status, it's set as `na: true` not applicable (for example the secrets does not need a status). The default values applied for an unknown GVR are if they are not set and if they are not flagged as not applicable are: ``` From 06e66da37dfa80379d16654050a5ea684d155bf1 Mon Sep 17 00:00:00 2001 From: Thomas Lacroix Date: Thu, 12 Dec 2024 15:00:22 -0500 Subject: [PATCH 05/10] feat: refactor workload list --- internal/dao/workload.go | 39 +++++++++++++++++++++++---------------- 1 file changed, 23 insertions(+), 16 deletions(-) diff --git a/internal/dao/workload.go b/internal/dao/workload.go index 263d6118f1..621c0fd7eb 100644 --- a/internal/dao/workload.go +++ b/internal/dao/workload.go @@ -95,23 +95,8 @@ func (a *Workload) List(ctx context.Context, ns string) ([]runtime.Object, error continue } - var ( - ns string - ts metav1.Time - ) for _, r := range table.Rows { - if obj := r.Object.Object; obj != nil { - if m, err := meta.Accessor(obj); err == nil { - ns = m.GetNamespace() - ts = m.GetCreationTimestamp() - } - } else { - var m metav1.PartialObjectMetadata - if err := json.Unmarshal(r.Object.Raw, &m); err == nil { - ns = m.GetNamespace() - ts = m.CreationTimestamp - } - } + ns, ts := a.getNamespaceAndTimestamp(r) oo = append(oo, &render.WorkloadRes{Row: metav1.TableRow{Cells: []interface{}{ workloadGVRs[i].GetGVR().String(), @@ -128,6 +113,28 @@ func (a *Workload) List(ctx context.Context, ns string) ([]runtime.Object, error return oo, nil } +func (a *Workload) getNamespaceAndTimestamp(r metav1.TableRow) (string, metav1.Time) { + var ( + ns string + ts metav1.Time + ) + + if obj := r.Object.Object; obj != nil { + if m, err := 
meta.Accessor(obj); err == nil { + ns = m.GetNamespace() + ts = m.GetCreationTimestamp() + } + } else { + var m metav1.PartialObjectMetadata + if err := json.Unmarshal(r.Object.Raw, &m); err == nil { + ns = m.GetNamespace() + ts = m.CreationTimestamp + } + } + + return ns, ts +} + func (a *Workload) fetch(ctx context.Context, gvr client.GVR, ns string) (*metav1.Table, error) { a.Table.gvr = gvr oo, err := a.Table.List(ctx, ns) From 9fba1c3e4bb8ba51505d769c17e0441f0c2c2d52 Mon Sep 17 00:00:00 2001 From: Thomas Lacroix Date: Thu, 12 Dec 2024 15:18:02 -0500 Subject: [PATCH 06/10] feat: delete log --- internal/config/workload.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/config/workload.go b/internal/config/workload.go index acae84cb12..c8ef2340c2 100644 --- a/internal/config/workload.go +++ b/internal/config/workload.go @@ -2,7 +2,6 @@ package config import ( "github.com/derailed/k9s/internal/client" - "github.com/rs/zerolog/log" ) var ( @@ -133,11 +132,12 @@ func (wkgvr *WorkloadGVR) applyDefaultValues(defaultGVR WorkloadGVR) { if wkgvr.Status == nil { wkgvr.Status = defaultGVR.Status } + if wkgvr.Readiness == nil { wkgvr.Readiness = defaultGVR.Readiness } + if wkgvr.Validity == nil { wkgvr.Validity = defaultGVR.Validity } - log.Warn().Msgf("Validity after: %s", wkgvr.Validity) } From 4754c2f358ff978128508b8c787e95657b511618 Mon Sep 17 00:00:00 2001 From: Thomas Lacroix Date: Thu, 12 Dec 2024 15:38:00 -0500 Subject: [PATCH 07/10] feat: unblock unit tests (will come back to it) --- internal/config/config_test.go | 3 +++ internal/config/k9s_test.go | 6 ++++++ 2 files changed, 9 insertions(+) diff --git a/internal/config/config_test.go b/internal/config/config_test.go index 3910eb5c74..f61c4514b0 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -54,6 +54,7 @@ func TestConfigSave(t *testing.T) { t.Run(k, func(t *testing.T) { c := mock.NewMockConfig() _, err := c.K9s.ActivateContext(u.ct) + 
c.K9s.WorkloadGVRs = nil assert.NoError(t, err) if u.flags != nil { c.K9s.Override(u.k9sFlags) @@ -556,6 +557,7 @@ func TestConfigSaveFile(t *testing.T) { cfg.K9s.ReadOnly = true cfg.K9s.Logger.TailCount = 500 cfg.K9s.Logger.BufferSize = 800 + cfg.K9s.WorkloadGVRs = nil cfg.Validate() path := filepath.Join("/tmp", "k9s.yaml") @@ -574,6 +576,7 @@ func TestConfigReset(t *testing.T) { cfg.Validate() path := filepath.Join("/tmp", "k9s.yaml") + cfg.K9s.WorkloadGVRs = nil assert.NoError(t, cfg.SaveFile(path)) bb, err := os.ReadFile(path) diff --git a/internal/config/k9s_test.go b/internal/config/k9s_test.go index d74c59c39d..3e8fc096c5 100644 --- a/internal/config/k9s_test.go +++ b/internal/config/k9s_test.go @@ -98,11 +98,13 @@ func TestK9sMerge(t *testing.T) { ImageScans: config.ImageScans{}, Logger: config.Logger{}, Thresholds: nil, + WorkloadGVRs: nil, }, k2: &config.K9s{ LiveViewAutoRefresh: true, MaxConnRetry: 100, ShellPod: config.NewShellPod(), + WorkloadGVRs: nil, }, ek: &config.K9s{ LiveViewAutoRefresh: true, @@ -118,6 +120,7 @@ func TestK9sMerge(t *testing.T) { ImageScans: config.ImageScans{}, Logger: config.Logger{}, Thresholds: nil, + WorkloadGVRs: nil, }, }, } @@ -126,6 +129,9 @@ func TestK9sMerge(t *testing.T) { u := uu[k] t.Run(k, func(t *testing.T) { u.k1.Merge(u.k2) + + u.ek.WorkloadGVRs = u.k1.WorkloadGVRs + assert.Equal(t, u.ek, u.k1) }) } From a782b70037055a291a3df1b0daa122e2a36e5119 Mon Sep 17 00:00:00 2001 From: Thomas Lacroix Date: Thu, 12 Dec 2024 17:05:05 -0500 Subject: [PATCH 08/10] feat: fix unit tests and add k9s.json new schema --- internal/config/config_test.go | 3 - internal/config/json/schemas/k9s.json | 48 +++++++++++++ internal/config/k9s_test.go | 3 - internal/config/testdata/configs/default.yaml | 71 +++++++++++++++++++ .../config/testdata/configs/expected.yaml | 71 +++++++++++++++++++ internal/config/testdata/configs/k9s.yaml | 71 +++++++++++++++++++ internal/config/workload.go | 48 +++++++------ 7 files changed, 288 insertions(+), 
27 deletions(-) diff --git a/internal/config/config_test.go b/internal/config/config_test.go index f61c4514b0..3910eb5c74 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -54,7 +54,6 @@ func TestConfigSave(t *testing.T) { t.Run(k, func(t *testing.T) { c := mock.NewMockConfig() _, err := c.K9s.ActivateContext(u.ct) - c.K9s.WorkloadGVRs = nil assert.NoError(t, err) if u.flags != nil { c.K9s.Override(u.k9sFlags) @@ -557,7 +556,6 @@ func TestConfigSaveFile(t *testing.T) { cfg.K9s.ReadOnly = true cfg.K9s.Logger.TailCount = 500 cfg.K9s.Logger.BufferSize = 800 - cfg.K9s.WorkloadGVRs = nil cfg.Validate() path := filepath.Join("/tmp", "k9s.yaml") @@ -576,7 +574,6 @@ func TestConfigReset(t *testing.T) { cfg.Validate() path := filepath.Join("/tmp", "k9s.yaml") - cfg.K9s.WorkloadGVRs = nil assert.NoError(t, cfg.SaveFile(path)) bb, err := os.ReadFile(path) diff --git a/internal/config/json/schemas/k9s.json b/internal/config/json/schemas/k9s.json index e217bbec29..e14def8b9b 100644 --- a/internal/config/json/schemas/k9s.json +++ b/internal/config/json/schemas/k9s.json @@ -125,6 +125,54 @@ } } } + }, + "workloadGVRs": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "status": { + "type":"object", + "properties": { + "cellName": {"type": "string"}, + "na": {"type": "boolean"} + } + }, + "readiness": { + "type": "object", + "properties": { + "cellName": {"type": "string"}, + "cellExtraName": {"type": "string"}, + "na": {"type": "boolean"} + } + }, + "validity": { + "type": "object", + "properties": { + "matchs": { + "type": "array", + "items": { + "type" :"object", + "properties": { + "cellName": {"type": "string"}, + "cellValue": {"type": "string"} + } + } + }, + "replicas": { + "type": "object", + "properties": { + "cellCurrentName": {"type":"string"}, + "cellDesiredName": {"type":"string"}, + "cellAllName": {"type":"string"} + } + }, + "na": {"type": "boolean"} + } + } + } + } } } } diff 
--git a/internal/config/k9s_test.go b/internal/config/k9s_test.go index 3e8fc096c5..ab681ca53a 100644 --- a/internal/config/k9s_test.go +++ b/internal/config/k9s_test.go @@ -129,9 +129,6 @@ func TestK9sMerge(t *testing.T) { u := uu[k] t.Run(k, func(t *testing.T) { u.k1.Merge(u.k2) - - u.ek.WorkloadGVRs = u.k1.WorkloadGVRs - assert.Equal(t, u.ek, u.k1) }) } diff --git a/internal/config/testdata/configs/default.yaml b/internal/config/testdata/configs/default.yaml index abf8432ba4..575d14a3d8 100644 --- a/internal/config/testdata/configs/default.yaml +++ b/internal/config/testdata/configs/default.yaml @@ -39,3 +39,74 @@ k9s: memory: critical: 90 warn: 70 + workloadGVRs: + - name: apps/v1/daemonsets + readiness: + na: false + cellName: Ready + cellExtraName: Desired + validity: + na: false + replicas: + cellCurrentName: Ready + cellDesiredName: Desired + cellAllName: "" + - name: apps/v1/deployments + readiness: + na: false + cellName: Ready + cellExtraName: "" + validity: + na: false + replicas: + cellCurrentName: "" + cellDesiredName: "" + cellAllName: Ready + - name: apps/v1/replicasets + readiness: + na: false + cellName: Current + cellExtraName: Desired + validity: + na: false + replicas: + cellCurrentName: Current + cellDesiredName: Desired + cellAllName: "" + - name: apps/v1/statefulSets + status: + na: false + cellName: Ready + readiness: + na: false + cellName: Ready + cellExtraName: "" + validity: + na: false + replicas: + cellCurrentName: "" + cellDesiredName: "" + cellAllName: Ready + - name: scheduling.k8s.io/v1/priorityclasses + - name: v1/configmaps + - name: v1/persistentvolumeclaims + - name: v1/pods + status: + na: false + cellName: Status + readiness: + na: false + cellName: Ready + cellExtraName: "" + validity: + na: false + matchs: + - cellName: Status + cellValue: Running + replicas: + cellCurrentName: "" + cellDesiredName: "" + cellAllName: Ready + - name: v1/secrets + - name: v1/serviceaccounts + - name: v1/services diff --git 
a/internal/config/testdata/configs/expected.yaml b/internal/config/testdata/configs/expected.yaml index e85a32f160..f028af37ef 100644 --- a/internal/config/testdata/configs/expected.yaml +++ b/internal/config/testdata/configs/expected.yaml @@ -39,3 +39,74 @@ k9s: memory: critical: 90 warn: 70 + workloadGVRs: + - name: apps/v1/daemonsets + readiness: + na: false + cellName: Ready + cellExtraName: Desired + validity: + na: false + replicas: + cellCurrentName: Ready + cellDesiredName: Desired + cellAllName: "" + - name: apps/v1/deployments + readiness: + na: false + cellName: Ready + cellExtraName: "" + validity: + na: false + replicas: + cellCurrentName: "" + cellDesiredName: "" + cellAllName: Ready + - name: apps/v1/replicasets + readiness: + na: false + cellName: Current + cellExtraName: Desired + validity: + na: false + replicas: + cellCurrentName: Current + cellDesiredName: Desired + cellAllName: "" + - name: apps/v1/statefulSets + status: + na: false + cellName: Ready + readiness: + na: false + cellName: Ready + cellExtraName: "" + validity: + na: false + replicas: + cellCurrentName: "" + cellDesiredName: "" + cellAllName: Ready + - name: scheduling.k8s.io/v1/priorityclasses + - name: v1/configmaps + - name: v1/persistentvolumeclaims + - name: v1/pods + status: + na: false + cellName: Status + readiness: + na: false + cellName: Ready + cellExtraName: "" + validity: + na: false + matchs: + - cellName: Status + cellValue: Running + replicas: + cellCurrentName: "" + cellDesiredName: "" + cellAllName: Ready + - name: v1/secrets + - name: v1/serviceaccounts + - name: v1/services diff --git a/internal/config/testdata/configs/k9s.yaml b/internal/config/testdata/configs/k9s.yaml index 8f3546d357..4cc0515450 100644 --- a/internal/config/testdata/configs/k9s.yaml +++ b/internal/config/testdata/configs/k9s.yaml @@ -39,3 +39,74 @@ k9s: memory: critical: 90 warn: 70 + workloadGVRs: + - name: apps/v1/daemonsets + readiness: + na: false + cellName: Ready + cellExtraName: 
Desired + validity: + na: false + replicas: + cellCurrentName: Ready + cellDesiredName: Desired + cellAllName: "" + - name: apps/v1/deployments + readiness: + na: false + cellName: Ready + cellExtraName: "" + validity: + na: false + replicas: + cellCurrentName: "" + cellDesiredName: "" + cellAllName: Ready + - name: apps/v1/replicasets + readiness: + na: false + cellName: Current + cellExtraName: Desired + validity: + na: false + replicas: + cellCurrentName: Current + cellDesiredName: Desired + cellAllName: "" + - name: apps/v1/statefulSets + status: + na: false + cellName: Ready + readiness: + na: false + cellName: Ready + cellExtraName: "" + validity: + na: false + replicas: + cellCurrentName: "" + cellDesiredName: "" + cellAllName: Ready + - name: scheduling.k8s.io/v1/priorityclasses + - name: v1/configmaps + - name: v1/persistentvolumeclaims + - name: v1/pods + status: + na: false + cellName: Status + readiness: + na: false + cellName: Ready + cellExtraName: "" + validity: + na: false + matchs: + - cellName: Status + cellValue: Running + replicas: + cellCurrentName: "" + cellDesiredName: "" + cellAllName: Ready + - name: v1/secrets + - name: v1/serviceaccounts + - name: v1/services diff --git a/internal/config/workload.go b/internal/config/workload.go index c8ef2340c2..0e11baa064 100644 --- a/internal/config/workload.go +++ b/internal/config/workload.go @@ -1,6 +1,8 @@ package config import ( + "sort" + "github.com/derailed/k9s/internal/client" ) @@ -14,30 +16,13 @@ var ( // defaultConfigGVRs represents the default configurations defaultConfigGVRs = map[string]WorkloadGVR{ - "v1/pods": { - Name: "v1/pods", - Status: &GVRStatus{CellName: "Status"}, + "apps/v1/deployments": { + Name: "apps/v1/deployments", Readiness: &GVRReadiness{CellName: "Ready"}, Validity: &GVRValidity{ - Matchs: []Match{ - {CellName: "Status", Value: "Running"}, - }, Replicas: Replicas{CellAllName: "Ready"}, }, }, - "apps/v1/replicasets": { - Name: "apps/v1/replicasets", - Readiness: 
&GVRReadiness{CellName: "Current", CellExtraName: "Desired"}, - Validity: &GVRValidity{ - Replicas: Replicas{CellDesiredName: "Desired", CellCurrentName: "Current"}, - }, - }, - "v1/serviceaccounts": {Name: "v1/serviceaccounts"}, - "v1/persistentvolumeclaims": {Name: "v1/persistentvolumeclaims"}, - "scheduling.k8s.io/v1/priorityclasses": {Name: "scheduling.k8s.io/v1/priorityclasses"}, - "v1/configmaps": {Name: "v1/configmaps"}, - "v1/secrets": {Name: "v1/secrets"}, - "v1/services": {Name: "v1/services"}, "apps/v1/daemonsets": { Name: "apps/v1/daemonsets", Readiness: &GVRReadiness{CellName: "Ready", CellExtraName: "Desired"}, @@ -45,6 +30,13 @@ var ( Replicas: Replicas{CellDesiredName: "Desired", CellCurrentName: "Ready"}, }, }, + "apps/v1/replicasets": { + Name: "apps/v1/replicasets", + Readiness: &GVRReadiness{CellName: "Current", CellExtraName: "Desired"}, + Validity: &GVRValidity{ + Replicas: Replicas{CellDesiredName: "Desired", CellCurrentName: "Current"}, + }, + }, "apps/v1/statefulSets": { Name: "apps/v1/statefulSets", Status: &GVRStatus{CellName: "Ready"}, @@ -53,13 +45,23 @@ var ( Replicas: Replicas{CellAllName: "Ready"}, }, }, - "apps/v1/deployments": { - Name: "apps/v1/deployments", + "scheduling.k8s.io/v1/priorityclasses": {Name: "scheduling.k8s.io/v1/priorityclasses"}, + "v1/configmaps": {Name: "v1/configmaps"}, + "v1/persistentvolumeclaims": {Name: "v1/persistentvolumeclaims"}, + "v1/pods": { + Name: "v1/pods", + Status: &GVRStatus{CellName: "Status"}, Readiness: &GVRReadiness{CellName: "Ready"}, Validity: &GVRValidity{ + Matchs: []Match{ + {CellName: "Status", Value: "Running"}, + }, Replicas: Replicas{CellAllName: "Ready"}, }, }, + "v1/secrets": {Name: "v1/secrets"}, + "v1/serviceaccounts": {Name: "v1/serviceaccounts"}, + "v1/services": {Name: "v1/services"}, } ) @@ -107,6 +109,10 @@ func NewWorkloadGVRs() []WorkloadGVR { defaultWorkloadGVRs = append(defaultWorkloadGVRs, gvr) } + sort.Slice(defaultWorkloadGVRs, func(i, j int) bool { + return 
defaultWorkloadGVRs[i].Name < defaultWorkloadGVRs[j].Name + }) + return defaultWorkloadGVRs } From 1c272f3c8a36aed43669212fcebcb6cabba5cd9d Mon Sep 17 00:00:00 2001 From: Thomas Lacroix Date: Thu, 12 Dec 2024 17:23:56 -0500 Subject: [PATCH 09/10] feat: add comment on the dao/workload --- internal/dao/workload.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/internal/dao/workload.go b/internal/dao/workload.go index 621c0fd7eb..f943185742 100644 --- a/internal/dao/workload.go +++ b/internal/dao/workload.go @@ -113,6 +113,7 @@ func (a *Workload) List(ctx context.Context, ns string) ([]runtime.Object, error return oo, nil } +// getNamespaceAndTimestamp will retrieve the namespace and the timestamp of a given resource func (a *Workload) getNamespaceAndTimestamp(r metav1.TableRow) (string, metav1.Time) { var ( ns string @@ -204,6 +205,7 @@ func (wk *Workload) getValidity(wkgvr config.WorkloadGVR, cd []metav1.TableColum return "" } +// getMatchesValidity retrieves the validity depending on whether all the matches are fulfilled or not func getMatchesValidity(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string { for _, m := range wkgvr.Validity.Matchs { v := "" @@ -220,6 +222,7 @@ func getMatchesValidity(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinit return "" } +// getReplicasValidity returns the validity corresponding to the replicas from 2 cells or a single one func getReplicasValidity(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string { if getReplicasGrouped(wkgvr, cd, cells) == DegradedStatus { return DegradedStatus @@ -232,6 +235,7 @@ func getReplicasValidity(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefini return "" } +// getReplicasGrouped returns the validity corresponding to the replicas from one cell func getReplicasGrouped(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string { if wkgvr.Validity.Replicas.CellAllName == "" { return "" @@ 
-249,6 +253,7 @@ func getReplicasGrouped(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinit return "" } +// getReplicasSeparated returns the validity corresponding to the replicas from 2 cells (current/desired) func getReplicasSeparated(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string { if wkgvr.Validity.Replicas.CellCurrentName == "" || wkgvr.Validity.Replicas.CellDesiredName == "" { return "" From c8e3d3ace72100bd39809ba48fb3517e1f8837e0 Mon Sep 17 00:00:00 2001 From: Thomas Lacroix Date: Fri, 13 Dec 2024 13:22:54 -0500 Subject: [PATCH 10/10] feat: remove todo --- internal/config/k9s.go | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/internal/config/k9s.go b/internal/config/k9s.go index f6877cdf42..ca3d781bd4 100644 --- a/internal/config/k9s.go +++ b/internal/config/k9s.go @@ -18,21 +18,20 @@ import ( // K9s tracks K9s configuration options. type K9s struct { - LiveViewAutoRefresh bool `json:"liveViewAutoRefresh" yaml:"liveViewAutoRefresh"` - ScreenDumpDir string `json:"screenDumpDir" yaml:"screenDumpDir,omitempty"` - RefreshRate int `json:"refreshRate" yaml:"refreshRate"` - MaxConnRetry int `json:"maxConnRetry" yaml:"maxConnRetry"` - ReadOnly bool `json:"readOnly" yaml:"readOnly"` - NoExitOnCtrlC bool `json:"noExitOnCtrlC" yaml:"noExitOnCtrlC"` - UI UI `json:"ui" yaml:"ui"` - SkipLatestRevCheck bool `json:"skipLatestRevCheck" yaml:"skipLatestRevCheck"` - DisablePodCounting bool `json:"disablePodCounting" yaml:"disablePodCounting"` - ShellPod ShellPod `json:"shellPod" yaml:"shellPod"` - ImageScans ImageScans `json:"imageScans" yaml:"imageScans"` - Logger Logger `json:"logger" yaml:"logger"` - Thresholds Threshold `json:"thresholds" yaml:"thresholds"` - // TODO: Add spec to k9s.json - WorkloadGVRs []WorkloadGVR `yaml:"workloadGVRs,omitempty"` + LiveViewAutoRefresh bool `json:"liveViewAutoRefresh" yaml:"liveViewAutoRefresh"` + ScreenDumpDir string `json:"screenDumpDir" 
yaml:"screenDumpDir,omitempty"` + RefreshRate int `json:"refreshRate" yaml:"refreshRate"` + MaxConnRetry int `json:"maxConnRetry" yaml:"maxConnRetry"` + ReadOnly bool `json:"readOnly" yaml:"readOnly"` + NoExitOnCtrlC bool `json:"noExitOnCtrlC" yaml:"noExitOnCtrlC"` + UI UI `json:"ui" yaml:"ui"` + SkipLatestRevCheck bool `json:"skipLatestRevCheck" yaml:"skipLatestRevCheck"` + DisablePodCounting bool `json:"disablePodCounting" yaml:"disablePodCounting"` + ShellPod ShellPod `json:"shellPod" yaml:"shellPod"` + ImageScans ImageScans `json:"imageScans" yaml:"imageScans"` + Logger Logger `json:"logger" yaml:"logger"` + Thresholds Threshold `json:"thresholds" yaml:"thresholds"` + WorkloadGVRs []WorkloadGVR `json:"workloadGVRs,omitempty" yaml:"workloadGVRs,omitempty"` manualRefreshRate int manualHeadless *bool manualLogoless *bool