diff --git a/client/pkg/client/example_test.go b/client/pkg/client/example_test.go index 1bf54d8bf..7f920e738 100644 --- a/client/pkg/client/example_test.go +++ b/client/pkg/client/example_test.go @@ -21,13 +21,13 @@ import ( func Example() { // This example shows how to use Omni client to access resources. - // Setup versions information. You can embed that into `go build` too. + // Set up version information. You can embed this into the binary via `go build -ldflags`. version.Name = "omni" version.SHA = "build SHA" version.Tag = "v0.9.1" - // For this example we will use Omni service account. - // You can create your service account in advance: + // For this example we will use an Omni service account. + // You can create one in advance: // // omnictl serviceaccount create example.account // Created service account "example.account" with public key ID "" @@ -36,29 +36,29 @@ func Example() { // OMNI_ENDPOINT=https://.omni.siderolabs.io:443 // OMNI_SERVICE_ACCOUNT_KEY=base64encodedkey // - // Note: Store the service account key securely, it will not be displayed again + // Note: Store the service account key securely; it will not be displayed again. ctx := context.Background() - // Creating a new client. - client, err := client.New("https://.omni.siderolabs.io:443", client.WithServiceAccount( + // Create a new client. + c, err := client.New("https://.omni.siderolabs.io:443", client.WithServiceAccount( "base64encodedkey", // From the generated service account. )) if err != nil { log.Panicf("failed to create omni client %s", err) } - // Omni service is using COSI https://github.com/cosi-project/runtime/. - // The same client is used to get resources in Talos. - st := client.Omni().State() - defer func() { - if e := client.Close(); e != nil { + if e := c.Close(); e != nil { log.Printf("failed to close client %s", e) } }() - // Getting the resources from the Omni state. 
+ // Omni uses COSI (https://github.com/cosi-project/runtime) for resource management, + // the same runtime used in Talos. + st := c.Omni().State() + + // List all machine statuses. machines, err := safe.StateList[*omni.MachineStatus](ctx, st, omni.NewMachineStatus("").Metadata()) if err != nil { log.Panicf("failed to get machines %s", err) @@ -72,34 +72,25 @@ func Example() { for item := range machines.All() { log.Printf("machine %s, connected: %t", item.Metadata(), item.TypedSpec().Value.GetConnected()) - // Check cluster assignment for a machine. - // Find a machine which is allocated into a cluster for the later use. - if c, ok := item.Metadata().Labels().Get(omni.LabelCluster); ok && machine == nil { - cluster = c + // Find a machine that is allocated to a cluster, for use in the Talos API example below. + if clusterName, ok := item.Metadata().Labels().Get(omni.LabelCluster); ok && machine == nil { + cluster = clusterName machine = item } } - // Creating an empty cluster via template. - // Alternative is to use template.Load to load a cluster template. - template := template.WithCluster("example.cluster") - - if _, err = template.Sync(ctx, st); err != nil { - log.Panicf("failed to sync cluster %s", err) - } + // Create an empty cluster via a template. + // Use template.Load to load a cluster template from YAML instead. + tmpl := template.WithCluster("example.cluster") - if _, err = template.Sync(ctx, st); err != nil { + if _, err = tmpl.Sync(ctx, st); err != nil { log.Panicf("failed to sync cluster %s", err) } - log.Printf("sync cluster") - - // Delete cluster. - if _, err = template.Delete(ctx, st); err != nil { - log.Panicf("failed to delete the cluster %s", err) - } + log.Printf("synced cluster") - if _, err = template.Delete(ctx, st); err != nil { + // Delete the cluster. + if _, err = tmpl.Delete(ctx, st); err != nil { log.Panicf("failed to delete the cluster %s", err) } @@ -112,12 +103,12 @@ func Example() { return } - // Using Talos through Omni. 
- // Use cluster and machine which we previously found. - cpuInfo, err := client.Talos().WithCluster( + // Use the Talos API through Omni. + // You can use the machine UUID; Omni will resolve it to the machine IP. + cpuInfo, err := c.Talos().WithCluster( cluster, ).WithNodes( - machine.Metadata().ID(), // You can use machine UUID as Omni will properly resolve it into machine IP. + machine.Metadata().ID(), ).CPUInfo(ctx, &emptypb.Empty{}) if err != nil { log.Panicf("failed to read machine CPU info %s", err) } @@ -133,9 +124,50 @@ func Example() { } } - // Talking to Omni specific APIs: getting talosconfig. - _, err = client.Management().Talosconfig(ctx) + // Get the talosconfig for the cluster. + _, err = c.Management().Talosconfig(ctx) if err != nil { log.Panicf("failed to get talosconfig %s", err) } + + // Resource management via the COSI state API. + // The following example uses MachineClass as a representative user-managed resource to demonstrate + // create, update, and destroy operations. The same patterns apply to any other user-managed resource type. + // + // MachineClass selects machines using label selector expressions. + // Each entry in MatchLabels is a comma-separated set of conditions (AND within one entry, OR across entries). + machineClass := omni.NewMachineClass("test") + machineClass.Metadata().Labels().Set("my-label", "my-value") + + machineClass.TypedSpec().Value.MatchLabels = []string{ + "omni.sidero.dev/arch = amd64, omni.sidero.dev/cpus > 2", // amd64 machines with more than 2 CPUs + } + + if err = st.Create(ctx, machineClass); err != nil { + log.Panicf("failed to create machine class %s", err) + } + + // Update the machine class: add an OR condition to also match arm64 machines. 
+ updated, err := safe.StateUpdateWithConflicts(ctx, st, machineClass.Metadata(), func(res *omni.MachineClass) error { + res.TypedSpec().Value.MatchLabels = append(res.TypedSpec().Value.MatchLabels, "omni.sidero.dev/arch = arm64") + + return nil + }) + if err != nil { + log.Panicf("failed to update machine class %s", err) + } + + log.Printf("updated machine class labels: %v", updated.TypedSpec().Value.MatchLabels) + + // For upsert (create-or-update) semantics, safe.StateModify or safe.StateModifyWithResult can be used instead. + + // Destroy the machine class. + // + // The correct deletion sequence for COSI resources is: call Teardown, wait until all finalizers are + // cleared (controllers holding them will release upon seeing the teardown phase), then call Destroy. + // Calling only Teardown is not sufficient, and calling Destroy directly without Teardown first will fail + // if any finalizers are present. TeardownAndDestroy handles this full sequence. + if err = st.TeardownAndDestroy(ctx, machineClass.Metadata()); err != nil { + log.Panicf("failed to destroy machine class %s", err) + } }