Skip to content

Commit 8184927

Browse files
pranav767smira
authored and committed
feat: implement KubeSpan multi-document configuration
Migrate KubeSpan configuration to support the multi-document format. Add version-aware support for `talosctl cluster create` and `gen config`: the multi-document format is used for Talos 1.13+, and the legacy format for 1.12 and earlier.

Signed-off-by: Pranav Patil <pranavppatil767@gmail.com>
Signed-off-by: Andrey Smirnov <andrey.smirnov@siderolabs.com>
1 parent 4d0604b commit 8184927

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

58 files changed

+1447
-882
lines changed

cmd/talosctl/cmd/mgmt/cluster/create/clusterops/configmaker/internal/makers/common.go

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,6 @@ import (
3030
"github.com/siderolabs/talos/pkg/machinery/config/generate"
3131
"github.com/siderolabs/talos/pkg/machinery/config/machine"
3232
"github.com/siderolabs/talos/pkg/machinery/config/types/siderolink"
33-
"github.com/siderolabs/talos/pkg/machinery/config/types/v1alpha1"
3433
"github.com/siderolabs/talos/pkg/machinery/constants"
3534
"github.com/siderolabs/talos/pkg/provision"
3635
)
@@ -452,9 +451,9 @@ func (m *Maker[T]) initGenOps() error {
452451

453452
if m.Ops.EnableKubeSpan {
454453
genOptions = slices.Concat(genOptions,
455-
[]generate.Option{generate.WithNetworkOptions(
456-
v1alpha1.WithKubeSpan(),
457-
)},
454+
[]generate.Option{
455+
generate.WithKubeSpanEnabled(m.Ops.EnableKubeSpan),
456+
},
458457
)
459458
}
460459

cmd/talosctl/cmd/mgmt/gen/config.go

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,6 @@ import (
2828
"github.com/siderolabs/talos/pkg/machinery/config/generate"
2929
"github.com/siderolabs/talos/pkg/machinery/config/generate/secrets"
3030
"github.com/siderolabs/talos/pkg/machinery/config/machine"
31-
"github.com/siderolabs/talos/pkg/machinery/config/types/v1alpha1"
3231
"github.com/siderolabs/talos/pkg/machinery/constants"
3332
)
3433

@@ -204,11 +203,10 @@ func writeConfig(args []string) error {
204203
genOptions = append(genOptions, generate.WithVersionContract(versionContract))
205204
}
206205

206+
// Add KubeSpan configuration based on version
207207
if genConfigCmdFlags.withKubeSpan {
208208
genOptions = append(genOptions,
209-
generate.WithNetworkOptions(
210-
v1alpha1.WithKubeSpan(),
211-
),
209+
generate.WithKubeSpanEnabled(genConfigCmdFlags.withKubeSpan),
212210
)
213211
}
214212

hack/release.toml

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -118,6 +118,15 @@ It replaces and deprecates the previous method of setting environment variables
118118
Multiple values for the same environment variable will replace previous values, with the last one taking precedence.
119119
120120
To remove an environment variable, remove it from the `EnvironmentConfig` document and restart the node.
121+
"""
122+
123+
[notes.kubespan]
124+
title = "KubeSpan Configuration"
125+
description = """\
126+
A new `KubeSpanConfig` document has been introduced to configure KubeSpan settings.
127+
It replaces and deprecates the previous method of configuring KubeSpan via the `.machine.network.kubespan` field.
128+
129+
The old configuration field will continue to work for backward compatibility.
121130
"""
122131

123132
[make_deps]

internal/app/machined/pkg/controllers/etcd/config_test.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -195,7 +195,7 @@ func (suite *ConfigSuite) TestReconcile() {
195195
},
196196
MachineConfig: &v1alpha1.MachineConfig{
197197
MachineType: "controlplane",
198-
MachineNetwork: &v1alpha1.NetworkConfig{
198+
MachineNetwork: &v1alpha1.NetworkConfig{ //nolint:staticcheck // legacy config
199199
NetworkInterfaces: tt.networkConfig,
200200
},
201201
},

internal/app/machined/pkg/controllers/k8s/nodeip_config_test.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ func (suite *NodeIPConfigSuite) TestReconcileWithSubnets() {
4141
KubeletNodeIPValidSubnets: []string{"10.0.0.0/24"},
4242
},
4343
},
44-
MachineNetwork: &v1alpha1.NetworkConfig{
44+
MachineNetwork: &v1alpha1.NetworkConfig{ //nolint:staticcheck // legacy controller
4545
NetworkInterfaces: []*v1alpha1.Device{
4646
{
4747
DeviceVIPConfig: &v1alpha1.DeviceVIPConfig{

internal/app/machined/pkg/controllers/kubespan/config.go

Lines changed: 12 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -43,14 +43,20 @@ func NewConfigController() *ConfigController {
4343
if cfg != nil && cfg.Config().Machine() != nil {
4444
c := cfg.Config()
4545

46-
res.TypedSpec().Enabled = c.Machine().Network().KubeSpan().Enabled()
46+
if c.NetworkKubeSpanConfig() != nil {
47+
res.TypedSpec().Enabled = c.NetworkKubeSpanConfig().Enabled()
48+
res.TypedSpec().ForceRouting = c.NetworkKubeSpanConfig().ForceRouting()
49+
res.TypedSpec().AdvertiseKubernetesNetworks = c.NetworkKubeSpanConfig().AdvertiseKubernetesNetworks()
50+
res.TypedSpec().HarvestExtraEndpoints = c.NetworkKubeSpanConfig().HarvestExtraEndpoints()
51+
res.TypedSpec().MTU = c.NetworkKubeSpanConfig().MTU()
52+
53+
if c.NetworkKubeSpanConfig().Filters() != nil {
54+
res.TypedSpec().EndpointFilters = c.NetworkKubeSpanConfig().Filters().Endpoints()
55+
}
56+
}
57+
4758
res.TypedSpec().ClusterID = c.Cluster().ID()
4859
res.TypedSpec().SharedSecret = c.Cluster().Secret()
49-
res.TypedSpec().ForceRouting = c.Machine().Network().KubeSpan().ForceRouting()
50-
res.TypedSpec().AdvertiseKubernetesNetworks = c.Machine().Network().KubeSpan().AdvertiseKubernetesNetworks()
51-
res.TypedSpec().HarvestExtraEndpoints = c.Machine().Network().KubeSpan().HarvestExtraEndpoints()
52-
res.TypedSpec().MTU = c.Machine().Network().KubeSpan().MTU()
53-
res.TypedSpec().EndpointFilters = c.Machine().Network().KubeSpan().Filters().Endpoints()
5460
res.TypedSpec().ExtraEndpoints = c.KubespanConfig().ExtraAnnouncedEndpoints()
5561
}
5662

internal/app/machined/pkg/controllers/kubespan/config_test.go

Lines changed: 62 additions & 48 deletions
Original file line numberDiff line numberDiff line change
@@ -9,11 +9,11 @@ import (
99
"testing"
1010
"time"
1111

12-
"github.com/cosi-project/runtime/pkg/resource"
1312
"github.com/siderolabs/go-pointer"
14-
"github.com/siderolabs/go-retry/retry"
13+
"github.com/stretchr/testify/assert"
1514
"github.com/stretchr/testify/suite"
1615

16+
"github.com/siderolabs/talos/internal/app/machined/pkg/controllers/ctest"
1717
kubespanctrl "github.com/siderolabs/talos/internal/app/machined/pkg/controllers/kubespan"
1818
"github.com/siderolabs/talos/pkg/machinery/config/container"
1919
"github.com/siderolabs/talos/pkg/machinery/config/types/network"
@@ -23,20 +23,16 @@ import (
2323
)
2424

2525
type ConfigSuite struct {
26-
KubeSpanSuite
26+
ctest.DefaultSuite
2727
}
2828

2929
func (suite *ConfigSuite) TestReconcileConfig() {
30-
suite.Require().NoError(suite.runtime.RegisterController(kubespanctrl.NewConfigController()))
31-
32-
suite.startRuntime()
33-
3430
ctr, err := container.New(
3531
&v1alpha1.Config{
3632
ConfigVersion: "v1alpha1",
3733
MachineConfig: &v1alpha1.MachineConfig{
38-
MachineNetwork: &v1alpha1.NetworkConfig{
39-
NetworkKubeSpan: &v1alpha1.NetworkKubeSpan{
34+
MachineNetwork: &v1alpha1.NetworkConfig{ //nolint:staticcheck // legacy config
35+
NetworkKubeSpan: &v1alpha1.NetworkKubeSpan{ //nolint:staticcheck // legacy config
4036
KubeSpanEnabled: pointer.To(true),
4137
},
4238
},
@@ -54,65 +50,83 @@ func (suite *ConfigSuite) TestReconcileConfig() {
5450
)
5551
suite.Require().NoError(err)
5652

57-
cfg := config.NewMachineConfig(ctr)
58-
59-
suite.Require().NoError(suite.state.Create(suite.ctx, cfg))
53+
suite.Create(config.NewMachineConfig(ctr))
6054

61-
specMD := resource.NewMetadata(config.NamespaceName, kubespan.ConfigType, kubespan.ConfigID, resource.VersionUndefined)
55+
ctest.AssertResource(suite, kubespan.ConfigID, func(res *kubespan.Config, asrt *assert.Assertions) {
56+
spec := res.TypedSpec()
6257

63-
suite.Assert().NoError(retry.Constant(3*time.Second, retry.WithUnits(100*time.Millisecond)).Retry(
64-
suite.assertResource(
65-
specMD,
66-
func(res resource.Resource) error {
67-
spec := res.(*kubespan.Config).TypedSpec()
68-
69-
suite.Assert().True(spec.Enabled)
70-
suite.Assert().Equal("8XuV9TZHW08DOk3bVxQjH9ih_TBKjnh-j44tsCLSBzo=", spec.ClusterID)
71-
suite.Assert().Equal("I+1In7fLnpcRIjUmEoeugZnSyFoTF6MztLxICL5Yu0s=", spec.SharedSecret)
72-
suite.Assert().True(spec.ForceRouting)
73-
suite.Assert().False(spec.AdvertiseKubernetesNetworks)
74-
suite.Assert().False(spec.HarvestExtraEndpoints)
75-
suite.Assert().Equal("[\"192.168.33.11:1001\"]", fmt.Sprintf("%q", spec.ExtraEndpoints))
76-
77-
return nil
78-
},
79-
),
80-
))
58+
asrt.True(spec.Enabled)
59+
asrt.Equal("8XuV9TZHW08DOk3bVxQjH9ih_TBKjnh-j44tsCLSBzo=", spec.ClusterID)
60+
asrt.Equal("I+1In7fLnpcRIjUmEoeugZnSyFoTF6MztLxICL5Yu0s=", spec.SharedSecret)
61+
asrt.True(spec.ForceRouting)
62+
asrt.False(spec.AdvertiseKubernetesNetworks)
63+
asrt.False(spec.HarvestExtraEndpoints)
64+
asrt.Equal("[\"192.168.33.11:1001\"]", fmt.Sprintf("%q", spec.ExtraEndpoints))
65+
})
8166
}
8267

8368
func (suite *ConfigSuite) TestReconcileDisabled() {
84-
suite.Require().NoError(suite.runtime.RegisterController(kubespanctrl.NewConfigController()))
85-
86-
suite.startRuntime()
87-
8869
cfg := config.NewMachineConfig(
8970
container.NewV1Alpha1(
9071
&v1alpha1.Config{
9172
ConfigVersion: "v1alpha1",
9273
MachineConfig: &v1alpha1.MachineConfig{},
9374
ClusterConfig: &v1alpha1.ClusterConfig{},
9475
}))
76+
suite.Create(cfg)
9577

96-
suite.Require().NoError(suite.state.Create(suite.ctx, cfg))
97-
98-
specMD := resource.NewMetadata(config.NamespaceName, kubespan.ConfigType, kubespan.ConfigID, resource.VersionUndefined)
78+
ctest.AssertResource(suite, kubespan.ConfigID, func(res *kubespan.Config, asrt *assert.Assertions) {
79+
spec := res.TypedSpec()
9980

100-
suite.Assert().NoError(retry.Constant(3*time.Second, retry.WithUnits(100*time.Millisecond)).Retry(
101-
suite.assertResource(
102-
specMD,
103-
func(res resource.Resource) error {
104-
spec := res.(*kubespan.Config).TypedSpec()
81+
asrt.False(spec.Enabled)
82+
})
83+
}
10584

106-
suite.Assert().False(spec.Enabled)
85+
func (suite *ConfigSuite) TestReconcileMultiDoc() {
86+
kubeSpanCfg := network.NewKubeSpanV1Alpha1()
87+
kubeSpanCfg.ConfigEnabled = pointer.To(true)
88+
kubeSpanCfg.ConfigMTU = pointer.To(uint32(1380))
89+
kubeSpanCfg.ConfigFilters = &network.KubeSpanFiltersConfig{
90+
ConfigEndpoints: []string{"0.0.0.0/0", "::/0"},
91+
}
10792

108-
return nil
93+
ctr, err := container.New(
94+
&v1alpha1.Config{
95+
ConfigVersion: "v1alpha1",
96+
MachineConfig: &v1alpha1.MachineConfig{},
97+
ClusterConfig: &v1alpha1.ClusterConfig{
98+
ClusterID: "test-cluster-id-multi-doc",
99+
ClusterSecret: "test-cluster-secret-multi-doc",
109100
},
110-
),
111-
))
101+
},
102+
kubeSpanCfg,
103+
)
104+
suite.Require().NoError(err)
105+
106+
suite.Create(config.NewMachineConfig(ctr))
107+
108+
ctest.AssertResource(suite, kubespan.ConfigID,
109+
func(res *kubespan.Config, asrt *assert.Assertions) {
110+
spec := res.TypedSpec()
111+
112+
asrt.True(spec.Enabled)
113+
asrt.Equal("test-cluster-id-multi-doc", spec.ClusterID)
114+
asrt.Equal("test-cluster-secret-multi-doc", spec.SharedSecret)
115+
asrt.Equal(uint32(1380), spec.MTU)
116+
asrt.Equal([]string{"0.0.0.0/0", "::/0"}, spec.EndpointFilters)
117+
},
118+
)
112119
}
113120

114121
func TestConfigSuite(t *testing.T) {
115122
t.Parallel()
116123

117-
suite.Run(t, new(ConfigSuite))
124+
suite.Run(t, &ConfigSuite{
125+
DefaultSuite: ctest.DefaultSuite{
126+
Timeout: 5 * time.Second,
127+
AfterSetup: func(suite *ctest.DefaultSuite) {
128+
suite.Require().NoError(suite.Runtime().RegisterController(kubespanctrl.NewConfigController()))
129+
},
130+
},
131+
})
118132
}

internal/app/machined/pkg/controllers/kubespan/endpoint_test.go

Lines changed: 27 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -8,10 +8,10 @@ import (
88
"testing"
99
"time"
1010

11-
"github.com/cosi-project/runtime/pkg/resource"
12-
"github.com/siderolabs/go-retry/retry"
11+
"github.com/stretchr/testify/assert"
1312
"github.com/stretchr/testify/suite"
1413

14+
"github.com/siderolabs/talos/internal/app/machined/pkg/controllers/ctest"
1515
kubespanctrl "github.com/siderolabs/talos/internal/app/machined/pkg/controllers/kubespan"
1616
"github.com/siderolabs/talos/pkg/machinery/config/machine"
1717
"github.com/siderolabs/talos/pkg/machinery/resources/cluster"
@@ -20,17 +20,13 @@ import (
2020
)
2121

2222
type EndpointSuite struct {
23-
KubeSpanSuite
23+
ctest.DefaultSuite
2424
}
2525

2626
func (suite *EndpointSuite) TestReconcile() {
27-
suite.Require().NoError(suite.runtime.RegisterController(&kubespanctrl.EndpointController{}))
28-
29-
suite.startRuntime()
30-
3127
cfg := kubespan.NewConfig(config.NamespaceName, kubespan.ConfigID)
3228
cfg.TypedSpec().HarvestExtraEndpoints = true
33-
suite.Require().NoError(suite.state.Create(suite.ctx, cfg))
29+
suite.Create(cfg)
3430

3531
// create some affiliates and peer statuses
3632
affiliate1 := cluster.NewAffiliate(cluster.NamespaceName, "7x1SuC8Ege5BGXdAfTEff5iQnlWZLfv9h1LGMxA2pYkC")
@@ -59,8 +55,8 @@ func (suite *EndpointSuite) TestReconcile() {
5955
},
6056
}
6157

62-
suite.Require().NoError(suite.state.Create(suite.ctx, affiliate1))
63-
suite.Require().NoError(suite.state.Create(suite.ctx, affiliate2))
58+
suite.Create(affiliate1)
59+
suite.Create(affiliate2)
6460

6561
peerStatus1 := kubespan.NewPeerStatus(kubespan.NamespaceName, affiliate1.TypedSpec().KubeSpan.PublicKey)
6662
*peerStatus1.TypedSpec() = kubespan.PeerStatusSpec{
@@ -80,57 +76,36 @@ func (suite *EndpointSuite) TestReconcile() {
8076
State: kubespan.PeerStateUp,
8177
}
8278

83-
suite.Require().NoError(suite.state.Create(suite.ctx, peerStatus1))
84-
suite.Require().NoError(suite.state.Create(suite.ctx, peerStatus2))
85-
suite.Require().NoError(suite.state.Create(suite.ctx, peerStatus3))
79+
suite.Create(peerStatus1)
80+
suite.Create(peerStatus2)
81+
suite.Create(peerStatus3)
8682

8783
// peer1 is up and has matching affiliate
88-
suite.Assert().NoError(retry.Constant(3*time.Second, retry.WithUnits(100*time.Millisecond)).Retry(
89-
suite.assertResource(
90-
resource.NewMetadata(
91-
kubespan.NamespaceName,
92-
kubespan.EndpointType,
93-
peerStatus1.Metadata().ID(),
94-
resource.VersionUndefined,
95-
),
96-
func(res resource.Resource) error {
97-
spec := res.(*kubespan.Endpoint).TypedSpec()
98-
99-
suite.Assert().Equal(peerStatus1.TypedSpec().Endpoint, spec.Endpoint)
100-
suite.Assert().Equal(affiliate1.TypedSpec().NodeID, spec.AffiliateID)
101-
102-
return nil
103-
},
104-
),
105-
))
84+
ctest.AssertResource(suite, peerStatus1.Metadata().ID(),
85+
func(res *kubespan.Endpoint, asrt *assert.Assertions) {
86+
spec := res.TypedSpec()
87+
88+
asrt.Equal(peerStatus1.TypedSpec().Endpoint, spec.Endpoint)
89+
asrt.Equal(affiliate1.TypedSpec().NodeID, spec.AffiliateID)
90+
},
91+
)
10692

10793
// peer2 is not up, it shouldn't be published as an endpoint
108-
suite.Assert().NoError(retry.Constant(3*time.Second, retry.WithUnits(100*time.Millisecond)).Retry(
109-
suite.assertNoResource(
110-
resource.NewMetadata(
111-
kubespan.NamespaceName,
112-
kubespan.EndpointType,
113-
peerStatus2.Metadata().ID(),
114-
resource.VersionUndefined,
115-
),
116-
),
117-
))
94+
ctest.AssertNoResource[*kubespan.Endpoint](suite, peerStatus2.Metadata().ID())
11895

11996
// peer3 is up, but has not matching affiliate
120-
suite.Assert().NoError(retry.Constant(3*time.Second, retry.WithUnits(100*time.Millisecond)).Retry(
121-
suite.assertNoResource(
122-
resource.NewMetadata(
123-
kubespan.NamespaceName,
124-
kubespan.EndpointType,
125-
peerStatus3.Metadata().ID(),
126-
resource.VersionUndefined,
127-
),
128-
),
129-
))
97+
ctest.AssertNoResource[*kubespan.Endpoint](suite, peerStatus3.Metadata().ID())
13098
}
13199

132100
func TestEndpointSuite(t *testing.T) {
133101
t.Parallel()
134102

135-
suite.Run(t, new(EndpointSuite))
103+
suite.Run(t, &EndpointSuite{
104+
DefaultSuite: ctest.DefaultSuite{
105+
Timeout: 5 * time.Second,
106+
AfterSetup: func(suite *ctest.DefaultSuite) {
107+
suite.Require().NoError(suite.Runtime().RegisterController(&kubespanctrl.EndpointController{}))
108+
},
109+
},
110+
})
136111
}

0 commit comments

Comments (0)