Skip to content

Commit b0a8ba6

Browse files
committed
UPSTREAM: <carry>: kubelet/cm: move CPU reading from cm to cm/cpumanager
Adapted from kubernetes@77d03e4 to avoid a merge conflict. This can be dropped in the 4.19 rebase. Authored-by: Francesco Romani <fromani@redhat.com> Signed-off-by: Peter Hunt <pehunt@redhat.com>
1 parent 433c6c3 commit b0a8ba6

File tree

5 files changed

+29
-41
lines changed

5 files changed

+29
-41
lines changed

pkg/kubelet/cm/container_manager_linux.go

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,6 @@ import (
3434
"github.com/opencontainers/runc/libcontainer/configs"
3535
"k8s.io/klog/v2"
3636
"k8s.io/mount-utils"
37-
"k8s.io/utils/cpuset"
3837
utilpath "k8s.io/utils/path"
3938

4039
libcontaineruserns "github.com/opencontainers/runc/libcontainer/userns"
@@ -132,10 +131,6 @@ type containerManagerImpl struct {
132131
topologyManager topologymanager.Manager
133132
// Interface for Dynamic Resource Allocation management.
134133
draManager dra.Manager
135-
// The full set of CPUs on the node. This field is set lazily, and is used to make sure
136-
// the `cpuset` cgroup hierarchy is created on cgroup v2 when cpumanager is using a
137-
// None policy.
138-
allCPUs cpuset.CPUSet
139134
}
140135

141136
type features struct {

pkg/kubelet/cm/cpumanager/cpu_manager.go

Lines changed: 19 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -94,6 +94,10 @@ type Manager interface {
9494
// GetCPUAffinity returns cpuset which includes cpus from shared pools
9595
// as well as exclusively allocated cpus
9696
GetCPUAffinity(podUID, containerName string) cpuset.CPUSet
97+
98+
// GetAllCPUs returns all the CPUs known by cpumanager, as reported by the
99+
// hardware discovery. Maps to the CPU capacity.
100+
GetAllCPUs() cpuset.CPUSet
97101
}
98102

99103
type manager struct {
@@ -137,7 +141,11 @@ type manager struct {
137141
// stateFileDirectory holds the directory where the state file for checkpoints is held.
138142
stateFileDirectory string
139143

140-
// allocatableCPUs is the set of online CPUs as reported by the system
144+
// allCPUs is the set of online CPUs as reported by the system
145+
allCPUs cpuset.CPUSet
146+
147+
// allocatableCPUs is the set of online CPUs as reported by the system,
148+
// and available for allocation, minus the reserved set
141149
allocatableCPUs cpuset.CPUSet
142150

143151
// pendingAdmissionPod contain the pod during the admission phase
@@ -157,6 +165,11 @@ func NewManager(cpuPolicyName string, cpuPolicyOptions map[string]string, reconc
157165
var policy Policy
158166
var err error
159167

168+
topo, err = topology.Discover(machineInfo)
169+
if err != nil {
170+
return nil, err
171+
}
172+
160173
switch policyName(cpuPolicyName) {
161174

162175
case PolicyNone:
@@ -166,10 +179,6 @@ func NewManager(cpuPolicyName string, cpuPolicyOptions map[string]string, reconc
166179
}
167180

168181
case PolicyStatic:
169-
topo, err = topology.Discover(machineInfo)
170-
if err != nil {
171-
return nil, err
172-
}
173182
klog.InfoS("Detected CPU topology", "topology", topo)
174183

175184
reservedCPUs, ok := nodeAllocatableReservation[v1.ResourceCPU]
@@ -206,6 +215,7 @@ func NewManager(cpuPolicyName string, cpuPolicyOptions map[string]string, reconc
206215
topology: topo,
207216
nodeAllocatableReservation: nodeAllocatableReservation,
208217
stateFileDirectory: stateFileDirectory,
218+
allCPUs: topo.CPUDetails.CPUs(),
209219
}
210220
manager.sourcesReady = &sourcesReadyStub{}
211221
return manager, nil
@@ -340,6 +350,10 @@ func (m *manager) GetAllocatableCPUs() cpuset.CPUSet {
340350
return m.allocatableCPUs.Clone()
341351
}
342352

353+
func (m *manager) GetAllCPUs() cpuset.CPUSet {
354+
return m.allCPUs.Clone()
355+
}
356+
343357
type reconciledContainer struct {
344358
podName string
345359
containerName string

pkg/kubelet/cm/cpumanager/cpu_manager_test.go

Lines changed: 2 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -645,15 +645,8 @@ func TestCPUManagerGenerate(t *testing.T) {
645645
if rawMgr.policy.Name() != testCase.expectedPolicy {
646646
t.Errorf("Unexpected policy name. Have: %q wants %q", rawMgr.policy.Name(), testCase.expectedPolicy)
647647
}
648-
if rawMgr.policy.Name() == string(PolicyNone) {
649-
if rawMgr.topology != nil {
650-
t.Errorf("Expected topology to be nil for 'none' policy. Have: %q", rawMgr.topology)
651-
}
652-
}
653-
if rawMgr.policy.Name() != string(PolicyNone) {
654-
if rawMgr.topology == nil {
655-
t.Errorf("Expected topology to be non-nil for policy '%v'. Have: %q", rawMgr.policy.Name(), rawMgr.topology)
656-
}
648+
if rawMgr.topology == nil {
649+
t.Errorf("Expected topology to be non-nil for policy '%v'. Have: %q", rawMgr.policy.Name(), rawMgr.topology)
657650
}
658651
}
659652
})

pkg/kubelet/cm/cpumanager/fake_cpu_manager.go

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -85,6 +85,11 @@ func (m *fakeManager) GetCPUAffinity(podUID, containerName string) cpuset.CPUSet
8585
return cpuset.CPUSet{}
8686
}
8787

88+
func (m *fakeManager) GetAllCPUs() cpuset.CPUSet {
89+
klog.InfoS("GetAllCPUs")
90+
return cpuset.CPUSet{}
91+
}
92+
8893
// NewFakeManager creates empty/fake cpu manager
8994
func NewFakeManager() Manager {
9095
return &fakeManager{

pkg/kubelet/cm/node_container_manager_linux.go

Lines changed: 3 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -31,12 +31,9 @@ import (
3131
utilfeature "k8s.io/apiserver/pkg/util/feature"
3232
"k8s.io/klog/v2"
3333
kubefeatures "k8s.io/kubernetes/pkg/features"
34-
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
35-
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
3634
"k8s.io/kubernetes/pkg/kubelet/events"
3735
"k8s.io/kubernetes/pkg/kubelet/stats/pidlimit"
3836
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
39-
"k8s.io/utils/cpuset"
4037
)
4138

4239
const (
@@ -199,30 +196,14 @@ func (cm *containerManagerImpl) getCgroupConfig(rl v1.ResourceList) *ResourceCon
199196
// An alternative is to delegate the `cpuset` cgroup to the kubelet, but that would require some plumbing in libcontainer,
200197
// and this is sufficient.
201198
// Only do so on None policy, as Static policy will do its own updating of the cpuset.
202-
if cm.NodeConfig.CPUManagerPolicy == string(cpumanager.PolicyNone) {
203-
if cm.allCPUs.IsEmpty() {
204-
cm.allCPUs = cm.getAllCPUs()
205-
}
206-
rc.CPUSet = cm.allCPUs
199+
// Please see the comment on policy none's GetAllocatableCPUs
200+
if cm.cpuManager.GetAllocatableCPUs().IsEmpty() {
201+
rc.CPUSet = cm.cpuManager.GetAllCPUs()
207202
}
208203

209204
return &rc
210205
}
211206

212-
func (cm *containerManagerImpl) getAllCPUs() cpuset.CPUSet {
213-
machineInfo, err := cm.cadvisorInterface.MachineInfo()
214-
if err != nil {
215-
klog.V(4).InfoS("Failed to get machine info to get default cpuset", "error", err)
216-
return cpuset.CPUSet{}
217-
}
218-
topo, err := topology.Discover(machineInfo)
219-
if err != nil {
220-
klog.V(4).InfoS("Failed to get topology info to get default cpuset", "error", err)
221-
return cpuset.CPUSet{}
222-
}
223-
return topo.CPUDetails.CPUs()
224-
}
225-
226207
// GetNodeAllocatableAbsolute returns the absolute value of Node Allocatable which is primarily useful for enforcement.
227208
// Note that not all resources that are available on the node are included in the returned list of resources.
228209
// Returns a ResourceList.

0 commit comments

Comments
 (0)