Skip to content

[ws-daemon] Support rootfs quota #7260

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 2 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions components/ws-daemon/pkg/container/container.go
Original file line number Diff line number Diff line change
Expand Up @@ -66,4 +66,8 @@ type ID string
// OptsContainerRootfs provides options for the ContainerRootfs function
type OptsContainerRootfs struct {
// Unmapped presumably selects the rootfs path without ID-mapping applied —
// NOTE(review): semantics not visible in this hunk, confirm against ContainerRootfs impl.
Unmapped bool

// UpperDir selects the upperdir of the rootfs, rather than the rootfs mountpoint itself.
// If the container's rootfs is not an overlayfs, an error is returned.
UpperDir bool
}
11 changes: 9 additions & 2 deletions components/ws-daemon/pkg/container/containerd.go
Original file line number Diff line number Diff line change
Expand Up @@ -412,9 +412,16 @@ func (s *Containerd) ContainerRootfs(ctx context.Context, id ID, opts OptsContai
// We can't get the rootfs location on the node from containerd somehow.
// As a workaround we'll look at the node's mount table using the snapshotter key.
// This feels brittle and we should keep looking for a better way.
mnt, err := s.Mounts.GetMountpoint(func(mountPoint string) bool {
matcher := func(mountPoint string) bool {
return strings.Contains(mountPoint, info.SnapshotKey)
})
}

var mnt string
if opts.UpperDir {
mnt, err = s.Mounts.GetUpperdir(matcher)
} else {
mnt, err = s.Mounts.GetMountpoint(matcher)
}
if err != nil {
return
}
Expand Down
12 changes: 7 additions & 5 deletions components/ws-daemon/pkg/daemon/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import (
"github.com/gitpod-io/gitpod/ws-daemon/pkg/diskguard"
"github.com/gitpod-io/gitpod/ws-daemon/pkg/hosts"
"github.com/gitpod-io/gitpod/ws-daemon/pkg/iws"
"github.com/gitpod-io/gitpod/ws-daemon/pkg/quota"
"github.com/gitpod-io/gitpod/ws-daemon/pkg/resources"
)

Expand All @@ -18,11 +19,12 @@ type Config struct {
Runtime RuntimeConfig `json:"runtime"`
ReadinessSignal ReadinessSignalConfig `json:"readiness"`

Content content.Config `json:"content"`
Uidmapper iws.UidmapperConfig `json:"uidmapper"`
Resources resources.Config `json:"resources"`
Hosts hosts.Config `json:"hosts"`
DiskSpaceGuard diskguard.Config `json:"disk"`
Content content.Config `json:"content"`
Uidmapper iws.UidmapperConfig `json:"uidmapper"`
Resources resources.Config `json:"resources"`
Hosts hosts.Config `json:"hosts"`
DiskSpaceGuard diskguard.Config `json:"disk"`
ContainerRootfsQuota quota.Size `json:"containerRootfsQuota"`
}

type RuntimeConfig struct {
Expand Down
13 changes: 11 additions & 2 deletions components/ws-daemon/pkg/daemon/daemon.go
Original file line number Diff line number Diff line change
Expand Up @@ -46,17 +46,26 @@ func NewDaemon(config Config, reg prometheus.Registerer) (*Daemon, error) {
if nodename == "" {
return nil, xerrors.Errorf("NODENAME env var isn't set")
}

cgCustomizer := &CgroupCustomizer{}
cgCustomizer.WithCgroupBasePath(config.Resources.CGroupsBasePath)
markUnmountFallback, err := NewMarkUnmountFallback(reg)
if err != nil {
return nil, err
}
dsptch, err := dispatch.NewDispatch(containerRuntime, clientset, config.Runtime.KubernetesNamespace, nodename,
listener := []dispatch.Listener{
resources.NewDispatchListener(&config.Resources, reg),
cgCustomizer,
markUnmountFallback,
)
}

if config.ContainerRootfsQuota != 0 {
listener = append(listener, &ContainerRootFSQuotaEnforcer{
Quota: config.ContainerRootfsQuota,
})
}

dsptch, err := dispatch.NewDispatch(containerRuntime, clientset, config.Runtime.KubernetesNamespace, nodename, listener...)
if err != nil {
return nil, err
}
Expand Down
50 changes: 50 additions & 0 deletions components/ws-daemon/pkg/daemon/rootfsquota.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
// Copyright (c) 2021 Gitpod GmbH. All rights reserved.
// Licensed under the GNU Affero General Public License (AGPL).
// See License-AGPL.txt in the project root for license information.

package daemon

import (
"context"

"github.com/gitpod-io/gitpod/common-go/log"
"github.com/gitpod-io/gitpod/ws-daemon/pkg/container"
"github.com/gitpod-io/gitpod/ws-daemon/pkg/dispatch"
"github.com/gitpod-io/gitpod/ws-daemon/pkg/quota"
"golang.org/x/xerrors"
)

// ContainerRootFSQuotaEnforcer is a dispatch listener that places a disk
// quota on a workspace container's root filesystem when the workspace is added.
type ContainerRootFSQuotaEnforcer struct {
// Quota is the size limit applied to the container's rootfs upperdir.
Quota quota.Size
}

// WorkspaceAdded enforces the configured rootfs quota on a newly added
// workspace container by setting an XFS project quota on the container's
// overlayfs upperdir. It returns an error if the dispatch is unavailable,
// the rootfs location cannot be determined, or the quota cannot be set.
func (c *ContainerRootFSQuotaEnforcer) WorkspaceAdded(ctx context.Context, ws *dispatch.Workspace) error {
	disp := dispatch.GetFromContext(ctx)
	if disp == nil {
		return xerrors.Errorf("no dispatch available")
	}

	// The upperdir holds everything the container writes on top of its image,
	// which is exactly what this quota is meant to bound.
	loc, err := disp.Runtime.ContainerRootfs(ctx, ws.ContainerID, container.OptsContainerRootfs{
		Unmapped: false,
		UpperDir: true,
	})
	if err != nil {
		return xerrors.Errorf("cannot find container rootfs: %w", err)
	}

	// TODO(cw): create one FS for all of those operations for performance/memory optimisation
	fs, err := quota.NewXFS(loc)
	if err != nil {
		return xerrors.Errorf("XFS is not supported: %w", err)
	}

	// TODO(cw): we'll need to clean up the used prjquota's - otherwise we'll run out of them on a busy node
	_, err = fs.SetQuota(loc, c.Quota)
	if err != nil {
		return xerrors.Errorf("cannot enforce rootfs quota: %w", err)
	}

	// Fixed typo in the original log message ("quopta" -> "quota").
	log.WithField("location", loc).WithField("quota", c.Quota).Info("quota for workspace root FS created")

	return nil
}