daemon/disk_usage.go
package daemon

import (
	"fmt"
	"sync/atomic"

	"golang.org/x/net/context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/directory"
	"github.com/docker/docker/volume"
	"github.com/opencontainers/go-digest"
	"github.com/sirupsen/logrus"
)
 
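// getLayerRefs returns a reference count per layer chain ID, derived from the
// images in the image store. Untagged images that still have children are
// skipped, so only top-level and dangling images contribute to the counts.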
func (daemon *Daemon) getLayerRefs() map[layer.ChainID]int {
	tmpImages := daemon.imageStore.Map()
	layerRefs := map[layer.ChainID]int{}
	for id, img := range tmpImages {
		dgst := digest.Digest(id)
		if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.imageStore.Children(id)) != 0 {
			continue
		}

		// Rebuild the chain IDs from an empty copy of the image's rootfs so
		// that every intermediate layer in the chain is counted as referenced.
		rootFS := *img.RootFS
		rootFS.DiffIDs = nil
		for _, id := range img.RootFS.DiffIDs {
			rootFS.Append(id)
			chid := rootFS.ChainID()
			layerRefs[chid]++
		}
	}

	return layerRefs
}
 
// SystemDiskUsage returns information about the daemon data disk usage
func (daemon *Daemon) SystemDiskUsage(ctx context.Context) (*types.DiskUsage, error) {
	// Only one disk usage calculation may run at a time; concurrent requests
	// fail immediately instead of queuing up.
	if !atomic.CompareAndSwapInt32(&daemon.diskUsageRunning, 0, 1) {
		return nil, fmt.Errorf("a disk usage operation is already running")
	}
	defer atomic.StoreInt32(&daemon.diskUsageRunning, 0)
	// Retrieve container list
	allContainers, err := daemon.Containers(&types.ContainerListOptions{
		Size: true,
		All:  true,
	})
	if err != nil {
		return nil, fmt.Errorf("failed to retrieve container list: %v", err)
	}
 
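	// Only top-level (non-intermediate) images are listed; the extra
	// attributes add the shared-size and container-count fields to each summary.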
	// Get all top images with extra attributes
	allImages, err := daemon.Images(filters.NewArgs(), false, true)
	if err != nil {
		return nil, fmt.Errorf("failed to retrieve image list: %v", err)
	}
 
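	// Sizing a volume means walking its whole directory tree with
	// directory.Size, which can be expensive for large volumes.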
	// Get all local volumes
	allVolumes := []*types.Volume{}
	getLocalVols := func(v volume.Volume) error {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
			if d, ok := v.(volume.DetailedVolume); ok {
				// skip local volumes with mount options since these could have external
				// mounted filesystems that will be slow to enumerate.
				if len(d.Options()) > 0 {
					return nil
				}
			}
			name := v.Name()
			refs := daemon.volumes.Refs(v)

			tv := volumeToAPIType(v)
			sz, err := directory.Size(v.Path())
			if err != nil {
				logrus.Warnf("failed to determine size of volume %v", name)
				sz = -1
			}
			tv.UsageData = &types.VolumeUsageData{Size: sz, RefCount: int64(len(refs))}
			allVolumes = append(allVolumes, tv)
		}

		return nil
	}

	err = daemon.traverseLocalVolumes(getLocalVols)
	if err != nil {
		return nil, err
	}
 
	// Get total layers size on disk. Only layers that are still referenced by
	// an image count towards the total; unreferenced layers are logged as leaked.
	var allLayersSize int64
	layerRefs := daemon.getLayerRefs()
	for _, ls := range daemon.layerStores {
		allLayers := ls.Map()
		for _, l := range allLayers {
			select {
			case <-ctx.Done():
				return nil, ctx.Err()
			default:
				size, err := l.DiffSize()
				if err == nil {
					if _, ok := layerRefs[l.ChainID()]; ok {
						allLayersSize += size
					} else {
						logrus.Warnf("found leaked image layer %v", l.ChainID())
					}
				} else {
					logrus.Warnf("failed to get diff size for layer %v", l.ChainID())
				}
			}
		}
	}
 
	return &types.DiskUsage{
		LayersSize: allLayersSize,
		Containers: allContainers,
		Volumes:    allVolumes,
		Images:     allImages,
	}, nil
}