Skip to content

Commit

Permalink
DAOS-16327 control: Update dmg storage query usage for MD-on-SSD P2 (#15418)
Browse files Browse the repository at this point in the history

Update dmg storage query usage for MD-on-SSD P2

Signed-off-by: Tom Nabarro <[email protected]>
  • Loading branch information
tanabarr authored Nov 11, 2024
1 parent 083d786 commit d4070e8
Show file tree
Hide file tree
Showing 28 changed files with 1,187 additions and 288 deletions.
450 changes: 331 additions & 119 deletions docs/admin/pool_operations.md

Large diffs are not rendered by default.

50 changes: 30 additions & 20 deletions src/control/cmd/dmg/pretty/pool.go
Original file line number Diff line number Diff line change
Expand Up @@ -57,32 +57,45 @@ func printTierBytesRow(fmtName string, tierBytes uint64, numRanks int) txtfmt.Ta
}
}

func getPoolCreateRespRows(mdOnSSD bool, tierBytes []uint64, tierRatios []float64, numRanks int) (title string, rows []txtfmt.TableRow) {
func getPoolCreateRespRows(tierBytes []uint64, tierRatios []float64, numRanks int) (title string, rows []txtfmt.TableRow) {
title = "Pool created with "
tierName := "SCM"
if mdOnSSD {
tierName = "Metadata"
}

for tierIdx, tierRatio := range tierRatios {
if tierIdx > 0 {
title += ","
tierName = "NVMe"
if mdOnSSD {
tierName = "Data"
}
}

title += PrintTierRatio(tierRatio)
fmtName := fmt.Sprintf("Storage tier %d (%s)", tierIdx, tierName)
if mdOnSSD {
fmtName = tierName + " Storage"
rows = append(rows, printTierBytesRow(fmtName, tierBytes[tierIdx], numRanks))
}
title += " storage tier ratio"

return
}

// getPoolCreateRespRowsMDOnSSD generates a summary title describing the tier
// ratios and one table row per storage tier for a pool create response in
// MD-on-SSD mode. Tier 0 is labeled Metadata and subsequent tiers Data, and a
// trailing "Memory File Size" row reports memFileBytes.
//
// Fix: the previous text contained a premature "return title, rows" before
// the memory-file row was appended, making that row unreachable.
func getPoolCreateRespRowsMDOnSSD(tierBytes []uint64, tierRatios []float64, numRanks int, memFileBytes uint64) (title string, rows []txtfmt.TableRow) {
	title = "Pool created with "
	tierName := "Metadata"

	for tierIdx, tierRatio := range tierRatios {
		if tierIdx > 0 {
			title += ","
			tierName = "Data"
		}

		title += PrintTierRatio(tierRatio)
		fmtName := tierName + " Storage"
		rows = append(rows, printTierBytesRow(fmtName, tierBytes[tierIdx], numRanks))
	}
	title += " storage tier ratio"

	// Print memory-file size for MD-on-SSD.
	rows = append(rows, printTierBytesRow("Memory File Size", memFileBytes, numRanks))

	return
}

// PrintPoolCreateResponse generates a human-readable representation of the pool create
Expand Down Expand Up @@ -122,17 +135,14 @@ func PrintPoolCreateResponse(pcr *control.PoolCreateResp, out io.Writer, opts ..
"Total Size": humanize.Bytes(totalSize * uint64(numRanks)),
})

mdOnSsdEnabled := pcr.MemFileBytes > 0

title, tierRows := getPoolCreateRespRows(mdOnSsdEnabled, pcr.TierBytes, tierRatios,
numRanks)

// Print memory-file to meta-blob ratio for MD-on-SSD.
if mdOnSsdEnabled {
tierRows = append(tierRows, printTierBytesRow("Memory File Size",
pcr.MemFileBytes, numRanks))
var title string
var tierRows []txtfmt.TableRow
if pcr.MemFileBytes > 0 {
title, tierRows = getPoolCreateRespRowsMDOnSSD(pcr.TierBytes, tierRatios, numRanks,
pcr.MemFileBytes)
} else {
title, tierRows = getPoolCreateRespRows(pcr.TierBytes, tierRatios, numRanks)
}

fmtArgs = append(fmtArgs, tierRows...)

_, err := fmt.Fprintln(out, txtfmt.FormatEntity(title, fmtArgs))
Expand Down
2 changes: 1 addition & 1 deletion src/control/cmd/dmg/pretty/storage_nvme_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ func TestPretty_PrintNVMeController(t *testing.T) {
ctrlrWithSmd := func(idx int32, roleBits int) *storage.NvmeController {
c := storage.MockNvmeController(idx)
sd := storage.MockSmdDevice(nil, idx)
sd.Roles = storage.BdevRoles{storage.OptionBits(roleBits)}
sd.Roles = storage.BdevRolesFromBits(roleBits)
sd.Rank = ranklist.Rank(idx)
c.SmdDevices = []*storage.SmdDevice{sd}
return c
Expand Down
248 changes: 240 additions & 8 deletions src/control/cmd/dmg/pretty/storage_query.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,21 +9,29 @@ package pretty
import (
"fmt"
"io"
"sort"
"strings"

"github.com/dustin/go-humanize"
"github.com/pkg/errors"

"github.com/daos-stack/daos/src/control/common"
"github.com/daos-stack/daos/src/control/lib/control"
"github.com/daos-stack/daos/src/control/lib/txtfmt"
"github.com/daos-stack/daos/src/control/server/storage"
)

var (
errNoMetaRole = errors.New("no meta role detected")
errInconsistentRoles = errors.New("roles inconsistent between hosts")
errInsufficientScan = errors.New("insufficient info in scan response")
)

// PrintHostStorageUsageMap generates a human-readable representation of the supplied
// HostStorageMap struct and writes utilization info to the supplied io.Writer.
func PrintHostStorageUsageMap(hsm control.HostStorageMap, out io.Writer) error {
func PrintHostStorageUsageMap(hsm control.HostStorageMap, out io.Writer) {
if len(hsm) == 0 {
return nil
return
}

hostsTitle := "Hosts"
Expand All @@ -44,19 +52,243 @@ func PrintHostStorageUsageMap(hsm control.HostStorageMap, out io.Writer) error {
hosts := getPrintHosts(hss.HostSet.RangedString())
row := txtfmt.TableRow{hostsTitle: hosts}
storage := hss.HostStorage
row[scmTitle] = humanize.Bytes(storage.ScmNamespaces.Total())
row[scmFreeTitle] = humanize.Bytes(storage.ScmNamespaces.Free())
row[scmUsageTitle] = storage.ScmNamespaces.PercentUsage()
row[nvmeTitle] = humanize.Bytes(storage.NvmeDevices.Total())
row[nvmeFreeTitle] = humanize.Bytes(storage.NvmeDevices.Free())
row[nvmeUsageTitle] = storage.NvmeDevices.PercentUsage()

sns := storage.ScmNamespaces
row[scmTitle] = humanize.Bytes(sns.Total())
scmFree := sns.Free()
row[scmFreeTitle] = humanize.Bytes(scmFree)
row[scmUsageTitle] = common.PercentageString(sns.Total()-scmFree, sns.Total())

ncs := storage.NvmeDevices
row[nvmeTitle] = humanize.Bytes(ncs.Total())
nvmeFree := ncs.Free()
row[nvmeFreeTitle] = humanize.Bytes(nvmeFree)
row[nvmeUsageTitle] = common.PercentageString(ncs.Total()-nvmeFree, ncs.Total())

table = append(table, row)
}

tablePrint.Format(table)
}

// Aliases for the bdev role bits inspected in this file, plus the shared
// rank-column title used by the MD-on-SSD usage tables below.
const (
	metaRole  = storage.BdevRoleMeta
	dataRole  = storage.BdevRoleData
	rankTitle = "Rank"
)

// getTierRolesForHost extracts the role combinations assigned to the meta and
// data tiers from one host's scanned NVMe controllers and checks them for
// consistency with values recorded from previously processed hosts.
//
// The first controller seen with a META role defines the meta-tier role set;
// the first with a DATA role but no META role defines the data-tier role set.
// On the first call (metaRolesLast empty) the discovered values are written
// back through metaRolesLast/dataRolesLast; on later calls they are compared
// instead and errInconsistentRoles is returned on any mismatch. errNoMetaRole
// is returned if no controller on the host carries a META role.
func getTierRolesForHost(nvme storage.NvmeControllers, metaRolesLast, dataRolesLast *storage.BdevRoles) error {
	// Keyed by role bit (metaRole/dataRole); holds the full role
	// combination of the first controller matching that role.
	roles := make(map[int]*storage.BdevRoles)
	for _, c := range nvme {
		if c.Roles().HasMeta() {
			if _, exists := roles[metaRole]; !exists {
				roles[metaRole] = c.Roles()
			}
		} else if c.Roles().HasData() {
			if _, exists := roles[dataRole]; !exists {
				roles[dataRole] = c.Roles()
			}
		}
	}

	if roles[metaRole].IsEmpty() {
		return errNoMetaRole
	}

	if !metaRolesLast.IsEmpty() {
		// Indicates valid "last" values exist so check consistency.
		if *roles[metaRole] != *metaRolesLast {
			return errInconsistentRoles
		}
		if roles[dataRole].IsEmpty() {
			if !dataRolesLast.IsEmpty() {
				return errInconsistentRoles
			}
		} else {
			if *roles[dataRole] != *dataRolesLast {
				return errInconsistentRoles
			}
		}
	} else {
		// First host processed; record roles for later comparisons.
		*metaRolesLast = *roles[metaRole]
		if !roles[dataRole].IsEmpty() {
			*dataRolesLast = *roles[dataRole]
		}
	}

	return nil
}

// printTierRolesTable renders a Tier/Roles table to out listing the role
// combination of each tier that carries a META or DATA role (currently the
// tier list is hardcoded to META/DATA but can be extended). It returns the
// role combinations shown, in tier order, or an error if the scan response
// is inconsistent between hosts or lacks a meta role entirely. Diagnostic
// detail is written to dbg on failure.
func printTierRolesTable(hsm control.HostStorageMap, out, dbg io.Writer) ([]storage.BdevRoles, error) {
	tierTitle := "Tier"
	rolesTitle := "Roles"

	tablePrint := txtfmt.NewTableFormatter(tierTitle, rolesTitle)
	tablePrint.InitWriter(out)

	// Gather meta/data role combinations across all hosts, verifying
	// host-to-host consistency as we go.
	var metaRoles, dataRoles storage.BdevRoles
	for _, key := range hsm.Keys() {
		if err := getTierRolesForHost(hsm[key].HostStorage.NvmeDevices, &metaRoles, &dataRoles); err != nil {
			hSet := hsm[key].HostSet
			fmt.Fprintf(dbg, "scan resp for hosts %q: %+v\n", hSet, hsm[key].HostStorage)
			return nil, errors.Wrapf(err, "hosts %q", hSet)
		}
	}

	if metaRoles.IsEmpty() {
		fmt.Fprintf(dbg, "scan resp: %+v\n", hsm)
		return nil, errInsufficientScan
	}

	// A data row only appears when the data role lives on a tier separate
	// from the meta role.
	rolesToShow := []storage.BdevRoles{metaRoles}
	if !dataRoles.IsEmpty() {
		rolesToShow = append(rolesToShow, dataRoles)
	}

	// Tier indices start at 1 for display purposes.
	table := make([]txtfmt.TableRow, 0, len(rolesToShow))
	for i, roles := range rolesToShow {
		table = append(table, txtfmt.TableRow{
			tierTitle:  fmt.Sprintf("T%d", i+1),
			rolesTitle: roles.String(),
		})
	}

	tablePrint.Format(table)
	return rolesToShow, nil
}

// getRowTierTitles returns the three usage-table column titles for tier i:
// total bytes, free bytes (labeled "Usable" when showUsable is set) and
// usage percentage.
func getRowTierTitles(i int, showUsable bool) []string {
	freeLabel := "Free"
	if showUsable {
		freeLabel = "Usable"
	}

	return []string{
		fmt.Sprintf("T%d-Total", i),
		fmt.Sprintf("T%d-%s", i, freeLabel),
		fmt.Sprintf("T%d-Usage", i),
	}
}

// roleDevsMap groups scanned NVMe controllers by their bdev role combination.
type roleDevsMap map[storage.BdevRoles]storage.NvmeControllers

// rankRoleDevsMap indexes roleDevsMap entries by rank.
type rankRoleDevsMap map[int]roleDevsMap

// iterRankRoleDevs walks the supplied NVMe controllers and groups them into
// rankRoleDevs, first by rank and then by role combination. Controllers with
// no SMD device details are skipped (noted on dbg) and only the role
// combinations listed in tierRoles are collected. Returns an error if a
// controller unexpectedly reports nil roles.
func iterRankRoleDevs(nvme storage.NvmeControllers, tierRoles []storage.BdevRoles, dbg io.Writer, rankRoleDevs rankRoleDevsMap) error {
	for _, nd := range nvme {
		// Rank/role details come via the first SMD device entry.
		if len(nd.SmdDevices) == 0 || nd.SmdDevices[0] == nil {
			fmt.Fprintf(dbg, "no smd for %s\n", nd.PciAddr)
			continue
		}
		rank := int(nd.Rank())
		if _, exists := rankRoleDevs[rank]; !exists {
			rankRoleDevs[rank] = make(roleDevsMap)
		}
		roles := nd.Roles()
		if roles == nil {
			return errors.New("unexpected nil roles")
		}
		// Bucket the controller under the first tier role set it matches.
		for _, rolesWant := range tierRoles {
			if *roles != rolesWant {
				continue
			}
			fmt.Fprintf(dbg, "add r%d-%s roles %q tot/avail/usabl %d/%d/%d\n", rank,
				nd.PciAddr, roles, nd.Total(), nd.Free(), nd.Usable())
			rankRoleDevs[rank][rolesWant] = append(
				rankRoleDevs[rank][rolesWant], nd)
			break
		}
	}

	return nil
}

// getRankRolesRow assembles one usage-table row for the given rank, with a
// total/free(or usable)/usage column group per tier in tierRoles. Tiers with
// no matching devices on this rank are dashed out.
func getRankRolesRow(rank int, tierRoles []storage.BdevRoles, roleDevs roleDevsMap, showUsable bool) txtfmt.TableRow {
	row := txtfmt.TableRow{rankTitle: fmt.Sprintf("%d", rank)}

	for i, roles := range tierRoles {
		titles := getRowTierTitles(i+1, showUsable)
		totalTitle, freeTitle, usageTitle := titles[0], titles[1], titles[2]

		devs, exists := roleDevs[roles]
		if !exists {
			// No devices with this role combination on this rank.
			for _, t := range titles {
				row[t] = "-"
			}
			continue
		}

		total := devs.Total()
		free := devs.Free()
		// Special case: SSDs with META but without DATA should show usable
		// space as bytes available in regards to META space. Usable bytes is
		// only calculated for SSDs with the DATA role.
		metaOnly := roles.HasMeta() && !roles.HasData()
		if showUsable && !metaOnly {
			free = devs.Usable()
		}

		row[totalTitle] = humanize.Bytes(total)
		row[freeTitle] = humanize.Bytes(free)
		row[usageTitle] = common.PercentageString(total-free, total)
	}

	return row
}

// printTierUsageTable renders the per-rank usage table to out, with one row
// per rank and one column group per tier in tierRoles. showUsable switches
// the free-bytes column to usable bytes; dbg receives diagnostic output.
// Returns an error when tierRoles is empty or device iteration fails.
func printTierUsageTable(hsm control.HostStorageMap, tierRoles []storage.BdevRoles, out, dbg io.Writer, showUsable bool) error {
	if len(tierRoles) == 0 {
		return errors.New("no table role data to show")
	}

	titles := []string{rankTitle}
	for i := range tierRoles {
		titles = append(titles, getRowTierTitles(i+1, showUsable)...)
	}

	tablePrint := txtfmt.NewTableFormatter(titles...)
	tablePrint.InitWriter(out)

	// Build controllers-to-roles-to-rank map across all hosts.
	rankRoleDevs := make(rankRoleDevsMap)
	for _, key := range hsm.Keys() {
		nvme := hsm[key].HostStorage.NvmeDevices
		if err := iterRankRoleDevs(nvme, tierRoles, dbg, rankRoleDevs); err != nil {
			return errors.Wrapf(err, "host %q", hsm[key].HostSet)
		}
	}

	// Emit rows in ascending rank order for deterministic output.
	ranks := make([]int, 0, len(rankRoleDevs))
	for rank := range rankRoleDevs {
		ranks = append(ranks, rank)
	}
	sort.Ints(ranks)

	table := make([]txtfmt.TableRow, 0, len(ranks))
	for _, rank := range ranks {
		table = append(table, getRankRolesRow(rank, tierRoles, rankRoleDevs[rank], showUsable))
	}

	tablePrint.Format(table)
	return nil
}

// PrintHostStorageUsageMapMdOnSsd generates a human-readable representation
// of the supplied HostStorageMap and writes utilization info to out in a
// format relevant to MD-on-SSD mode: a tier-roles table followed by a
// per-rank usage table. dbg receives diagnostic output and showUsable
// selects usable- rather than free-bytes reporting.
func PrintHostStorageUsageMapMdOnSsd(hsm control.HostStorageMap, out, dbg io.Writer, showUsable bool) error {
	if len(hsm) == 0 {
		// Nothing to print for an empty scan response.
		return nil
	}

	roles, err := printTierRolesTable(hsm, out, dbg)
	if err != nil {
		return err
	}

	// Blank line separates the roles table from the usage table.
	fmt.Fprintf(out, "\n")

	return printTierUsageTable(hsm, roles, out, dbg, showUsable)
}

// NVMe controller namespace ID (NSID) should only be displayed if >= 1. Zero value should be
// ignored in display output.
func printSmdDevice(dev *storage.SmdDevice, iw io.Writer, opts ...PrintConfigOption) error {
Expand Down
Loading

0 comments on commit d4070e8

Please sign in to comment.