author    axtloss <axtlos@getcryst.al>  2024-02-18 01:19:58 +0100
committer axtloss <axtlos@getcryst.al>  2024-02-18 01:19:58 +0100
commit    e6c12b02674a04ca34e27b46f6ca3261fca3c677 (patch)
tree      0d5d785428436bf3fcc751eb8e2be4ca0b3bfe04
parent    6a3db4c72485aba6187466005473e287f539e3ce (diff)
Add comments and function descriptions
Diffstat
-rw-r--r--  cmd/verify.go                14
-rw-r--r--  core/crypt.go                 6
-rw-r--r--  core/storage.go              40
-rw-r--r--  core/verification.go         24
-rw-r--r--  verifysetup/cmd/setup.go     21
-rw-r--r--  verifysetup/core/crypt.go     3
-rw-r--r--  verifysetup/core/storage.go  10
-rw-r--r--  verifysetup/go.sum            8
8 files changed, 113 insertions(+), 13 deletions(-)
diff --git a/cmd/verify.go b/cmd/verify.go
index 401787f..d0360f6 100644
--- a/cmd/verify.go
+++ b/cmd/verify.go
@@ -25,6 +25,7 @@ func NewVerifyCommand() *cobra.Command {
return cmd
}
+// validateThread validates a chain of nodes against a given byte slice
func validateThread(blockStart int, blockEnd int, bundleSize int, diskBytes []byte, n int, dbfile string, waitGroup *sync.WaitGroup, errChan chan error) {
defer waitGroup.Done()
defer close(errChan)
@@ -88,14 +89,17 @@ func ValidateCommand(_ *cobra.Command, args []string) error {
return fmt.Errorf("Usage: fsverify verify [disk]")
}
header, err := core.ReadHeader(config.FsVerifyPart)
+ if err != nil {
+ return err
+ }
+ // Check whether the partition is actually an fsverify partition.
+ // This does not detect tampering; it only checks that the specified
+ // partition carries the fsverify magic number.
if header.MagicNumber != 0xACAB {
return fmt.Errorf("sanity bit does not match. Expected %d, got %d", 0xACAB, header.MagicNumber)
}
- if err != nil {
- return err
- }
fmt.Println("Reading DB")
dbfile, err := core.ReadDB(config.FsVerifyPart)
if err != nil {
@@ -127,6 +131,8 @@ func ValidateCommand(_ *cobra.Command, args []string) error {
}
diskSize := diskInfo.Size()
+ // If the filesystem has grown since the fsverify partition was created,
+ // fsverify would be unable to verify the entire partition, making it useless.
if header.FilesystemSize*header.FilesystemUnit != int(diskSize) {
return fmt.Errorf("disk size does not match disk size specified in header. Expected %d, got %d", header.FilesystemSize*header.FilesystemUnit, diskSize)
}
@@ -142,6 +148,8 @@ func ValidateCommand(_ *cobra.Command, args []string) error {
errChan := make(chan error)
validateFailed = false
for i := 0; i < config.ProcCount; i++ {
+ // To ensure that each thread only reads the byte area assigned to it,
+ // a copy of that area is made for each thread.
diskBytes, err := core.CopyByteArea(i*(int(bundleSize)), (i+1)*(int(bundleSize)), reader)
if err != nil {
fmt.Println("Failed to copy byte area ", i*int(bundleSize), " ", (i+1)+int(bundleSize))
diff --git a/core/crypt.go b/core/crypt.go
index 19a8420..067a0b3 100644
--- a/core/crypt.go
+++ b/core/crypt.go
@@ -8,6 +8,7 @@ import (
"strings"
)
+// calculateStringHash calculates the sha1 checksum of a given string a.
func calculateStringHash(a string) (string, error) {
hash := sha1.New()
hash.Write([]byte(a))
@@ -15,9 +16,10 @@ func calculateStringHash(a string) (string, error) {
return strings.TrimSpace(fmt.Sprintf("%x", hashInBytes)), nil
}
-func CalculateBlockHash(block []byte) (string, error) {
+// CalculateBlockHash calculates the sha1 checksum of a given byte slice b.
+func CalculateBlockHash(b []byte) (string, error) {
hash := sha1.New()
- if _, err := io.Copy(hash, bytes.NewReader(block)); err != nil {
+ if _, err := io.Copy(hash, bytes.NewReader(b)); err != nil {
return "", err
}
hashInBytes := hash.Sum(nil)[:20]
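
A short usage sketch for the block hash: the helper below mirrors CalculateBlockHash locally so it runs standalone, and shows that the checksum of a 2000-byte block comes out as a 40-character hex string, the same form the Node checksum fields carry.

package main

import (
	"bytes"
	"crypto/sha1"
	"fmt"
	"io"
	"strings"
)

// calculateBlockHash mirrors core.CalculateBlockHash: it hashes a byte slice
// with SHA-1 and returns the checksum as a lower-case hex string.
func calculateBlockHash(b []byte) (string, error) {
	hash := sha1.New()
	if _, err := io.Copy(hash, bytes.NewReader(b)); err != nil {
		return "", err
	}
	return strings.TrimSpace(fmt.Sprintf("%x", hash.Sum(nil)[:20])), nil
}

func main() {
	block := bytes.Repeat([]byte{0x42}, 2000) // one fsverify block is 2000 bytes
	sum, err := calculateBlockHash(block)
	if err != nil {
		panic(err)
	}
	fmt.Printf("block checksum: %s (%d hex chars)\n", sum, len(sum))
}
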
diff --git a/core/storage.go b/core/storage.go
index 7f2dac6..8628e8d 100644
--- a/core/storage.go
+++ b/core/storage.go
@@ -11,6 +11,7 @@ import (
"os"
)
+// Header contains all information stored in the header of an fsverify partition.
type Header struct {
MagicNumber int
Signature string
@@ -20,6 +21,8 @@ type Header struct {
TableUnit int
}
+// Node contains all information stored in a database node.
+// If the Node is the first node in the database, PrevNodeSum should be set to Entrypoint.
type Node struct {
BlockStart int
BlockEnd int
@@ -27,10 +30,14 @@ type Node struct {
PrevNodeSum string
}
+// GetHash returns the hash of all fields of a Node combined.
+// The Node fields are combined in the order BlockStart, BlockEnd, BlockSum, PrevNodeSum.
func (n *Node) GetHash() (string, error) {
return calculateStringHash(fmt.Sprintf("%d%d%s%s", n.BlockStart, n.BlockEnd, n.BlockSum, n.PrevNodeSum))
}
+// parseUnitSpec parses the file size unit specified in the header and returns the corresponding multiplier.
+// In the case of an invalid Unit byte, the function returns -1.
func parseUnitSpec(size []byte) int {
switch size[0] {
case 0:
@@ -50,6 +57,8 @@ func parseUnitSpec(size []byte) int {
}
}
+// ReadHeader reads the partition header and returns it as a Header.
+// If any field fails to be read, the function returns an empty Header and the error.
func ReadHeader(partition string) (Header, error) {
_, exist := os.Stat(partition)
if os.IsNotExist(exist) {
@@ -63,6 +72,10 @@ func ReadHeader(partition string) (Header, error) {
header := Header{}
reader := bufio.NewReader(part)
+ // Since the size of each field is known in advance,
+ // the sizes are hard coded; if a field exceeds its
+ // allocated size, fsverify should (and will) fail.
MagicNumber := make([]byte, 2)
UntrustedHash := make([]byte, 100)
TrustedHash := make([]byte, 88)
@@ -114,6 +127,9 @@ func ReadHeader(partition string) (Header, error) {
return header, nil
}
+// ReadDB reads the database from an fsverify partition.
+// It verifies the size of the database against the size specified in the partition header and returns an error if the sizes do not match.
+// Due to limitations with bbolt, the database is written to a temporary path and the function returns that path.
func ReadDB(partition string) (string, error) {
_, exist := os.Stat(partition)
if os.IsNotExist(exist) {
@@ -126,6 +142,9 @@ func ReadDB(partition string) (string, error) {
defer part.Close()
reader := bufio.NewReader(part)
+ // The area taken up by the header is of no use
+ // to this reader instance and is skipped completely.
_, err = reader.Read(make([]byte, 200))
if err != nil {
fmt.Println(err)
@@ -138,11 +157,13 @@ func ReadDB(partition string) (string, error) {
return "", err
}
+ // Reading exactly the table size specified in the header protects against
+ // "lazy" tampering, where only the database was modified but not the header:
+ // in that case the database would be missing data, making it unusable.
db := make([]byte, header.TableSize*header.TableUnit)
n, err := io.ReadFull(reader, db)
if err != nil {
- fmt.Println("failed reading db")
- fmt.Println(header.TableSize * header.TableUnit)
return "", err
}
if n != header.TableSize*header.TableUnit {
@@ -150,11 +171,16 @@ func ReadDB(partition string) (string, error) {
}
fmt.Printf("db: %d\n", n)
+ // Write the database to a temporary directory
+ // to ensure that it disappears after the next reboot
temp, err := os.MkdirTemp("", "*-fsverify")
if err != nil {
return "", err
}
+ // The file permissions are set to 0700 right away;
+ // this ensures that the database is not modified
+ // after it has been written.
err = os.WriteFile(temp+"/verify.db", db, 0700)
if err != nil {
return "", err
@@ -163,6 +189,7 @@ func ReadDB(partition string) (string, error) {
return temp + "/verify.db", nil
}
+// OpenDB opens a bbolt database and returns a bbolt instance.
func OpenDB(dbpath string, readonly bool) (*bolt.DB, error) {
_, exist := os.Stat(dbpath)
if os.IsNotExist(exist) {
@@ -175,6 +202,8 @@ func OpenDB(dbpath string, readonly bool) (*bolt.DB, error) {
return db, nil
}
+// GetNode retrieves a Node from the database based on the hash identifier.
+// If db is set to nil, the function will open the database in read-only mode itself.
func GetNode(checksum string, db *bolt.DB) (Node, error) {
var err error
var deferDB bool
@@ -198,7 +227,14 @@ func GetNode(checksum string, db *bolt.DB) (Node, error) {
return node, err
}
+// CopyByteArea copies an area of bytes from a reader.
+// It verifies that the reader reads the requested number of bytes and returns an error if it does not.
func CopyByteArea(start int, end int, reader *bytes.Reader) ([]byte, error) {
+ if end-start < 0 {
+ return []byte{}, fmt.Errorf("tried creating byte slice with negative length. %d to %d total %d\n", start, end, end-start)
+ } else if end-start > 2000 {
+ return []byte{}, fmt.Errorf("tried creating byte slice with length over 2000. %d to %d total %d\n", start, end, end-start)
+ }
bytes := make([]byte, end-start)
n, err := reader.ReadAt(bytes, int64(start))
if err != nil {
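
The new guards on CopyByteArea reject negative and oversized ranges before allocating a slice. A self-contained sketch of the same pattern over an in-memory bytes.Reader; the 2000-byte ceiling mirrors the guard added in this commit.

package main

import (
	"bytes"
	"fmt"
)

// copyByteArea mirrors core.CopyByteArea: it copies reader[start:end] into a
// fresh slice, rejecting negative lengths and lengths above 2000 bytes.
func copyByteArea(start, end int, reader *bytes.Reader) ([]byte, error) {
	length := end - start
	if length < 0 {
		return nil, fmt.Errorf("tried creating byte slice with negative length: %d to %d", start, end)
	}
	if length > 2000 {
		return nil, fmt.Errorf("tried creating byte slice with length over 2000: %d to %d", start, end)
	}
	area := make([]byte, length)
	n, err := reader.ReadAt(area, int64(start))
	if err != nil {
		return nil, err
	}
	if n != length {
		return nil, fmt.Errorf("read %d bytes, expected %d", n, length)
	}
	return area, nil
}

func main() {
	disk := bytes.NewReader(bytes.Repeat([]byte{0xFF}, 10000)) // stand-in for the partition image
	area, err := copyByteArea(2000, 4000, disk)
	if err != nil {
		panic(err)
	}
	fmt.Printf("copied %d bytes\n", len(area))
}
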
diff --git a/core/verification.go b/core/verification.go
index f1e1f0b..289ce1e 100644
--- a/core/verification.go
+++ b/core/verification.go
@@ -12,8 +12,7 @@ import (
"github.com/tarm/serial"
)
-//var TotalReadBlocks int = 0
-
+// fileReadKey reads the public minisign key from a file specified in config.KeyLocation.
func fileReadKey() (string, error) {
if _, err := os.Stat(config.KeyLocation); os.IsNotExist(err) {
return "", fmt.Errorf("Key location %s does not exist", config.KeyLocation)
@@ -23,6 +22,7 @@ func fileReadKey() (string, error) {
return "", err
}
defer file.Close()
+ // A public key is never longer than 56 bytes
key := make([]byte, 56)
reader := bufio.NewReader(file)
n, err := reader.Read(key)
@@ -35,7 +35,11 @@ func fileReadKey() (string, error) {
return string(key), nil
}
+// serialReadKey reads the public minisign key from a usb tty specified in config.KeyLocation.
func serialReadKey() (string, error) {
+ // Since the USB serial path is tested with an Arduino,
+ // it is assumed that the tty device may not exist yet
+ // and can be plugged in manually by the user.
if _, err := os.Stat(config.KeyLocation); !os.IsNotExist(err) {
fmt.Println("Reconnect arduino now")
for true {
@@ -67,6 +71,9 @@ func serialReadKey() (string, error) {
}
defer s.Close()
key = key + fmt.Sprintf("%q", buf[:n])
+ // Ensure that two tab sequences have been read,
+ // meaning that the entire key has been captured,
+ // since the key is surrounded by tab sequences.
if strings.Count(key, "\\t") == 2 {
break
}
@@ -79,6 +86,7 @@ func serialReadKey() (string, error) {
return key, nil
}
+// ReadKey is a wrapper function to call the proper readKey function according to config.KeyStore.
func ReadKey() (string, error) {
switch config.KeyStore {
case 0:
@@ -86,14 +94,21 @@ func ReadKey() (string, error) {
case 1:
return fileReadKey()
case 2:
- return "", nil
+ return "", nil // TPM
case 3:
return serialReadKey()
}
return "", nil
}
+// ReadBlock reads the data area of a bytes.Reader specified by the given node.
+// It additionally verifies that the number of bytes read equals the expected amount and returns an error if it does not.
func ReadBlock(node Node, part *bytes.Reader, totalReadBlocks int) ([]byte, int, error) {
+ if node.BlockEnd-node.BlockStart < 0 {
+ return []byte{}, -1, fmt.Errorf("tried creating byte slice with negative length. %d to %d total %d\n", node.BlockStart, node.BlockEnd, node.BlockEnd-node.BlockStart)
+ } else if node.BlockEnd-node.BlockStart > 2000 {
+ return []byte{}, -1, fmt.Errorf("tried creating byte slice with length over 2000. %d to %d total %d\n", node.BlockStart, node.BlockEnd, node.BlockEnd-node.BlockStart)
+ }
block := make([]byte, node.BlockEnd-node.BlockStart)
blockSize := node.BlockEnd - node.BlockStart
_, err := part.Seek(int64(node.BlockStart), 0)
@@ -109,6 +124,7 @@ func ReadBlock(node Node, part *bytes.Reader, totalReadBlocks int) ([]byte, int,
return block, totalReadBlocks + 1, err
}
+// VerifySignature verifies the database using a given signature and public key.
func VerifySignature(key string, signature string, database string) (bool, error) {
var pk minisign.PublicKey
if err := pk.UnmarshalText([]byte(key)); err != nil {
@@ -123,6 +139,7 @@ func VerifySignature(key string, signature string, database string) (bool, error
return minisign.Verify(pk, data, []byte(signature)), nil
}
+// VerifyBlock verifies a byte slice with the hash in a given Node.
func VerifyBlock(block []byte, node Node) error {
calculatedBlockHash, err := CalculateBlockHash(block)
if err != nil {
@@ -135,6 +152,7 @@ func VerifyBlock(block []byte, node Node) error {
return fmt.Errorf("Node %s ranging from %d to %d does not match block. Expected %s, got %s.", node.PrevNodeSum, node.BlockStart, node.BlockEnd, wantedBlockHash, calculatedBlockHash)
}
+// VerifyNode verifies that the current Node is valid by matching its checksum against the PrevNodeSum field of the next node.
func VerifyNode(node Node, nextNode Node) error {
nodeHash, err := calculateStringHash(fmt.Sprintf("%d%d%s%s", node.BlockStart, node.BlockEnd, node.BlockSum, node.PrevNodeSum))
if err != nil {
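
VerifyBlock and VerifyNode together describe a hash chain: a node's BlockSum must match the SHA-1 of its data range, and a node's own hash must equal the PrevNodeSum of the node that follows it. The standalone sketch below walks such a chain with a local Node type mirroring the fields shown earlier; the real code fetches nodes from bbolt via GetNode rather than from a slice.

package main

import (
	"crypto/sha1"
	"fmt"
)

// Node mirrors the fields of core.Node shown in this commit.
type Node struct {
	BlockStart  int
	BlockEnd    int
	BlockSum    string
	PrevNodeSum string
}

// getHash mirrors Node.GetHash: the fields are combined in the order
// BlockStart, BlockEnd, BlockSum, PrevNodeSum and hashed with SHA-1.
func getHash(n Node) string {
	return fmt.Sprintf("%x", sha1.Sum([]byte(fmt.Sprintf("%d%d%s%s", n.BlockStart, n.BlockEnd, n.BlockSum, n.PrevNodeSum))))
}

// verifyChain checks every block against its node and every node against the
// PrevNodeSum of its successor, the same idea as VerifyBlock and VerifyNode.
func verifyChain(data []byte, chain []Node) error {
	for i, node := range chain {
		block := data[node.BlockStart:node.BlockEnd]
		if sum := fmt.Sprintf("%x", sha1.Sum(block)); sum != node.BlockSum {
			return fmt.Errorf("block %d: expected %s, got %s", i, node.BlockSum, sum)
		}
		if i+1 < len(chain) && chain[i+1].PrevNodeSum != getHash(node) {
			return fmt.Errorf("node %d does not chain to node %d", i, i+1)
		}
	}
	return nil
}

func main() {
	data := []byte("hello fsverify, this is example data split into two blocks")
	first := Node{BlockStart: 0, BlockEnd: 30, PrevNodeSum: "Entrypoint0"}
	first.BlockSum = fmt.Sprintf("%x", sha1.Sum(data[first.BlockStart:first.BlockEnd]))
	second := Node{BlockStart: 30, BlockEnd: len(data), PrevNodeSum: getHash(first)}
	second.BlockSum = fmt.Sprintf("%x", sha1.Sum(data[second.BlockStart:second.BlockEnd]))

	if err := verifyChain(data, []Node{first, second}); err != nil {
		fmt.Println("verification failed:", err)
		return
	}
	fmt.Println("chain verified")
}
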
diff --git a/verifysetup/cmd/setup.go b/verifysetup/cmd/setup.go
index c2ba790..e946dd7 100644
--- a/verifysetup/cmd/setup.go
+++ b/verifysetup/cmd/setup.go
@@ -28,12 +28,15 @@ func NewSetupCommand() *cobra.Command {
return cmd
}
+// checksumBlock creates a chain of Nodes to verify an area of a block device.
+// It is meant to be run as a goroutine and takes a WaitGroup as a parameter.
func checksumBlock(blockStart int, blockEnd int, bundleSize int, diskBytes []byte, nodeChannel chan verify.Node, n int, waitGroup *sync.WaitGroup) {
defer waitGroup.Done()
defer close(nodeChannel)
var reader *bytes.Reader
node := verify.Node{}
+ // A block is 2000 bytes big
blockCount := math.Floor(float64(bundleSize / 2000))
for i := 0; i < int(blockCount); i++ {
@@ -51,6 +54,8 @@ func checksumBlock(blockStart int, blockEnd int, bundleSize int, diskBytes []byt
nodeChannel <- node
}
+ // Since the bundleSize is unlikely to be evenly divisible by 2000,
+ // a final node has to be created that covers the remaining bytes.
block, err := core.ReadBlock(int(blockCount*2000), len(diskBytes), reader)
if err != nil {
fmt.Printf("%d:: final attempted reading from %d to %d. Error %s\n", blockStart, int(blockCount*2000)+2000, len(diskBytes), err)
@@ -65,6 +70,10 @@ func SetupCommand(_ *cobra.Command, args []string) error {
if len(args) != 3 && len(args) != 4 {
return fmt.Errorf("Usage: verifysetup setup [partition] [procCount] [fsverify partition output] <minisign directory>")
}
+
+ // The minisign directory argument is optional,
+ // so the presence of the argument is checked
+ // before minisignDir is set to a directory.
var minisignDir string
if len(args) != 4 {
minisignDir = "./minisign/"
@@ -75,6 +84,7 @@ func SetupCommand(_ *cobra.Command, args []string) error {
if err != nil {
return err
}
+
fmt.Println("Using partition: ", args[0])
disk, err := os.Open(args[0])
if err != nil {
@@ -86,6 +96,7 @@ func SetupCommand(_ *cobra.Command, args []string) error {
if err != nil {
return err
}
+
diskSize := diskInfo.Size()
bundleSize := math.Floor(float64(diskSize / int64(procCount)))
blockCount := math.Ceil(float64(bundleSize / 2000))
@@ -95,10 +106,14 @@ func SetupCommand(_ *cobra.Command, args []string) error {
return err
}
+ // To reduce the number of file operations,
+ // a single reader is created and shared by the goroutines.
reader := bytes.NewReader(diskBytes)
var waitGroup sync.WaitGroup
nodeChannels := make([]chan verify.Node, procCount+1)
for i := 0; i < procCount; i++ {
+ // Ensure that each thread only reads the area it is meant to read
+ // by handing it a copy of that area.
diskBytesCopy, err := verify.CopyByteArea(i*(int(bundleSize)), (i+1)*(int(bundleSize)), reader)
if err != nil {
return err
@@ -115,6 +130,10 @@ func SetupCommand(_ *cobra.Command, args []string) error {
return err
}
+ // All generated nodes are written to the database at once.
+ // While this is worse for the speed of verifysetup,
+ // it ensures that no write conflicts happen,
+ // which could be caused by multiple threads accessing the same database.
for i := 0; i < procCount; i++ {
channel := nodeChannels[i]
err = db.Batch(func(tx *bolt.Tx) error {
@@ -141,6 +160,8 @@ func SetupCommand(_ *cobra.Command, args []string) error {
return err
}
+ // The untrusted signature is stored in a special format
+ // and requires extra handling to represent it as a string.
var UntrustedSignature [2 + 8 + ed25519.SignatureSize]byte
binary.LittleEndian.PutUint16(UntrustedSignature[:2], sig.Algorithm)
binary.LittleEndian.PutUint64(UntrustedSignature[2:10], sig.KeyID)
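
setup splits the partition into procCount bundles and each bundle into 2000-byte blocks, with one final node covering the remainder that does not divide evenly by 2000. A small standalone sketch of that arithmetic with made-up sizes, mirroring the math.Floor calls used above.

package main

import (
	"fmt"
	"math"
)

func main() {
	const blockSize = 2000 // one fsverify block, as used by checksumBlock

	diskSize := int64(1_000_500) // hypothetical partition size in bytes
	procCount := 4

	// bundleSize: how many bytes each goroutine is responsible for.
	bundleSize := int(math.Floor(float64(diskSize / int64(procCount))))
	// blockCount: how many full 2000-byte blocks fit into one bundle.
	blockCount := int(math.Floor(float64(bundleSize / blockSize)))

	for i := 0; i < procCount; i++ {
		start := i * bundleSize
		end := (i + 1) * bundleSize
		fmt.Printf("goroutine %d covers bytes %d..%d: %d full blocks plus %d remainder bytes\n",
			i, start, end, blockCount, bundleSize-blockCount*blockSize)
	}
}
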
diff --git a/verifysetup/core/crypt.go b/verifysetup/core/crypt.go
index 4658641..1307bd3 100644
--- a/verifysetup/core/crypt.go
+++ b/verifysetup/core/crypt.go
@@ -11,6 +11,7 @@ import (
"strings"
)
+// CalculateBlockHash calculates the sha1 checksum of a byte slice.
func CalculateBlockHash(block []byte) (string, error) {
hash := sha1.New()
if _, err := io.Copy(hash, bytes.NewReader(block)); err != nil {
@@ -20,6 +21,8 @@ func CalculateBlockHash(block []byte) (string, error) {
return strings.TrimSpace(fmt.Sprintf("%x", hashInBytes)), nil
}
+// SignDatabase generates a minisign signature of the database using the given keys.
+// The signature uses "fsverify" as its comments to ensure predictability when fsverify verifies the signature.
func SignDatabase(database string, minisignKeys string) ([]byte, error) {
fmt.Print("Enter your password (will not echo): ")
p, err := term.ReadPassword(int(os.Stdin.Fd()))
diff --git a/verifysetup/core/storage.go b/verifysetup/core/storage.go
index 64b06a1..a4fc66d 100644
--- a/verifysetup/core/storage.go
+++ b/verifysetup/core/storage.go
@@ -10,8 +10,8 @@ import (
bolt "go.etcd.io/bbolt"
)
-var TotalReadBlocks = 0
-
+// ReadBlock reads the bytes in a specified range from a bytes.Reader.
+// It additionally verifies that the number of bytes read matches the size of the range and fails if it does not.
func ReadBlock(start int, end int, device *bytes.Reader) ([]byte, error) {
if end-start < 0 {
return []byte{}, fmt.Errorf("tried creating byte slice with negative length. %d to %d total %d\n", start, end, end-start)
@@ -24,10 +24,11 @@ func ReadBlock(start int, end int, device *bytes.Reader) ([]byte, error) {
return []byte{}, err
}
_, err = device.Read(block)
- TotalReadBlocks = TotalReadBlocks + (end - start)
return block, err
}
+// CreateNode creates a Node based on the given parameters.
+// If prevNode is set to nil, meaning this node is the first node in a verification chain, the node's PrevNodeSum is set to "EntrypointN", with N being the entrypoint number.
func CreateNode(blockStart int, blockEnd int, block []byte, prevNode *verify.Node, n string) (verify.Node, error) {
node := verify.Node{}
node.BlockStart = blockStart
@@ -50,6 +51,8 @@ func CreateNode(blockStart int, blockEnd int, block []byte, prevNode *verify.Nod
return node, nil
}
+// AddNode adds a node to the bucket "Nodes" in the database.
+// It assumes that a database transaction has already been started and takes a bolt.Tx as an argument.
func AddNode(node verify.Node, tx *bolt.Tx) error {
if node.BlockStart == node.BlockEnd {
return nil
@@ -66,6 +69,7 @@ func AddNode(node verify.Node, tx *bolt.Tx) error {
return nil
}
+// CreateHeader creates a header containing all information necessary for an fsverify partition.
func CreateHeader(unsignedHash string, signedHash string, diskSize int, tableSize int) ([]byte, error) {
header := make([]byte, 200)
header[0] = 0xAC
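
AddNode expects an already-open bolt.Tx and writes into the "Nodes" bucket. The sketch below shows that flow against a throwaway database; keying the node by its own hash and serializing it as JSON are illustrative assumptions, since the commit does not show the actual key/value encoding.

package main

import (
	"crypto/sha1"
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"

	bolt "go.etcd.io/bbolt"
)

// Node mirrors the fields of verify.Node used by CreateNode and AddNode.
type Node struct {
	BlockStart  int
	BlockEnd    int
	BlockSum    string
	PrevNodeSum string
}

// nodeHash combines the fields in the order BlockStart, BlockEnd, BlockSum,
// PrevNodeSum and hashes them with SHA-1, like Node.GetHash.
func nodeHash(n Node) string {
	return fmt.Sprintf("%x", sha1.Sum([]byte(fmt.Sprintf("%d%d%s%s", n.BlockStart, n.BlockEnd, n.BlockSum, n.PrevNodeSum))))
}

func main() {
	dir, err := os.MkdirTemp("", "*-fsverify-example")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	db, err := bolt.Open(filepath.Join(dir, "verify.db"), 0600, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// The first node of a chain uses the "EntrypointN" convention from CreateNode.
	node := Node{
		BlockStart:  0,
		BlockEnd:    2000,
		BlockSum:    fmt.Sprintf("%x", sha1.Sum(make([]byte, 2000))),
		PrevNodeSum: "Entrypoint0",
	}

	err = db.Update(func(tx *bolt.Tx) error {
		bucket, err := tx.CreateBucketIfNotExists([]byte("Nodes"))
		if err != nil {
			return err
		}
		value, err := json.Marshal(node) // illustrative serialization only
		if err != nil {
			return err
		}
		return bucket.Put([]byte(nodeHash(node)), value)
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("node stored under key", nodeHash(node))
}
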
diff --git a/verifysetup/go.sum b/verifysetup/go.sum
index debe9f9..be58f96 100644
--- a/verifysetup/go.sum
+++ b/verifysetup/go.sum
@@ -14,15 +14,23 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07 h1:UyzmZLoiDWMRywV4DUYb9Fbt8uiOSooupjTq10vpvnU=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
+github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8=
+golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
+golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ=
+golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=