path: root/verify/cmd/verify.go
package cmd

import (
	"bytes"
	"fmt"
	"io"
	"math"
	"os"
	"sync"

	"github.com/axtloss/fsverify/verify/config"
	"github.com/axtloss/fsverify/verify/core"
	"github.com/spf13/cobra"
)

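// validateFailed signals all verification goroutines to stop early once any
// block has failed to validate.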
var validateFailed bool

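// NewVerifyCommand builds the `verify` subcommand, invoked as
// `fsverify verify [disk]`.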
func NewVerifyCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:          "verify",
		Short:        "Verify the root filesystem against the stored verification data",
		RunE:         ValidateCommand,
		SilenceUsage: true,
	}

	return cmd
}

// validateThread validates a chain of nodes against a given byte slice
func validateThread(blockStart int, blockEnd int, bundleSize int, diskBytes []byte, n int, dbfile string, waitGroup *sync.WaitGroup, errChan chan error) {
	defer waitGroup.Done()
	// errChan is shared by all workers and is closed by the caller once every
	// worker has finished, so it must not be closed here.
	blockCount := bundleSize / 2000 // the bundle is verified in 2000-byte blocks
	totalReadBlocks := 0

	db, err := core.OpenDB(dbfile, true)
	if err != nil {
		errChan <- err
		return
	}

	reader := bytes.NewReader(diskBytes)

	node, err := core.GetNode(fmt.Sprintf("Entrypoint%d", n), db)
	if err != nil {
		errChan <- err
		return
	}
	block, i, err := core.ReadBlock(node, reader, totalReadBlocks)
	totalReadBlocks = i
	if err != nil {
		errChan <- err
		return
	}

	err = core.VerifyBlock(block, node)
	if err != nil {
		errChan <- err
		validateFailed = true
		return
	}

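	// Walk the node chain: the hash of the current node is used to look up the
	// next node in the database, and every node verifies one block of the bundle.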
	for totalReadBlocks < blockCount {
		if validateFailed {
			return
		}
		nodeSum, err := node.GetHash()
		if err != nil {
			fmt.Println("Failed to get hash of current node")
			errChan <- err
			return
		}
		node, err = core.GetNode(nodeSum, db)
		if err != nil {
			fmt.Println("Failed to get next node")
			errChan <- err
			return
		}
		part, i, err := core.ReadBlock(node, reader, totalReadBlocks)
		totalReadBlocks = i
		if err != nil {
			errChan <- err
			validateFailed = true
			return
		}
		err = core.VerifyBlock(part, node)
		if err != nil {
			errChan <- err
			validateFailed = true
			return
		}

	}

}

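// ValidateCommand reads the fsverify header, database and key from the fsverify
// partition, verifies the database signature, and then verifies the given disk
// in config.ProcCount parallel bundles.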
func ValidateCommand(_ *cobra.Command, args []string) error {
	if len(args) != 1 {
		core.WarnUser()
		return fmt.Errorf("Usage: fsverify verify [disk]")
	}

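	// Read the fsverify header from the configured fsverify partition.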
	header, err := core.ReadHeader(config.FsVerifyPart)
	if err != nil {
		core.WarnUser()
		return err
	}

	// Check that the partition actually is an fsverify partition.
	// This does not detect tampering; it only confirms that the specified
	// partition carries the fsverify magic number.
	if header.MagicNumber != 0xACAB {
		core.WarnUser()
		return fmt.Errorf("magic number does not match. Expected %#x, got %#x", 0xACAB, header.MagicNumber)
	}

	fmt.Println("Reading DB")
	dbfile, err := core.ReadDB(config.FsVerifyPart)
	if err != nil {
		core.WarnUser()
		return err
	}
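	// Read the verification key and check the database signature from the
	// header before trusting any of the stored nodes.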
	key, err := core.ReadKey()
	if err != nil {
		fmt.Println(err)
		core.WarnUser()
		return err
	}
	fmt.Println("Key: " + key)
	verified, err := core.VerifySignature(key, header.Signature, dbfile)
	if err != nil {
		core.WarnUser()
		return err
	} else if !verified {
		core.WarnUser()
		return fmt.Errorf("Signature verification failed\n")
	} else {
		fmt.Println("Signature verification success!")
	}

	fmt.Println("----")
	disk, err := os.Open(args[0])
	if err != nil {
		core.WarnUser()
		return err
	}
	defer disk.Close()
	diskInfo, err := disk.Stat()
	if err != nil {
		core.WarnUser()
		return err
	}
	diskSize := diskInfo.Size()

	// If the filesystem has grown since the fsverify partition was created,
	// fsverify cannot verify the entire partition, which would make the
	// verification useless.
	if header.FilesystemSize*header.FilesystemUnit != int(diskSize) {
		core.WarnUser()
		return fmt.Errorf("disk size does not match disk size specified in header. Expected %d, got %d", header.FilesystemSize*header.FilesystemUnit, diskSize)
	}

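	// Split the disk into config.ProcCount bundles; each bundle is copied and
	// verified by its own goroutine.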
	bundleSize := math.Floor(float64(diskSize / int64(config.ProcCount)))
	diskBytes := make([]byte, diskSize)
	// io.ReadFull returns an error if the disk cannot be read completely.
	_, err = io.ReadFull(disk, diskBytes)
	if err != nil {
		core.WarnUser()
		return err
	}
	reader := bytes.NewReader(diskBytes)
	var waitGroup sync.WaitGroup
	// Buffer the channel so every worker can report one error without blocking,
	// even after the main goroutine has stopped draining the channel.
	errChan := make(chan error, config.ProcCount)
	validateFailed = false
	for i := 0; i < config.ProcCount; i++ {
		// To ensure that each thread only operates on the byte area assigned to it,
		// a copy of that area is made.
		diskBytes, err := core.CopyByteArea(i*int(bundleSize), (i+1)*int(bundleSize), reader)
		if err != nil {
			fmt.Println("Failed to copy byte area ", i*int(bundleSize), " ", (i+1)*int(bundleSize))
			return err
		}
		waitGroup.Add(1)
		go validateThread(i*int(bundleSize), (i+1)*int(bundleSize), int(bundleSize), diskBytes, i, dbfile, &waitGroup, errChan)
	}

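	// Close errChan once every worker has finished so the loop below terminates.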
	go func() {
		waitGroup.Wait()
		close(errChan)
	}()

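	// Drain worker errors; the first error aborts verification and warns the user.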
	for err := range errChan {
		if err != nil {
			core.WarnUser()
			return err
		}
	}

	return nil
}