aboutsummaryrefslogtreecommitdiff
path: root/verify/core
diff options
context:
space:
mode:
Diffstat (limited to 'verify/core')
-rw-r--r--verify/core/crypt.go27
-rw-r--r--verify/core/storage.go246
-rw-r--r--verify/core/verification.go165
3 files changed, 438 insertions, 0 deletions
diff --git a/verify/core/crypt.go b/verify/core/crypt.go
new file mode 100644
index 0000000..067a0b3
--- /dev/null
+++ b/verify/core/crypt.go
@@ -0,0 +1,27 @@
+package core
+
+import (
+ "bytes"
+ "crypto/sha1"
+ "fmt"
+ "io"
+ "strings"
+)
+
// calculateStringHash computes the hex-encoded sha1 digest of the string a.
// The error return is always nil; it exists so the signature mirrors
// CalculateBlockHash and callers can treat both helpers uniformly.
func calculateStringHash(a string) (string, error) {
	digest := sha1.New()
	digest.Write([]byte(a))
	// A sha1 digest is exactly 20 bytes; the slice bound keeps that explicit.
	hexSum := fmt.Sprintf("%x", digest.Sum(nil)[:20])
	return strings.TrimSpace(hexSum), nil
}
+
+// CalculateBlockHash calculates the sha1 checksum of a given byte slice b.
+func CalculateBlockHash(b []byte) (string, error) {
+ hash := sha1.New()
+ if _, err := io.Copy(hash, bytes.NewReader(b)); err != nil {
+ return "", err
+ }
+ hashInBytes := hash.Sum(nil)[:20]
+ return strings.TrimSpace(fmt.Sprintf("%x", hashInBytes)), nil
+}
diff --git a/verify/core/storage.go b/verify/core/storage.go
new file mode 100644
index 0000000..182e1ec
--- /dev/null
+++ b/verify/core/storage.go
@@ -0,0 +1,246 @@
+package core
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ bolt "go.etcd.io/bbolt"
+ "io"
+ "os"
+)
+
// Header contains all information stored in the header of a fsverify partition.
// The on-disk layout and field sizes are defined by ReadHeader.
type Header struct {
	MagicNumber int // first two header bytes; ReadHeader expects 0xACAB
	Signature string // minisign signature text rebuilt from the untrusted and trusted hash fields
	FilesystemSize int // raw size value from the header; presumably in units of FilesystemUnit — TODO confirm against writer
	FilesystemUnit int // multiplier decoded by parseUnitSpec; -1 signals an invalid unit byte
	TableSize int // database (table) size; multiplied by TableUnit in ReadDB to get the byte count
	TableUnit int // multiplier decoded by parseUnitSpec; -1 signals an invalid unit byte
}
+
// Node contains all information stored in a database node.
// If the Node is the first node in the database, PrevNodeSum should be set to Entrypoint.
type Node struct {
	BlockStart int // byte offset on the partition where the block begins (see ReadBlock)
	BlockEnd int // byte offset where the block ends; ReadBlock reads BlockEnd-BlockStart bytes, so this is exclusive
	BlockSum string // sha1 checksum of the block's data, compared in VerifyBlock
	PrevNodeSum string // checksum of the previous node; chains nodes together (see GetHash and VerifyNode)
}
+
+// GetHash returns the hash of all fields of a Node combined.
+// The Node fields are combined in the order BlockStart, BlockEnd, BlockSum and PrevNodeSum
+func (n *Node) GetHash() (string, error) {
+ return calculateStringHash(fmt.Sprintf("%d%d%s%s", n.BlockStart, n.BlockEnd, n.BlockSum, n.PrevNodeSum))
+}
+
// parseUnitSpec parses the file size unit specified in the header and returns
// it as the matching multiplier. The unit byte selects a power of 1000:
// 0 -> bytes, 1 -> KB, 2 -> MB, 3 -> GB, 4 -> TB, 5 -> PB.
// In the case of an invalid unit byte the function returns -1.
func parseUnitSpec(size []byte) int {
	switch size[0] {
	case 0:
		return 1
	case 1:
		return 1000 // KB
	case 2:
		return 1000 * 1000 // MB
	case 3:
		// Was 1000*1000*10000 (10^10) — an extra zero; every other case
		// follows powers of 1000, so GB must be 10^9.
		return 1000 * 1000 * 1000
	case 4:
		// Was 10^14; corrected to 10^12 (TB) to stay on the powers-of-1000 scale.
		return 1000 * 1000 * 1000 * 1000
	case 5:
		return 1000 * 1000 * 1000 * 1000 * 1000 // PB
	default:
		return -1
	}
}
+
+// ReadHeader reads the partition header and puts it in a variable of type Header.
+// If any field fails to be read, the function returns an empty Header struct and the error.
+func ReadHeader(partition string) (Header, error) {
+ _, exist := os.Stat(partition)
+ if os.IsNotExist(exist) {
+ return Header{}, fmt.Errorf("Cannot find partition %s", partition)
+ }
+ part, err := os.Open(partition)
+ if err != nil {
+ return Header{}, err
+ }
+ defer part.Close()
+
+ header := Header{}
+ reader := bufio.NewReader(part)
+ // Since the size of each field is already known
+ // it is best to hard code them, in the case
+ // that a field goes over its allocated size
+ // fsverify should (and will) fail
+ MagicNumber := make([]byte, 2)
+ UntrustedHash := make([]byte, 100)
+ TrustedHash := make([]byte, 88)
+ FilesystemSize := make([]byte, 4)
+ FilesystemUnit := make([]byte, 1)
+ TableSize := make([]byte, 4)
+ TableUnit := make([]byte, 1)
+
+ _, err = reader.Read(MagicNumber)
+ MagicNum := binary.BigEndian.Uint16(MagicNumber)
+ if MagicNum != 0xACAB { // The Silliest of magic numbers
+ return Header{}, err
+ }
+ header.MagicNumber = int(MagicNum)
+
+ _, err = reader.Read(UntrustedHash)
+ if err != nil {
+ return Header{}, err
+ }
+ _, err = reader.Read(TrustedHash)
+ if err != nil {
+ return Header{}, err
+ }
+ _, err = reader.Read(FilesystemSize)
+ if err != nil {
+ return Header{}, err
+ }
+ _, err = reader.Read(FilesystemUnit)
+ if err != nil {
+ return Header{}, err
+ }
+ _, err = reader.Read(TableSize)
+ if err != nil {
+ return Header{}, err
+ }
+ _, err = reader.Read(TableUnit)
+ if err != nil {
+ return Header{}, err
+ }
+
+ header.Signature = fmt.Sprintf("untrusted comment: fsverify\n%s\ntrusted comment: fsverify\n%s\n", string(UntrustedHash), string(TrustedHash))
+ header.FilesystemSize = int(binary.BigEndian.Uint32(FilesystemSize))
+ header.TableSize = int(binary.BigEndian.Uint32(TableSize))
+ header.FilesystemUnit = parseUnitSpec(FilesystemUnit)
+ header.TableUnit = parseUnitSpec(TableUnit)
+ if header.FilesystemUnit == -1 || header.TableUnit == -1 {
+ return Header{}, fmt.Errorf("unit size for Filesystem or Table invalid: fs: %x, table: %x", FilesystemUnit, TableUnit)
+ }
+ return header, nil
+}
+
+// ReadDB reads the database from a fsverify partition.
+// It verifies the the size of the database with the size specified in the partition header and returns an error if the sizes do not match.
+// Due to limitations with bbolt the database gets written to a temporary path and the function returns the path to the database.
+func ReadDB(partition string) (string, error) {
+ _, exist := os.Stat(partition)
+ if os.IsNotExist(exist) {
+ return "", fmt.Errorf("Cannot find partition %s", partition)
+ }
+ part, err := os.Open(partition)
+ if err != nil {
+ return "", err
+ }
+ defer part.Close()
+ reader := bufio.NewReader(part)
+
+ // The area taken up by the header
+ // it is useless for this reader instance
+ // and will be skipped completely
+ _, err = reader.Read(make([]byte, 200))
+ if err != nil {
+ fmt.Println(err)
+ return "", err
+ }
+
+ header, err := ReadHeader(partition)
+ if err != nil {
+ fmt.Println(err)
+ return "", err
+ }
+
+ // Reading the specified table size allows for tamper protection
+ // in the case that the partition was tampered with "lazily"
+ // meaning that only the database was modified, and not the header
+ // if that is the case, the database would be lacking data, making it unusable
+ db := make([]byte, header.TableSize*header.TableUnit)
+ n, err := io.ReadFull(reader, db)
+ if err != nil {
+ return "", err
+ }
+ if n != header.TableSize*header.TableUnit {
+ return "", fmt.Errorf("Database is not expected size. Expected %d, got %d", header.TableSize*header.TableUnit, n)
+ }
+ fmt.Printf("db: %d\n", n)
+
+ // Write the database to a temporary directory
+ // to ensure that it disappears after the next reboot
+ temp, err := os.MkdirTemp("", "*-fsverify")
+ if err != nil {
+ return "", err
+ }
+
+ // The file permission is immediately set to 0700
+ // this ensures that the database is not modified
+ // after it has been written
+ err = os.WriteFile(temp+"/verify.db", db, 0700)
+ if err != nil {
+ return "", err
+ }
+
+ return temp + "/verify.db", nil
+}
+
+// OpenDB opens a bbolt database and returns a bbolt instance.
+func OpenDB(dbpath string, readonly bool) (*bolt.DB, error) {
+ _, exist := os.Stat(dbpath)
+ if os.IsNotExist(exist) {
+ os.Create(dbpath)
+ }
+ db, err := bolt.Open(dbpath, 0777, &bolt.Options{ReadOnly: readonly})
+ if err != nil {
+ return nil, err
+ }
+ return db, nil
+}
+
+// GetNode retrieves a Node from the database based on the hash identifier.
+// If db is set to nil, the function will open the database in read-only mode itself.
+func GetNode(checksum string, db *bolt.DB) (Node, error) {
+ var err error
+ var deferDB bool
+ if db == nil {
+ db, err = OpenDB("my.db", true)
+ if err != nil {
+ return Node{}, err
+ }
+ deferDB = true
+ }
+ var node Node
+ err = db.View(func(tx *bolt.Tx) error {
+ nodes := tx.Bucket([]byte("Nodes"))
+ app := nodes.Get([]byte(checksum))
+ err := json.Unmarshal(app, &node)
+ return err
+ })
+ if deferDB {
+ defer db.Close()
+ }
+ return node, err
+}
+
+// CopyByteArea copies an area of bytes from a reader.
+// It verifies that the reader reads the wanted amount of bytes, and returns an error if this is not the case.
+func CopyByteArea(start int, end int, reader *bytes.Reader) ([]byte, error) {
+ if end-start < 0 {
+ return []byte{}, fmt.Errorf("tried creating byte slice with negative length. %d to %d total %d\n", start, end, end-start)
+ } else if end-start > 2000 {
+ return []byte{}, fmt.Errorf("tried creating byte slice with length over 2000. %d to %d total %d\n", start, end, end-start)
+ }
+ bytes := make([]byte, end-start)
+ n, err := reader.ReadAt(bytes, int64(start))
+ if err != nil {
+ return nil, err
+ } else if n != end-start {
+ return nil, fmt.Errorf("Unable to read requested size. Expected %d, got %d", end-start, n)
+ }
+ return bytes, nil
+}
diff --git a/verify/core/verification.go b/verify/core/verification.go
new file mode 100644
index 0000000..289ce1e
--- /dev/null
+++ b/verify/core/verification.go
@@ -0,0 +1,165 @@
+package core
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "os"
+ "strings"
+
+ "aead.dev/minisign"
+ "github.com/axtloss/fsverify/config"
+ "github.com/tarm/serial"
+)
+
+// fileReadKey reads the public minisign key from a file specified in config.KeyLocation.
+func fileReadKey() (string, error) {
+ if _, err := os.Stat(config.KeyLocation); os.IsNotExist(err) {
+ return "", fmt.Errorf("Key location %s does not exist", config.KeyLocation)
+ }
+ file, err := os.Open(config.KeyLocation)
+ if err != nil {
+ return "", err
+ }
+ defer file.Close()
+ // A public key is never longer than 56 bytes
+ key := make([]byte, 56)
+ reader := bufio.NewReader(file)
+ n, err := reader.Read(key)
+ if n != 56 {
+ return "", fmt.Errorf("Key does not match expected key size. Expected 56, got %d", n)
+ }
+ if err != nil {
+ return "", err
+ }
+ return string(key), nil
+}
+
+// serialReadKey reads the public minisign key from a usb tty specified in config.KeyLocation.
+func serialReadKey() (string, error) {
+ // Since the usb serial is tested with an arduino
+ // it is assumed that the tty device does not always exist
+ // and can be manually plugged in by the user
+ if _, err := os.Stat(config.KeyLocation); !os.IsNotExist(err) {
+ fmt.Println("Reconnect arduino now")
+ for true {
+ if _, err := os.Stat(config.KeyLocation); os.IsNotExist(err) {
+ break
+ }
+ }
+ } else {
+ fmt.Println("Connect arduino now")
+ }
+ for true {
+ if _, err := os.Stat(config.KeyLocation); !os.IsNotExist(err) {
+ break
+ }
+ }
+ fmt.Println("Arduino connected")
+ c := &serial.Config{Name: config.KeyLocation, Baud: 9600}
+ s, err := serial.OpenPort(c)
+ if err != nil {
+ return "", err
+ }
+
+ key := ""
+ for true {
+ buf := make([]byte, 128)
+ n, err := s.Read(buf)
+ if err != nil {
+ return "", err
+ }
+ defer s.Close()
+ key = key + fmt.Sprintf("%q", buf[:n])
+ // ensure that two tab sequences are read
+ // meaning that the entire key has been captured
+ // since the key is surrounded by a tab sequence
+ if strings.Count(key, "\\t") == 2 {
+ break
+ }
+ }
+ key = strings.ReplaceAll(key, "\\t", "")
+ key = strings.ReplaceAll(key, "\"", "")
+ if len(key) != 56 {
+ return "", fmt.Errorf("Key does not match expected key size. Expected 56, got %d", len(key))
+ }
+ return key, nil
+}
+
+// ReadKey is a wrapper function to call the proper readKey function according to config.KeyStore.
+func ReadKey() (string, error) {
+ switch config.KeyStore {
+ case 0:
+ return fileReadKey()
+ case 1:
+ return fileReadKey()
+ case 2:
+ return "", nil // TPM
+ case 3:
+ return serialReadKey()
+ }
+ return "", nil
+}
+
+// ReadBlock reads a data area of a bytes.Reader specified in the given node.
+// It additionally verifies that the amount of bytes read equal the wanted amount and returns an error if this is not the case.
+func ReadBlock(node Node, part *bytes.Reader, totalReadBlocks int) ([]byte, int, error) {
+ if node.BlockEnd-node.BlockStart < 0 {
+ return []byte{}, -1, fmt.Errorf("tried creating byte slice with negative length. %d to %d total %d\n", node.BlockStart, node.BlockEnd, node.BlockEnd-node.BlockStart)
+ } else if node.BlockEnd-node.BlockStart > 2000 {
+ return []byte{}, -1, fmt.Errorf("tried creating byte slice with length over 2000. %d to %d total %d\n", node.BlockStart, node.BlockEnd, node.BlockEnd-node.BlockStart)
+ }
+ block := make([]byte, node.BlockEnd-node.BlockStart)
+ blockSize := node.BlockEnd - node.BlockStart
+ _, err := part.Seek(int64(node.BlockStart), 0)
+ if err != nil {
+ return []byte{}, -1, err
+ }
+ n, err := part.Read(block)
+ if err != nil {
+ return block, -1, err
+ } else if n != blockSize {
+ return block, -1, fmt.Errorf("Did not read correct amount of bytes. Expected: %d, Got: %d", blockSize, n)
+ }
+ return block, totalReadBlocks + 1, err
+}
+
+// VerifySignature verifies the database using a given signature and public key.
+func VerifySignature(key string, signature string, database string) (bool, error) {
+ var pk minisign.PublicKey
+ if err := pk.UnmarshalText([]byte(key)); err != nil {
+ return false, err
+ }
+
+ data, err := os.ReadFile(database)
+ if err != nil {
+ return false, err
+ }
+
+ return minisign.Verify(pk, data, []byte(signature)), nil
+}
+
+// VerifyBlock verifies a byte slice with the hash in a given Node.
+func VerifyBlock(block []byte, node Node) error {
+ calculatedBlockHash, err := CalculateBlockHash(block)
+ if err != nil {
+ return err
+ }
+ wantedBlockHash := node.BlockSum
+ if strings.Compare(calculatedBlockHash, strings.TrimSpace(wantedBlockHash)) == 0 {
+ return nil
+ }
+ return fmt.Errorf("Node %s ranging from %d to %d does not match block. Expected %s, got %s.", node.PrevNodeSum, node.BlockStart, node.BlockEnd, wantedBlockHash, calculatedBlockHash)
+}
+
+// VerifyNode verifies that the current Node is valid by matching the checksum of it with the PrevNodeSum field of the next node.
+func VerifyNode(node Node, nextNode Node) error {
+ nodeHash, err := calculateStringHash(fmt.Sprintf("%d%d%s%s", node.BlockStart, node.BlockEnd, node.BlockSum, node.PrevNodeSum))
+ if err != nil {
+ return err
+ }
+ if strings.Compare(nodeHash, nextNode.PrevNodeSum) != 0 {
+ return fmt.Errorf("Node %s is not valid!", node.PrevNodeSum)
+ }
+ return nil
+}