Avoid reinventing the wheel, properly using TARs

maride 2021-04-22 21:46:01 +02:00
parent f499b0563d
commit 0a4217b5b9
3 changed files with 99 additions and 144 deletions
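The change replaces the previous hand-rolled transfer format (base64-encoded parts joined by newlines) with a standard TAR archive that is then DEFLATE-compressed, built entirely from Go's archive/tar and compress/flate packages. As a minimal, self-contained sketch of that round trip, using only the standard library (the entry name "main-fuzzer-01/fuzzer_stats" and the payload below are invented for illustration and are not part of the commit):

    package main

    import (
        "archive/tar"
        "bytes"
        "compress/flate"
        "fmt"
        "io"
        "log"
    )

    func main() {
        // Pack a single in-memory file into a TAR archive, as the packer does per fuzzer file
        var tarBuffer bytes.Buffer
        tarWriter := tar.NewWriter(&tarBuffer)
        contents := []byte("example payload") // invented payload
        tarWriter.WriteHeader(&tar.Header{
            Name: "main-fuzzer-01/fuzzer_stats", // illustrative relative path
            Mode: 0600,
            Size: int64(len(contents)),
        })
        tarWriter.Write(contents)
        tarWriter.Close()

        // Compress the finished archive with DEFLATE
        var flateBuffer bytes.Buffer
        flateWriter, flateErr := flate.NewWriter(&flateBuffer, flate.BestCompression)
        if flateErr != nil {
            log.Fatal(flateErr)
        }
        flateWriter.Write(tarBuffer.Bytes())
        flateWriter.Close()

        // Decompress and walk the archive again, as the unpacker does
        flateReader := flate.NewReader(bytes.NewReader(flateBuffer.Bytes()))
        defer flateReader.Close()
        tarReader := tar.NewReader(flateReader)
        for {
            header, nextErr := tarReader.Next()
            if nextErr == io.EOF {
                break
            }
            if nextErr != nil {
                log.Fatal(nextErr)
            }
            var fileBuffer bytes.Buffer
            io.Copy(&fileBuffer, tarReader)
            fmt.Printf("%s: %q\n", header.Name, fileBuffer.Bytes())
        }
    }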

View File

@@ -4,23 +4,39 @@ import (
     "archive/tar"
     "bytes"
     "compress/flate"
-    "encoding/base64"
     "fmt"
     "io/ioutil"
     "log"
     "os"
+    "strings"
 )
 
-// Packs a whole fuzzer directory - at least queue/, fuzz_bitmap, fuzzer_stats
-func PackFuzzer(fuzzerName string, directory string) ([]byte, error) {
-    // Gather contents
-    contentArray := [][]byte{
-        []byte(fuzzerName),
-        packSingleFile(directory, "fuzz_bitmap"),
-        packSingleFile(directory, "fuzzer_stats"),
-        packQueueFiles(directory),
+// PackFuzzers packs all targeted fuzzers into a TAR - at least queue/, fuzz_bitmap, fuzzer_stats
+func PackFuzzers(fuzzers []string, fuzzerDirectory string) ([]byte, error) {
+    // Create TAR archive
+    var tarBuffer bytes.Buffer
+    tarWriter := tar.NewWriter(&tarBuffer)
+
+    // Essentially we want to pack four things from each targeted fuzzer:
+    //  - the fuzz_bitmap file
+    //  - the fuzzer_stats file
+    //  - the is_main_fuzzer file if present
+    //  - the queue/ directory
+    for _, fuzzer := range fuzzers {
+        // We need full paths to read, but will write relative paths into the TAR archive
+        absFuzzerPath := fuzzerDirectory
+        relFuzzerPath := strings.TrimPrefix(fuzzer, fuzzerDirectory)
+
+        // Read-n-Pack™
+        packSingleFile(tarWriter, absFuzzerPath, relFuzzerPath, "fuzz_bitmap", false)
+        packSingleFile(tarWriter, absFuzzerPath, relFuzzerPath, "fuzzer_stats", false)
+        packSingleFile(tarWriter, absFuzzerPath, relFuzzerPath, "is_main_fuzzer", true)
+        packQueueFiles(tarWriter, absFuzzerPath, relFuzzerPath)
     }
 
+    // Close TAR archive
+    tarWriter.Close()
+
     // Prepare FLATE compression
     var flateBuffer bytes.Buffer
     flateWrite, flateErr := flate.NewWriter(&flateBuffer, flate.BestCompression)
@@ -28,52 +44,50 @@ func PackFuzzer(fuzzerName string, directory string) ([]byte, error) {
         return nil, fmt.Errorf("unable to prepare flate compressor: %s", flateErr)
     }
 
-    // Convert all parts to base64, and concat them to the packet
-    firstRun := true
-    for _, a := range contentArray {
-        b64Buf := make([]byte, base64.StdEncoding.EncodedLen(len(a)))
-        base64.StdEncoding.Encode(b64Buf, a)
-
-        // Add newline char as separator, avoiding it on the first run
-        if firstRun {
-            firstRun = false
-        } else {
-            flateWrite.Write([]byte("\n"))
-        }
-
-        // Append base64 encoded content
-        flateWrite.Write(b64Buf)
-    }
+    // Apply FLATE compression
+    flateWrite.Write(tarBuffer.Bytes())
     flateWrite.Close()
 
-    // Return result: a big byte array, representing concatted base64-encoded files
+    // Return result: a DEFLATEd TAR archive
     return flateBuffer.Bytes(), nil
 }
 
-// Reads a single file and returns it
-func packSingleFile(directory string, fileName string) []byte {
-    path := fmt.Sprintf("%s%c%s", directory, os.PathSeparator, fileName)
-    contents, readErr := ioutil.ReadFile(path)
+// packSingleFile packs a single file and writes it to the archive
+// absPath is the base directory, e.g. /project/fuzzers/
+// relPath is the name of the fuzzer itself, e.g. main-fuzzer-01
+// fileName is the name of the file you want to pack, e.g. fuzzer_stats
+// ignoreNotFound is used for files which may not be present in all fuzzer directories, like is_main_fuzzer
+func packSingleFile(tarWriter *tar.Writer, absPath string, relPath string, fileName string, ignoreNotFound bool) {
+    // Read file
+    readPath := fmt.Sprintf("%s%c%s%c%s", absPath, os.PathSeparator, relPath, os.PathSeparator, fileName)
+    contents, readErr := ioutil.ReadFile(readPath)
     if readErr != nil {
-        log.Printf("Failed to read file %s: %s", path, readErr)
-        return nil
+        if !ignoreNotFound {
+            log.Printf("Failed to read file %s: %s", readPath, readErr)
+        }
+        return
     }
 
-    return contents
+    // Create header for this file
+    header := &tar.Header{
+        Name: fmt.Sprintf("%s%c%s", relPath, os.PathSeparator, fileName),
+        Mode: 0600,
+        Size: int64(len(contents)),
+    }
+
+    // Add header and contents to archive
+    tarWriter.WriteHeader(header)
+    tarWriter.Write(contents)
 }
 
 // Packs the files in the given directory into a tar archive
-func packQueueFiles(directory string) []byte {
-    var tarBuffer bytes.Buffer
-    tarWriter := tar.NewWriter(&tarBuffer)
-
+func packQueueFiles(tarWriter *tar.Writer, absPath string, relPath string) {
     // Get list of queue files
-    queuePath := fmt.Sprintf("%s%cqueue", directory, os.PathSeparator)
+    queuePath := fmt.Sprintf("%s%c%s%cqueue", absPath, os.PathSeparator, relPath, os.PathSeparator)
     filesInDir, readErr := ioutil.ReadDir(queuePath)
     if readErr != nil {
-        log.Printf("Failed to list directory content of %s: %s", directory, readErr)
-        return nil
+        log.Printf("Failed to list directory content of %s: %s", queuePath, readErr)
+        return
     }
 
     // Walk over each file and add it to our archive
@@ -84,29 +98,7 @@ func packQueueFiles(directory string) []byte {
             continue
         }
 
-        // Create header for this file
-        header := &tar.Header{
-            Name: f.Name(),
-            Mode: 0600,
-            Size: f.Size(),
-        }
-
-        // Read file
-        path := fmt.Sprintf("%s%c%s", queuePath, os.PathSeparator, f.Name())
-        contents, readErr := ioutil.ReadFile(path)
-        if readErr != nil {
-            log.Printf("Failed to read file %s: %s", path, readErr)
-            continue
-        }
-
-        // Add header and contents to archive
-        tarWriter.WriteHeader(header)
-        tarWriter.Write(contents)
+        // Pack into the archive
+        packSingleFile(tarWriter, absPath, relPath, fmt.Sprintf("queue%c%s", os.PathSeparator, f.Name()), false)
     }
-
-    // Close constructed tar archive
-    tarWriter.Close()
-
-    // And return it
-    return tarBuffer.Bytes()
 }
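With the new signature, a caller hands PackFuzzers the list of fuzzer directories plus their common base directory; strings.TrimPrefix then yields the relative path that is stored in the archive. A hedged usage fragment, as it might sit inside some calling function - the paths are invented (following the /project/fuzzers/ example from the doc comment above), and it assumes the logistic and net packages are imported as the watchdog below does:

    // invented example paths, matching the /project/fuzzers/ layout described above
    fuzzers := []string{
        "/project/fuzzers/main-fuzzer-01",
        "/project/fuzzers/worker-fuzzer-02",
    }
    packed, packErr := logistic.PackFuzzers(fuzzers, "/project/fuzzers/")
    if packErr != nil {
        log.Printf("Failed to pack fuzzers: %s", packErr)
        return
    }

    // packed is now a DEFLATE-compressed TAR archive with entries such as
    // main-fuzzer-01/fuzzer_stats and main-fuzzer-01/queue/<testcase>,
    // ready to be handed to net.SendToPeers(packed)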

View File

@@ -4,16 +4,15 @@ import (
     "archive/tar"
     "bytes"
     "compress/flate"
-    "encoding/base64"
     "fmt"
     "io"
     "io/ioutil"
     "log"
     "os"
-    "strings"
+    "path"
 )
 
-// Unpacks a raw string, creates files and stores them in the target directory. May return an error if one occurs
+// UnpackInto decompresses the given bytes with DEFLATE, then unpacks the result as a TAR archive into targetDir
 func UnpackInto(raw []byte, targetDir string) error {
     // Prepare FLATE decompressor
     var flateBuffer bytes.Buffer
@@ -23,74 +22,11 @@ func UnpackInto(raw []byte, targetDir string) error {
     flateBuffer.Write(raw)
     raw, _ = ioutil.ReadAll(flateReader)
 
-    // Process raw bytes
-    splitted := bytes.Split(raw, []byte("\n"))
-    if len(splitted) != 4 {
-        // We are currently packing four things in there (the fuzzer name, queue/, fuzz_bitmap, fuzzer_stats)
-        // So if we don't get three parts, we have a malformed packet
-        return fmt.Errorf("unable to unpack packet: Expected 4 parts, got %d", len(splitted))
-    }
-
-    // base64 decode contents
-    for i, s := range splitted {
-        b64Buf := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
-        base64.StdEncoding.Decode(b64Buf, s)
-        splitted[i] = b64Buf
-    }
-
-    // Check filename, and process it
-    fuzzerName := string(bytes.TrimRight(splitted[0], "\x00"))
-    if strings.Contains(fuzzerName, "/") {
-        return fmt.Errorf("received file name with a slash, discarding whole packet for fuzzer \"%s\"", fuzzerName)
-    }
-
-    // Check if our target directory (this very fuzzers directory) already exists, or if we need to create it
-    targetDir = fmt.Sprintf("%s%c%s", targetDir, os.PathSeparator, fuzzerName)
-    _, folderErr := os.Stat(targetDir)
-    if os.IsNotExist(folderErr) {
-        // directory doesn't yet exist, create it
-        mkdirErr := os.MkdirAll(targetDir, 0700)
-        if mkdirErr != nil {
-            // Creating the target directory failed, so we won't proceed unpacking into a non-existent directory
-            return fmt.Errorf("unable to unpack packet: could not create directory at %s: %s", targetDir, mkdirErr)
-        }
-    }
-
-    // Process every single part
-    unpackSingleFile(splitted[1], targetDir, "fuzz_bitmap")
-    unpackSingleFile(splitted[2], targetDir, "fuzzer_stats")
-    unpackQueueDir(splitted[3], targetDir)
-
-    return nil
-}
-
-// Writes the contents to the target
-func unpackSingleFile(raw []byte, targetDirectory string, filename string) {
-    path := fmt.Sprintf("%s%c%s", targetDirectory, os.PathSeparator, filename)
-
-    // Check if the file already exists - we won't overwrite it then
-    _, fileInfoErr := os.Stat(path)
-    if os.IsExist(fileInfoErr) {
-        // File already exists, we don't need to write a thing
-        return
-    }
-
-    writeErr := ioutil.WriteFile(path, raw, 0644)
-    if writeErr != nil {
-        log.Printf("Unable to write to file %s: %s", path, writeErr)
-    }
-}
-
-// Writes all files in the raw byte array into the target directory
-func unpackQueueDir(raw []byte, targetDir string) {
     // Open TAR archive
     var tarBuffer bytes.Buffer
     tarBuffer.Write(raw)
     tarReader := tar.NewReader(&tarBuffer)
 
-    // Set correct path for files
-    targetDir = fmt.Sprintf("%s%cqueue", targetDir, os.PathSeparator)
-
     // Create queue directory if it doesn't exist yet
     _, folderErr := os.Stat(targetDir)
     if os.IsNotExist(folderErr) {
@@ -115,4 +51,36 @@ func unpackQueueDir(raw []byte, targetDir string) {
         io.Copy(&fileBuffer, tarReader)
         unpackSingleFile(fileBuffer.Bytes(), targetDir, header.Name)
     }
+
+    return nil
+}
+
+// Writes the contents to the target
+func unpackSingleFile(raw []byte, targetDirectory string, filename string) {
+    destPath := fmt.Sprintf("%s%c%s", targetDirectory, os.PathSeparator, filename)
+
+    // Check if the file already exists - we won't overwrite it then
+    _, fileInfoErr := os.Stat(destPath)
+    if fileInfoErr == nil {
+        // os.Stat succeeded, so the file already exists and we don't need to write a thing
+        return
+    }
+
+    // Check if the target directory already exists - otherwise we create it
+    dirOfFile := path.Dir(destPath)
+    _, dirInfoErr := os.Stat(dirOfFile)
+    if os.IsNotExist(dirInfoErr) {
+        // Create directories as required
+        mkdirErr := os.MkdirAll(dirOfFile, 0755)
+        if mkdirErr != nil {
+            log.Printf("Failed to create directory %s: %s", dirOfFile, mkdirErr)
+            return
+        }
+    }
+
+    // Write file
+    writeErr := ioutil.WriteFile(destPath, raw, 0644)
+    if writeErr != nil {
+        log.Printf("Unable to write to file %s: %s", destPath, writeErr)
+    }
 }
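On the receiving side everything now arrives as one payload: UnpackInto inflates it and replays the TAR entries below targetDir, with unpackSingleFile creating missing directories and skipping files that already exist, so receiving the same queue entries repeatedly is harmless. A hedged fragment - the target directory is invented, and packed stands for a payload as produced by logistic.PackFuzzers:

    // packed: a DEFLATE-compressed TAR archive received from a peer
    if unpackErr := logistic.UnpackInto(packed, "/project/fuzzers"); unpackErr != nil {
        log.Printf("Failed to unpack packet: %s", unpackErr)
    }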

View File

@@ -8,7 +8,6 @@ import (
     "io/ioutil"
     "log"
     "os"
-    "path/filepath"
     "time"
 )
@@ -27,20 +26,16 @@ func RegisterWatchdogFlags() {
 func WatchFuzzers(outputDirectory string) {
     // Loop forever
     for {
-        // Loop over those fuzzer directories we want to share
-        for _, localFuzzDir := range getTargetFuzzers(outputDirectory) {
-            // Pack important parts of the fuzzer directory into a byte array
-            fuzzerName := filepath.Base(localFuzzDir)
-            packedFuzzer, packerErr := logistic.PackFuzzer(fuzzerName, localFuzzDir)
-            if packerErr != nil {
-                log.Printf("Failed to pack fuzzer: %s", packerErr)
-                continue
-            }
-
-            // and send it to our peers
-            net.SendToPeers(packedFuzzer)
+        // Pack important parts of the fuzzers into an archive
+        packedFuzzers, packerErr := logistic.PackFuzzers(getTargetFuzzers(outputDirectory), outputDirectory)
+        if packerErr != nil {
+            log.Printf("Failed to pack fuzzer: %s", packerErr)
+            continue
         }
+
+        // and send it to our peers
+        net.SendToPeers(packedFuzzers)
 
         // Sleep a bit
         time.Sleep(time.Duration(rescanSecs) * time.Second)
     }
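getTargetFuzzers itself is not part of this diff; from the call above it is only assumed to return the targeted fuzzer directories below outputDirectory as a []string of full paths, which PackFuzzers then trims back to relative paths. A purely hypothetical stand-in that matches that contract (the real implementation in the repository may differ, and fmt is assumed to be imported alongside the imports shown above):

    // Hypothetical sketch, not the repository's actual implementation:
    // return every direct subdirectory of outputDirectory as a full path.
    func getTargetFuzzers(outputDirectory string) []string {
        entries, readErr := ioutil.ReadDir(outputDirectory)
        if readErr != nil {
            log.Printf("Failed to list directory content of %s: %s", outputDirectory, readErr)
            return nil
        }
        var fuzzers []string
        for _, entry := range entries {
            if entry.IsDir() {
                fuzzers = append(fuzzers, fmt.Sprintf("%s%c%s", outputDirectory, os.PathSeparator, entry.Name()))
            }
        }
        return fuzzers
    }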