Init commit

maride 2020-06-19 15:31:20 +02:00
commit ed6baf7753
9 changed files with 521 additions and 0 deletions

go.mod Normal file (3 additions)

@@ -0,0 +1,3 @@
module github.com/maride/afl-transmit

go 1.13

logistic/packer.go Normal file (98 additions)

@@ -0,0 +1,98 @@
package logistic
import (
"archive/tar"
"bytes"
"encoding/base64"
"fmt"
"io/ioutil"
"log"
"os"
)
// Packs the relevant parts of a fuzzer directory - queue/, fuzz_bitmap and fuzzer_stats - together with the fuzzer name into a single packet
func PackFuzzer(fuzzerName string, directory string) []byte {
// Gather contents
contentArray := [][]byte{
[]byte(fuzzerName),
packSingleFile(directory, "fuzz_bitmap"),
packSingleFile(directory, "fuzzer_stats"),
packQueueFiles(directory),
}
// Base64-encode every part and concatenate them into the packet, separated by newlines
var result []byte
for _, a := range contentArray {
b64Buf := make([]byte, base64.StdEncoding.EncodedLen(len(a)))
base64.StdEncoding.Encode(b64Buf, a)
// Add newline char as separator
result = append(result, '\n')
// Append base64 encoded content
result = append(result, b64Buf...)
}
// Return the result: one big byte array of concatenated base64-encoded parts
return result
}
// Reads a single file and returns its contents
func packSingleFile(directory string, fileName string) []byte {
path := fmt.Sprintf("%s%c%s", directory, os.PathSeparator, fileName)
contents, readErr := ioutil.ReadFile(path)
if readErr != nil {
log.Printf("Failed to read file %s: %s", path, readErr)
return nil
}
return contents
}
// Packs the files in the given directory into a tar archive
func packQueueFiles(directory string) []byte {
var tarBuffer bytes.Buffer
tarWriter := tar.NewWriter(&tarBuffer)
// Get list of queue files
queuePath := fmt.Sprintf("%s%cqueue", directory, os.PathSeparator)
filesInDir, readErr := ioutil.ReadDir(queuePath)
if readErr != nil {
log.Printf("Failed to list directory content of %s: %s", directory, readErr)
return nil
}
// Walk over each file and add it to our archive
for _, f := range filesInDir {
// Check if we hit a directory (e.g. '.state')
if f.IsDir() {
// Ignore directories altogether
continue
}
// Create header for this file
header := &tar.Header{
Name: f.Name(),
Mode: 0600,
Size: f.Size(),
}
// Read file
path := fmt.Sprintf("%s%c%s", queuePath, os.PathSeparator, f.Name())
contents, readErr := ioutil.ReadFile(path)
if readErr != nil {
log.Printf("Failed to read file %s: %s", path, readErr)
continue
}
// Add header and contents to archive
tarWriter.WriteHeader(header)
tarWriter.Write(contents)
}
// Close constructed tar archive
tarWriter.Close()
// And return it
return tarBuffer.Bytes()
}
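
As a rough illustration of the packet layout produced by PackFuzzer (the fuzzer name and path below are placeholders), the following sketch packs a local fuzzer directory and splits the result back into its four newline-separated base64 parts:

package main

import (
	"bytes"
	"fmt"

	"github.com/maride/afl-transmit/logistic"
)

func main() {
	// Pack a local fuzzer directory (hypothetical path)
	packet := logistic.PackFuzzer("fuzzer01", "/tmp/afl-out/fuzzer01")

	// Every part is prefixed with '\n', so trim the leading separator before splitting -
	// the same thing the unpacker does on the receiving side.
	parts := bytes.Split(bytes.Trim(packet, "\n"), []byte("\n"))
	fmt.Printf("%d parts: name, fuzz_bitmap, fuzzer_stats, queue tar\n", len(parts))
}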

logistic/unpacker.go Normal file (98 additions)

@@ -0,0 +1,98 @@
package logistic
import (
"archive/tar"
"bytes"
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"strings"
)
// Unpacks a raw packet, creates the contained files and stores them in the target directory. May return an error if one occurs
func UnpackInto(raw []byte, targetDir string) error {
// Clean raw bytes: trim possibly leading and/or trailing newlines
raw = bytes.Trim(raw, "\n")
// Process raw bytes
splitted := bytes.Split(raw, []byte("\n"))
if len(splitted) != 4 {
// We are currently packing four things in there (the fuzzer name, fuzz_bitmap, fuzzer_stats, queue/)
// So if we don't get exactly four parts, we have a malformed packet
return fmt.Errorf("unable to unpack packet: Expected 4 parts, got %d", len(splitted))
}
// base64-decode each part, keeping only the bytes that were actually decoded
for i, s := range splitted {
b64Buf := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
decodedLen, decodeErr := base64.StdEncoding.Decode(b64Buf, s)
if decodeErr != nil {
return fmt.Errorf("unable to unpack packet: part %d is not valid base64: %s", i, decodeErr)
}
splitted[i] = b64Buf[:decodedLen]
}
// Check filename, and process it
fuzzerName := string(bytes.TrimRight(splitted[0], "\x00"))
if strings.Contains(fuzzerName, "/") {
return fmt.Errorf("received file name with a slash, discarding whole packet for fuzzer \"%s\"", fuzzerName)
}
targetDir = fmt.Sprintf("%s%c%s", targetDir, os.PathSeparator, fuzzerName)
// Create the target directory for this fuzzer if it does not exist yet
mkdirErr := os.MkdirAll(targetDir, 0700)
if mkdirErr != nil {
// Creating the target directory failed, so we won't proceed unpacking into a non-existent directory
return fmt.Errorf("unable to unpack packet: could not create directory at %s: %s", targetDir, mkdirErr)
}
// Process every single part
unpackSingleFile(splitted[1], targetDir, "fuzz_bitmap")
unpackSingleFile(splitted[2], targetDir, "fuzzer_stats")
unpackQueueDir(splitted[3], targetDir)
return nil
}
// Writes the contents to the target
func unpackSingleFile(raw []byte, targetDirectory string, filename string) {
path := fmt.Sprintf("%s%c%s", targetDirectory, os.PathSeparator, filename)
writeErr := ioutil.WriteFile(path, raw, 0644)
if writeErr != nil {
log.Printf("Unable to write to file %s: %s", path, writeErr)
}
}
// Writes all files in the raw byte array into the target directory
func unpackQueueDir(raw []byte, targetDir string) {
// Open TAR archive
var tarBuffer bytes.Buffer
tarBuffer.Write(raw)
tarReader := tar.NewReader(&tarBuffer)
// Set correct path for files
targetDir = fmt.Sprintf("%s%cqueue", targetDir, os.PathSeparator)
// Remove queue directory and re-fill it
os.RemoveAll(targetDir)
os.Mkdir(targetDir, 0755)
// Iterate over all files in the archive
for {
// Read header
header, headerErr := tarReader.Next()
if headerErr == io.EOF {
// We reached the end of the TAR archive. Fine.
break
} else if headerErr != nil {
// Unknown error occurred
log.Printf("Error parsing TAR header entry: %s", headerErr)
break
}
// Write file
var fileBuffer bytes.Buffer
io.Copy(&fileBuffer, tarReader)
unpackSingleFile(fileBuffer.Bytes(), targetDir, header.Name)
}
}
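
A minimal round-trip sketch (paths are placeholders): packing a local fuzzer directory and unpacking the packet elsewhere should recreate fuzz_bitmap, fuzzer_stats and queue/ under a subdirectory named after the fuzzer:

package main

import (
	"log"

	"github.com/maride/afl-transmit/logistic"
)

func main() {
	// Pack the local fuzzer directory (hypothetical path) ...
	packet := logistic.PackFuzzer("fuzzer01", "/tmp/afl-out/fuzzer01")

	// ... and unpack it into a second output directory, as a receiving peer would.
	if unpackErr := logistic.UnpackInto(packet, "/tmp/afl-out-replica"); unpackErr != nil {
		log.Printf("Round-trip failed: %s", unpackErr)
	}
	// /tmp/afl-out-replica/fuzzer01/ now holds fuzz_bitmap, fuzzer_stats and queue/
}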

main.go Normal file (45 additions)

@@ -0,0 +1,45 @@
package main
import (
"flag"
"fmt"
"github.com/maride/afl-transmit/net"
"github.com/maride/afl-transmit/watchdog"
"log"
)
var (
outputDirectory string
)
func main() {
// Register flags
watchdog.RegisterWatchdogFlags()
net.RegisterSenderFlags()
net.RegisterListenFlags()
RegisterGlobalFlags()
flag.Parse()
// Check if we have the only required argument present - outputDirectory
if outputDirectory == "" {
fmt.Println("Please specify fuzzer-directory. See help (--help) for details.")
return
}
// Read peers file
net.ReadPeers()
// Start watchdog for local afl instances
go watchdog.WatchFuzzers(outputDirectory)
// Listen for incoming connections
listenErr := net.Listen(outputDirectory)
if listenErr != nil {
log.Println(listenErr)
}
}
// Registers flags which are required by multiple modules and need to be handled here
func RegisterGlobalFlags() {
flag.StringVar(&outputDirectory, "fuzzer-directory", "", "The output directory of the fuzzer(s)")
}

net/const.go Normal file (3 additions)

@@ -0,0 +1,3 @@
package net
const ServerPort = 1337

net/listener.go Normal file (72 additions)

@@ -0,0 +1,72 @@
package net
import (
"bufio"
"flag"
"fmt"
"github.com/maride/afl-transmit/logistic"
"io"
"log"
"net"
"strings"
)
var (
port int
)
// Registers the flags required for the listener
func RegisterListenFlags() {
flag.IntVar(&port, "port", ServerPort, "Port to bind server component to")
}
// Sets up a listener and listens forever for packets on the given port, storing their contents in the outputDirectory
func Listen(outputDirectory string) error {
// Create listener
addrStr := fmt.Sprintf(":%v", port)
listener, listenErr := net.Listen("tcp", addrStr)
if listenErr != nil {
return listenErr
}
// Prepare output directory path
outputDirectory = strings.TrimRight(outputDirectory, "/")
// Listen forever
for {
// Accept connection
conn, connErr := listener.Accept()
if connErr != nil {
log.Printf("Encountered error while accepting from %s: %s", conn.RemoteAddr().String(), connErr)
continue
}
// Handle in a separate thread
go handle(conn, outputDirectory)
}
}
// Handles a single connection, and unpacks the received data into outputDirectory
func handle(conn net.Conn, outputDirectory string) {
// Make sure to close connection on return
defer conn.Close()
// Create the buffered reader once, and loop until we either hit EOF or an error
connReader := bufio.NewReader(conn)
for {
// Read raw content until the sender closes the connection
cont, contErr := connReader.ReadString('\x00')
if contErr == io.EOF {
// We received the whole content, time to process it
unpackErr := logistic.UnpackInto([]byte(cont), outputDirectory)
if unpackErr != nil {
log.Printf("Encountered error processing packet from %s: %s", conn.RemoteAddr().String(), unpackErr)
}
return
} else if contErr != nil {
// We encountered an error on that connection
log.Printf("Encountered error while reading from %s: %s", conn.RemoteAddr().String(), contErr)
return
}
}
}

net/peer.go Normal file (30 additions)

@@ -0,0 +1,30 @@
package net
import (
"log"
"net"
)
type Peer struct {
Address string
}
// Sends the given content to the peer
func (p *Peer) SendToPeer(content []byte) {
// Build up a connection
tcpConn, dialErr := net.Dial("tcp", p.Address)
if dialErr != nil {
log.Printf("Unable to connect to peer %s: %s", p.Address, dialErr)
return
}
// Make sure the connection gets closed on every return path - closing it also signals EOF to the receiving peer
defer tcpConn.Close()
// Send the packet
_, writeErr := tcpConn.Write(content)
if writeErr != nil {
log.Printf("Unable to write to peer %s: %s", tcpConn.RemoteAddr().String(), writeErr)
return
}
}
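
A minimal transport sketch (the peer address and path are placeholders): every packet travels over its own TCP connection, and closing that connection is what tells the listener that the packet is complete:

package main

import (
	"github.com/maride/afl-transmit/logistic"
	"github.com/maride/afl-transmit/net"
)

func main() {
	// Pack a local fuzzer directory (hypothetical path) ...
	packet := logistic.PackFuzzer("fuzzer01", "/tmp/afl-out/fuzzer01")

	// ... and push it to a single peer. SendToPeer dials, writes and closes,
	// so the listener on the remote side reads until EOF and then unpacks.
	peer := net.Peer{Address: "10.0.0.3:1337"}
	peer.SendToPeer(packet)
}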

net/sender.go Normal file (96 additions)

@@ -0,0 +1,96 @@
package net
import (
"flag"
"io/ioutil"
"log"
"strings"
)
var (
peers []Peer
peerFile string
peerString string
)
// Registers flags required for peer parsing
func RegisterSenderFlags() {
flag.StringVar(&peerFile, "peersFile", "", "File which contains the addresses for all peers, one per line")
flag.StringVar(&peerString, "peers", "", "Addresses to peers, comma-separated.")
}
// Send the given content to all peers
func SendToPeers(content []byte) {
for _, p := range peers {
p.SendToPeer(content)
}
}
// Parses both peerString and peerFile, and adds all the peers to an internal array.
func ReadPeers() {
// Read peer file if it is given
if peerFile != "" {
fileErr := readPeersFile(peerFile)
// Check if we encountered errors
if fileErr != nil {
log.Printf("Failed to read peer file: %s", fileErr)
}
}
// Read peer string if it is given
if peerString != "" {
readPeersString(peerString)
}
// Remove doubles.
removeDoubledPeers()
log.Printf("Configured %d unique peers.", len(peers))
}
// Reads the peer file at the given path, parses it and adds the newly created Peers to the internal peers array
func readPeersFile(path string) error {
// Read file
readContBytes, readErr := ioutil.ReadFile(path)
if readErr != nil {
return readErr
}
// Convert to string
readCont := string(readContBytes)
// Iterate over it, line by line
for _, line := range strings.Split(readCont, "\n") {
// Skip empty lines, e.g. the one produced by a trailing newline
line = strings.TrimSpace(line)
if line == "" {
continue
}
// Append newly created peer to array
peers = append(peers, Peer{
Address: line,
})
}
return nil
}
// Reads peers from the given string, parses them and adds the newly created Peers to the internal peers array
func readPeersString(raw string) {
for _, peer := range strings.Split(raw, ",") {
// Append newly created peer to array
peers = append(peers, Peer{
Address: strings.TrimSpace(peer),
})
}
}
// Iterates over the peers array and removes doubles
func removeDoubledPeers() {
// Outer loop - go over all peers
for i := 0; i < len(peers); i++ {
// Inner loop - go over peers after the current (i) one, removing those with the same address
for j := i + 1; j < len(peers); j++ {
if peers[j].Address == peers[i].Address {
// Double found, remove j'th element
peers = append(peers[:j], peers[j+1:]...)
}
}
}
}
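
A small in-package sketch (the test file and the addresses are hypothetical) of how the two peer sources combine and how removeDoubledPeers collapses duplicates:

package net

import "testing"

func TestPeerDeduplication(t *testing.T) {
	// Start with a clean peer list
	peers = nil

	// Feed peers in via the comma-separated string format ...
	readPeersString("10.0.0.2:1337, 10.0.0.3:1337")
	// ... and via a second source that repeats one of the addresses
	readPeersString("10.0.0.3:1337")

	// Collapse duplicates
	removeDoubledPeers()
	if len(peers) != 2 {
		t.Fatalf("expected 2 unique peers, got %d", len(peers))
	}
}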

watchdog/watchdog.go Normal file (76 additions)

@@ -0,0 +1,76 @@
package watchdog
import (
"flag"
"fmt"
"github.com/maride/afl-transmit/logistic"
"github.com/maride/afl-transmit/net"
"io/ioutil"
"log"
"os"
"path/filepath"
"time"
)
var (
rescanSecs int
)
// Register flags
func RegisterWatchdogFlags() {
flag.IntVar(&rescanSecs, "rescan-secs", 30, "Seconds to wait before rescanning local fuzzer directories")
}
// Watches over the specified output directory, sends updates to all peers, and re-scans after the configured number of seconds
func WatchFuzzers(outputDirectory string) {
localFuzzers := detectLocalFuzzers(outputDirectory)
// Loop forever
for {
// Loop over local fuzzers
for _, localFuzzDir := range localFuzzers {
// Pack important parts of the fuzzer directory into a byte array
fuzzerName := filepath.Base(localFuzzDir)
packedFuzzer := logistic.PackFuzzer(fuzzerName, localFuzzDir)
// and send it to our peers
net.SendToPeers(packedFuzzer)
}
// Sleep a bit
time.Sleep(time.Duration(rescanSecs) * time.Second)
}
}
// Searches the specified output directory for fuzzers that run locally. Detection relies on a file that is not shared between fuzzers because it is not required for clusterized fuzzing: 'cmdline'.
func detectLocalFuzzers(outputDirectory string) []string {
var localFuzzers []string
// List files (read: fuzzers) in output directory
filesInDir, readErr := ioutil.ReadDir(outputDirectory)
if readErr != nil {
log.Printf("Failed to list directory content of %s: %s", outputDirectory, readErr)
return nil
}
// Walk over each and search for 'cmdline' file
for _, f := range filesInDir {
// Get stat for maybe-existent file
cmdlinePath := fmt.Sprintf("%s%c%s%ccmdline", outputDirectory, os.PathSeparator, f.Name(), os.PathSeparator)
_, statErr := os.Stat(cmdlinePath)
if os.IsNotExist(statErr) {
// File does not exist. That's fine. Next.
continue
} else if statErr != nil {
// An error occurred. File is maybe in a Schrödinger state.
log.Printf("Unable to stat file %s: %s", cmdlinePath, statErr)
continue
}
// File exists, let's watch it
fullPath := fmt.Sprintf("%s%c%s", outputDirectory, os.PathSeparator, f.Name())
localFuzzers = append(localFuzzers, fullPath)
}
return localFuzzers
}
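
An in-package sketch of the detection rule (the test file and directory layout are hypothetical): only fuzzer directories that contain a 'cmdline' file are treated as local:

package watchdog

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"
)

func TestDetectLocalFuzzers(t *testing.T) {
	// Build a throwaway output directory with one local and one synced fuzzer
	outputDir, tmpErr := ioutil.TempDir("", "afl-transmit-test")
	if tmpErr != nil {
		t.Fatal(tmpErr)
	}
	defer os.RemoveAll(outputDir)

	// fuzzer01 runs on this machine, so its directory contains a cmdline file
	os.MkdirAll(filepath.Join(outputDir, "fuzzer01"), 0755)
	ioutil.WriteFile(filepath.Join(outputDir, "fuzzer01", "cmdline"), []byte("./target"), 0644)

	// fuzzer02 was only synced in and has no cmdline file
	os.MkdirAll(filepath.Join(outputDir, "fuzzer02"), 0755)

	if found := detectLocalFuzzers(outputDir); len(found) != 1 {
		t.Fatalf("expected 1 local fuzzer, got %d", len(found))
	}
}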