Add output unit tests; add output.Process to strip debug; make MySQL debug fields omitempty too; use processor to strip data in Process()

This commit is contained in:
Justin Bastress 2018-04-03 17:15:20 -04:00
parent e4bd0bcc89
commit 7a013ca261
5 changed files with 305 additions and 698 deletions
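Roughly, the change wires the `zgrab:"debug"` struct tag into a new `output.Process` step that strips debug fields before results are marshalled. Below is a minimal usage sketch, assuming only the `output.Process` / `NewProcessor` / `Verbose` API visible in the diff; the `Results` struct and its values are made up for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/zmap/zgrab2/lib/output"
)

// Results is a hypothetical scan result: RawPackets is only wanted in
// verbose output, so it carries the zgrab:"debug" tag.
type Results struct {
	Banner     string   `json:"banner,omitempty"`
	RawPackets []string `json:"raw_packets,omitempty" zgrab:"debug"`
}

func main() {
	in := Results{Banner: "hello", RawPackets: []string{"deadbeef"}}

	// Default options: debug fields are zeroed out in the returned copy.
	stripped, err := output.Process(in)
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(stripped)
	fmt.Println(string(out)) // raw_packets is gone (zeroed, then dropped by omitempty)

	// With Verbose set, debug fields are kept.
	proc := output.NewProcessor()
	proc.Verbose = true
	kept, _ := proc.Process(in)
	out, _ = json.Marshal(kept)
	fmt.Println(string(out)) // raw_packets is present
}
```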

View File

@ -14,6 +14,7 @@ all: zgrab2
# Test currently only runs on the modules folder because some of the
# third-party libraries in lib (e.g. http) are failing.
test:
cd lib/output/test && go test -v ./...
cd modules && go test -v ./...
gofmt:

View File

@ -3,17 +3,9 @@
package output
import (
"fmt"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"unicode"
"unicode/utf8"
"github.com/sirupsen/logrus"
)
// ZGrabTag holds the information from the `zgrab` tag. Currently only supports
@ -38,261 +30,178 @@ func parseZGrabTag(value string) *ZGrabTag {
return &ret
}
// Check if the type is primitive, or eventually points to a primitive type. // ProcessCallback is called for each element in a struct; if it returns
func isPrimitiveType(what reflect.Type) bool { // a non-nil value, that value will be used and further processing on
return isPrimitiveKind(dereferenceType(what).Kind()) // that element will be skipped.
type ProcessCallback func(*Processor, reflect.Value) *reflect.Value
type pathEntry struct {
field string
value reflect.Value
} }
// Types that are considered to be non-primitive // Processor holds the state for a process run. A given processor should
var compoundKinds = map[reflect.Kind]bool{ // only be used on a single thread.
reflect.Struct: true, type Processor struct {
reflect.Slice: true, // Callback is a function that gets called on each element being
reflect.Array: true, // processed. If the callback returns a non-nil value, that value is
reflect.Map: true, // returned immediately instead of doing any further processing on
reflect.Interface: true, // the element.
} Callback ProcessCallback
// Get the eventual type for JSON-encoding purposes // Verbose determines whether `zgrab:"debug"` fields will be
func dereferenceType(what reflect.Type) reflect.Type { // included in the output.
for ; what.Kind() == reflect.Ptr; what = what.Elem() {
}
return what
}
// Check if the kind is primitive
func isPrimitiveKind(kind reflect.Kind) bool {
ret, ok := compoundKinds[kind]
return !(ret && ok)
}
// OutputProcessor holds the options and state for a processing run.
type OutputProcessor struct {
// Verbose indicates that debug fields should not be stripped out.
Verbose bool Verbose bool
depth int
mutex sync.Locker // Path is the current path being processed, from the root element.
// Used for debugging purposes only.
// If a panic occurs, the path will point to the element that caused
// the problem.
Path []pathEntry
} }
// NewOutputProcessor gets a new OutputProcessor with the default settings. // NewProcessor returns a new Processor instance with the default settings.
func NewOutputProcessor() *OutputProcessor { func NewProcessor() *Processor {
return &OutputProcessor{ return &Processor{}
mutex: &sync.Mutex{}, }
Verbose: false,
// getPath returns a string representation of the current path.
func (processor *Processor) getPath() string {
ret := make([]string, len(processor.Path))
for i, v := range processor.Path {
ret[i] = v.field
} }
return strings.Join(ret, "->")
} }
// Process the input using the options in the given OutputProcessor. // callback invokes the callback (or the default, if none is present).
func (processor *OutputProcessor) Process(v interface{}) (interface{}, error) { // The callback can return a non-nil value to override the default behavior.
processor.mutex.Lock() func (processor *Processor) callback(v reflect.Value) *reflect.Value {
defer func() { callback := processor.Callback
if processor.depth != 0 { if callback == nil {
logrus.Warnf("process exited at nonzero depth %d", processor.depth) callback = NullProcessCallback
processor.depth = 0
}
processor.mutex.Unlock()
}()
ret, err := processor.process(v)
if err != nil {
return nil, err
} }
return ret.Interface(), nil return callback(processor, v)
} }
// Process the input using the default options (strip debug fields). // NullProcessCallback is the default ProcessCallback; it just returns nil.
func Process(v interface{}) (interface{}, error) { func NullProcessCallback(w *Processor, v reflect.Value) *reflect.Value {
return NewOutputProcessor().Process(v) return nil
} }
// Internal version to catch panics // duplicate a *primitive* value by doing a set-by-value (non-primitive values
func (processor *OutputProcessor) process(v interface{}) (ret reflect.Value, err error) { // should not use this).
defer func() { func (processor *Processor) duplicate(v reflect.Value) reflect.Value {
if r := recover(); r != nil {
if _, ok := r.(runtime.Error); ok {
panic(r)
}
if s, ok := r.(string); ok {
panic(s)
}
ret = reflect.ValueOf(nil)
err = r.(error)
}
}()
return processor.processValue(reflect.ValueOf(&v).Elem()), nil
}
// Handle an error
func (processor *OutputProcessor) error(err error) {
panic(err)
}
// Process the given value, returning the processed copy.
func (processor *OutputProcessor) processValue(v reflect.Value) reflect.Value {
return valueProcessor(v)(processor, v)
}
// processorFunc takes an OutputProcessor and a value, and returns a processed copy of the value.
type processorFunc func(s *OutputProcessor, v reflect.Value) reflect.Value
// processorCache maps reflect.Type to processorFunc, and caches the processors
// for the various types.
var processorCache sync.Map
// valueProcessor gets a processorFunc for the given actual value.
func valueProcessor(v reflect.Value) processorFunc {
if !v.IsValid() {
return dupeProcessor
}
return typeProcessor(v.Type())
}
// typeProcessor gets (potentially cached) a processorFunc for the given type.
func typeProcessor(t reflect.Type) processorFunc {
if fi, ok := processorCache.Load(t); ok {
return fi.(processorFunc)
}
// To deal with recursive types, populate the map with an
// indirect func before we build it. This type waits on the
// real func (f) to be ready and then calls it. This indirect
// func is only used for recursive types.
var (
wg sync.WaitGroup
f processorFunc
)
wg.Add(1)
fi, loaded := processorCache.LoadOrStore(t, processorFunc(func(processor *OutputProcessor, v reflect.Value) reflect.Value {
wg.Wait()
return f(processor, v)
}))
if loaded {
return fi.(processorFunc)
}
// Compute the real processor and replace the indirect func with it.
f = newTypeProcessor(t)
wg.Done()
processorCache.Store(t, f)
return f
}
// newTypeProcessor constructs a processorFunc for a type.
func newTypeProcessor(t reflect.Type) processorFunc {
switch t.Kind() {
case reflect.Interface:
return interfaceProcessor
case reflect.Struct:
return newStructProcessor(t)
case reflect.Map:
return newMapProcessor(t)
case reflect.Slice:
return newSliceProcessor(t)
case reflect.Array:
return newArrayProcessor(t)
case reflect.Ptr:
return newPtrProcessor(t)
default:
return dupeProcessor
}
}
// dupeProcessor is a processorFunc that returns a plain duplicate of the given
// (hopefully primitive) value.
func dupeProcessor(_ *OutputProcessor, v reflect.Value) reflect.Value {
ret := reflect.New(v.Type()).Elem()
ret.Set(v)
return ret
}
// interfaceProcessor returns a processor for the value underlying the interface. // Add a path with the given key and value to the stack.
func interfaceProcessor(processor *OutputProcessor, v reflect.Value) reflect.Value { func (processor *Processor) pushPath(key string, value reflect.Value) {
if v.IsNil() { processor.Path = append(processor.Path, pathEntry{
return reflect.New(v.Type()).Elem() // nil field: key,
} value: value,
// FIXME: re-wrap in interface{}? })
ret := processor.processValue(v.Elem()) }
// Get the most recent path entry.
func (processor *Processor) topPath() *pathEntry {
return &processor.Path[len(processor.Path)-1]
}
// Remove the most recent entry from the stack (and return it).
func (processor *Processor) popPath() *pathEntry {
ret := processor.topPath()
processor.Path = processor.Path[0 : len(processor.Path)-1]
return ret return ret
} }
// structProcessor holds the state for processing a single struct type. // Helper to check if a value is nil. Non-nillable values are by definition
type structProcessor struct { // not nil (though they may be "zero").
// what is the type being processed. func isNil(v reflect.Value) bool {
what reflect.Type return (v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface || v.Kind() == reflect.Slice) && v.IsNil()
// fields contain the needed information to identify / locate / read / set
// the value of the field on an instance of the struct.
fields []field
// fieldEncs are the processorFuncs for the associated fields.
fieldEncs []processorFunc
} }
func setToNil(value reflect.Value) { // Check if a field should be copied over to the return value.
value.Set(reflect.Zero(value.Type())) // The only time a field should be wiped is if the field has the `zgrab:"debug"`
// tag set, and if the verbose flag is off.
// There is an additional caveat that, if the field is already nil, leave it
// (so that we don't set it to a non-nil "zero" value).
func (processor *Processor) shouldWipeField(parent reflect.Value, index int) bool {
tField := parent.Type().Field(index)
// Rather than zeroing out nil values, handle them at the outer level
if isNil(parent.Field(index)) {
//fmt.Printf("Bogus copy becase nil: %s (%#v) to zero\n", processor.getPath(), tField)
return false
}
tag := parseZGrabTag(tField.Tag.Get("zgrab"))
// The only time a field is wiped is when it has the debug tag and Verbose is off.
return tag.Debug && !processor.Verbose
} }
// structProcessor.process processes each field in se.fields (unless omitted). // Process the struct instance.
func (se *structProcessor) process(processor *OutputProcessor, v reflect.Value) reflect.Value { func (processor *Processor) processStruct(v reflect.Value) reflect.Value {
t := v.Type()
ret := reflect.New(v.Type()).Elem() ret := reflect.New(v.Type()).Elem()
// Attempt a naive copy, to pick up any 'hidden' fields (debug fields will // Two possibilities:
// be zeroed out later). // (a) do ret.Set(v), then explicitly zero-out any debug fields.
// (b) only copy over fields that are non-debug.
// Going with (a)
ret.Set(v) ret.Set(v)
processor.depth++ for i := 0; i < v.NumField(); i++ {
defer func() { tField := t.Field(i)
processor.depth-- field := v.Field(i)
}() retField := ret.Field(i)
for i, f := range se.fields { if !retField.CanSet() {
fv := fieldByIndex(v, f.index) // skip non-exportable fields
if !fv.IsValid() {
// e.g. it's a field inside a null pointer
continue continue
} }
if processor.shouldWipeField(v, i) {
if f.zgrabTag.Debug && !processor.Verbose { retField.Set(reflect.Zero(field.Type()))
// overwrite the field with the zero value continue
rfv := writableFieldByIndex(ret, f.index)
if rfv.CanSet() {
setToNil(rfv)
} else {
logrus.Warnf("zgrab output process: Cannot nil over field %s (%v)", f.name, rfv)
}
} else {
// get output field
rfv := writableFieldByIndex(ret, f.index)
if rfv.CanSet() {
// set output field to processed value
rfv.Set(se.fieldEncs[i](processor, fv))
} else {
logrus.Warnf("zgrab output process: Cannot copy over field %s (%v)", f.name, rfv)
}
} }
processor.pushPath(fmt.Sprintf("%s(%d)", tField.Name, i), field)
copy := processor.process(field)
processor.popPath()
retField.Set(copy)
} }
return ret
}
// newStructProcessor constructs a processor for the struct. // Process a pointer (make a new pointer pointing to a new copy of v's referent).
func newStructProcessor(t reflect.Type) processorFunc { func (processor *Processor) processPtr(v reflect.Value) reflect.Value {
fields := cachedTypeFields(t) ret := reflect.New(v.Type().Elem()).Elem()
se := &structProcessor{ if v.IsNil() {
what: t, //fmt.Println("Goodbye to ", processor.getPath())
fields: fields, return ret.Addr()
fieldEncs: make([]processorFunc, len(fields)),
} }
for i, f := range fields { processor.pushPath("*", v.Elem())
se.fieldEncs[i] = typeProcessor(typeByIndex(t, f.index)) copy := processor.process(v.Elem())
} processor.popPath()
return se.process ret.Set(copy)
return ret.Addr()
} }
// mapProcessor holds the state for a specific type of map processor. // Process an interface instance (make a new interface and point it to a copy of
type mapProcessor struct { // v's referent).
elemEnc processorFunc func (processor *Processor) processInterface(v reflect.Value) reflect.Value {
ret := reflect.New(v.Type()).Elem()
if v.IsNil() {
return ret.Addr()
}
processor.pushPath("[interface:"+v.Type().Name()+")]", v.Elem())
copy := processor.process(v.Elem())
processor.popPath()
ret.Set(copy)
return ret
} }
// mapProcessor.process processes the given compound map type -- processes each // Process a map -- copy over all keys and (copies of) values into a new map.
// value and returns a copy of it. func (processor *Processor) processMap(v reflect.Value) reflect.Value {
func (me *mapProcessor) process(processor *OutputProcessor, v reflect.Value) reflect.Value {
if v.IsNil() {
return reflect.New(v.Type()).Elem() // nil
}
@ -302,457 +211,109 @@ func (me *mapProcessor) process(processor *OutputProcessor, v reflect.Value) ref
ret.Set(reflect.MakeMap(v.Type()))
keys := v.MapKeys()
sv := make([]reflectWithString, len(keys))
for i, v := range keys {
sv[i].v = v
if err := sv[i].resolve(); err != nil {
processor.error(err)
}
}
for _, kv := range sv { for _, key := range keys {
ret.SetMapIndex(kv.v, me.elemEnc(processor, v.MapIndex(kv.v))) value := v.MapIndex(key)
processor.pushPath(fmt.Sprintf("[%v]", key), value)
copy := processor.process(value)
processor.popPath()
ret.SetMapIndex(key, copy)
} }
return ret
}
// newMapProcessor constructs a map processor for the given map type; primitive // Process an array (add copies of each element into a new array).
// types are just duplicated, while compound types get special handling. func (processor *Processor) processArray(v reflect.Value) reflect.Value {
func newMapProcessor(t reflect.Type) processorFunc {
if isPrimitiveType(t.Elem()) {
return dupeProcessor
}
me := &mapProcessor{typeProcessor(t.Elem())}
return me.process
}
// sliceProcessor just wraps an arrayProcessor, checking to make sure the value isn't nil.
type sliceProcessor struct {
arrayEnc processorFunc
}
// sliceProcessor.process just wraps the equivalent arrayProcessor.
func (se *sliceProcessor) process(processor *OutputProcessor, v reflect.Value) reflect.Value {
if v.IsNil() {
return reflect.New(v.Type()).Elem() // nil
}
ret := se.arrayEnc(processor, v)
return ret
}
// newSliceProcessor constructs a slice processorFunc -- for primitive types,
// just duplicates the slice, while compound types get special handling.
func newSliceProcessor(t reflect.Type) processorFunc {
if isPrimitiveType(t.Elem()) {
return dupeProcessor
}
enc := &sliceProcessor{newArrayProcessor(t)}
return enc.process
}
// arrayProcessor calls the elemEnc for each element of the array (or slice).
type arrayProcessor struct {
elemEnc processorFunc
}
// arrayProcessor.process creates a new slice/array, then calls the element
// processor on each element.
func (ae *arrayProcessor) process(processor *OutputProcessor, v reflect.Value) reflect.Value {
n := v.Len()
var ret reflect.Value
if v.Kind() == reflect.Slice {
// You cannot call Set() or Addr() on the slice directly; so we create
// the pointer to the slice, and then set ret = *ptr = make([]type, n, cap)
ret = reflect.New(v.Type()).Elem()
ret.Set(reflect.MakeSlice(v.Type(), n, v.Cap()))
} else {
ret = reflect.New(v.Type()).Elem()
}
for i := 0; i < n; i++ {
ret.Index(i).Set(ae.elemEnc(processor, v.Index(i)))
}
return ret
}
// newArrayProcessor constructs a new processorFunc
func newArrayProcessor(t reflect.Type) processorFunc {
if isPrimitiveType(t.Elem()) {
return dupeProcessor
}
enc := &arrayProcessor{typeProcessor(t.Elem())}
return enc.process
}
// ptrProcessor wraps the state for processing a single pointer type
type ptrProcessor struct {
elemEnc processorFunc
}
// ptrProcessor.process creates a new pointer then uses the element processor to full it.
func (pe *ptrProcessor) process(processor *OutputProcessor, v reflect.Value) reflect.Value {
if v.IsNil() {
return reflect.New(v.Type()).Elem() // nil
}
// type = *elem
// ret = new(type) = new(*elem)
ret := reflect.New(v.Type()).Elem() ret := reflect.New(v.Type()).Elem()
child := pe.elemEnc(processor, v.Elem()) for i := 0; i < v.Len(); i++ {
// *ret = &child elt := v.Index(i)
ret.Set(child.Addr()) processor.pushPath(fmt.Sprintf("[%d]", i), elt)
copy := processor.process(elt)
ret.Index(i).Set(copy)
processor.popPath()
}
return ret
}
// newPtrProcessor constructs a processorFunc for the given pointer type. // Return a copy of the given byte-slice-compatible value.
func newPtrProcessor(t reflect.Type) processorFunc { func (processor *Processor) copyByteSlice(v reflect.Value) reflect.Value {
enc := &ptrProcessor{typeProcessor(t.Elem())} ret := reflect.New(v.Type()).Elem()
return enc.process ret.Set(reflect.MakeSlice(v.Type(), v.Len(), v.Cap()))
reflect.Copy(ret, v)
return ret
} }
// isValidJSONNameTag checks if the `json` tag is a valid field name. // Process a slice (add copies of each element into a new slice with the same
func isValidJSONNameTag(s string) bool { // length and capacity).
if s == "" { func (processor *Processor) processSlice(v reflect.Value) reflect.Value {
return false if v.IsNil() {
panic(fmt.Errorf("Slice %#v (%s) is nil?\n", v, processor.getPath()))
} }
for _, c := range s { if v.Type().Elem().Kind() == reflect.Uint8 {
switch { return processor.copyByteSlice(v)
case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): }
// Backslash and quote chars are reserved, but
// otherwise any punctuation chars are allowed n := v.Len()
// in a tag name. ret := reflect.New(v.Type()).Elem()
default: ret.Set(reflect.MakeSlice(v.Type(), n, v.Cap()))
if !unicode.IsLetter(c) && !unicode.IsDigit(c) { for i := 0; i < n; i++ {
return false elt := v.Index(i)
processor.pushPath(fmt.Sprintf("[%d]", i), elt)
copy := processor.process(elt)
ret.Index(i).Set(copy)
processor.popPath()
}
return ret
}
// Process an arbitrary value. Invokes the processor's callback; if it returns
// a non-nil value, return that. Otherwise, continue recursively processing
// the value.
func (processor *Processor) process(v reflect.Value) reflect.Value {
temp := processor.callback(v)
if temp != nil {
return *temp
}
if isNil(v) {
// Just leave nil values alone.
return v
}
t := v.Type()
switch t.Kind() {
case reflect.Struct:
return processor.processStruct(v)
case reflect.Ptr:
return processor.processPtr(v)
case reflect.Slice:
return processor.processSlice(v)
case reflect.Array:
return processor.processArray(v)
case reflect.Interface:
return processor.processInterface(v)
case reflect.Map:
return processor.processMap(v)
default:
return processor.duplicate(v)
}
}
// Process the given value recursively using the options in this processor.
func (processor *Processor) Process(v interface{}) (ret interface{}, err error) {
defer func() {
if thrown := recover(); thrown != nil {
cast, ok := thrown.(error)
if !ok {
panic(thrown)
} }
err = cast
ret = nil
} }
} }()
return true return processor.process(reflect.ValueOf(v)).Interface(), nil
} }
// fieldByIndex gets the field of value with the given "index" (which is // Process the given value recursively using the default options.
// actually a sequence of indexes). func Process(v interface{}) (interface{}, error) {
func fieldByIndex(v reflect.Value, index []int) reflect.Value { return NewProcessor().Process(v)
for _, i := range index {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
return reflect.Value{}
}
v = v.Elem()
}
v = v.Field(i)
}
return v
}
// Since a class's "fields" may actually be fields of its anonymous member
// structs, and some of these may include pointers, instantiate any nils along
// the way (as such, this should only be called if it is really going to be
// written).
func writableFieldByIndex(v reflect.Value, index []int) reflect.Value {
for _, i := range index {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
v = v.Elem()
}
v = v.Field(i)
}
return v
}
// typeByIndex gets the type of the field with the given "index"
func typeByIndex(t reflect.Type, index []int) reflect.Type {
for _, i := range index {
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
t = t.Field(i).Type
}
return t
}
// reflectWithString gets the string version of the given value (for use as a
// key value)
type reflectWithString struct {
v reflect.Value
s string
}
func (w *reflectWithString) resolve() error {
if w.v.Kind() == reflect.String {
w.s = w.v.String()
return nil
}
switch w.v.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
w.s = strconv.FormatInt(w.v.Int(), 10)
return nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
w.s = strconv.FormatUint(w.v.Uint(), 10)
return nil
}
panic("unexpected map key type")
}
// A field represents a single field found in a struct.
type field struct {
name string
nameBytes []byte // []byte(name)
equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
parent reflect.Type
tag bool
index []int
typ reflect.Type
zgrabTag ZGrabTag
}
// byIndex sorts field by index sequence.
type byIndex []field
// Len gets the length of the index sequence.
func (x byIndex) Len() int { return len(x) }
// Swap swaps the ith and jth indexes.
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
// Less compares the ith and jth index
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
return false
}
if xik != x[j].index[k] {
return xik < x[j].index[k]
}
}
return len(x[i].index) < len(x[j].index)
}
// typeFields returns a list of fields that JSON should recognize for the given type.
// The algorithm is breadth-first search over the set of structs to include - the top struct
// and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
// Anonymous fields to explore at the current level and the next.
current := []field{}
next := []field{{typ: t, parent: t}}
// Count of queued names for current level and the next.
count := map[reflect.Type]int{}
nextCount := map[reflect.Type]int{}
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
// Fields found.
var fields []field
for len(next) > 0 {
current, next = next, current[:0]
count, nextCount = nextCount, map[reflect.Type]int{}
for _, f := range current {
if visited[f.typ] {
continue
}
visited[f.typ] = true
// Scan f.typ for fields to include.
for i := 0; i < f.typ.NumField(); i++ {
sf := f.typ.Field(i)
if sf.Anonymous {
t := sf.Type
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
// If embedded, StructField.PkgPath is not a reliable
// indicator of whether the field is exported.
// See https://golang.org/issue/21122
if !isExported(t.Name()) && t.Kind() != reflect.Struct {
// Ignore embedded fields of unexported non-struct types.
// Do not ignore embedded fields of unexported struct types
// since they may have exported fields.
continue
}
} else if sf.PkgPath != "" {
// Ignore unexported non-embedded fields.
continue
}
tag := sf.Tag.Get("json")
if tag == "-" {
continue
}
name := strings.SplitN(tag, ",", 2)[0]
if !isValidJSONNameTag(name) {
name = ""
}
index := make([]int, len(f.index)+1)
copy(index, f.index)
index[len(f.index)] = i
ft := sf.Type
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
// Follow pointer.
ft = ft.Elem()
}
// Record found field and index sequence.
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
tagged := name != ""
if name == "" {
name = sf.Name
}
fields = append(fields, field{
name: name,
tag: tagged,
index: index,
typ: ft,
parent: t,
zgrabTag: *parseZGrabTag(sf.Tag.Get("zgrab")),
})
if count[f.typ] > 1 {
// If there were multiple instances, add a second,
// so that the annihilation code will see a duplicate.
// It only cares about the distinction between 1 or 2,
// so don't bother generating any more copies.
fields = append(fields, fields[len(fields)-1])
}
continue
}
// Record new anonymous struct to explore in next round.
nextCount[ft]++
if nextCount[ft] == 1 {
next = append(next, field{name: ft.Name(), index: index, typ: ft, parent: t})
}
}
}
}
sort.Slice(fields, func(i, j int) bool {
x := fields
// sort field by name, breaking ties with depth, then
// breaking ties with "name came from json tag", then
// breaking ties with index sequence.
if x[i].name != x[j].name {
return x[i].name < x[j].name
}
if len(x[i].index) != len(x[j].index) {
return len(x[i].index) < len(x[j].index)
}
if x[i].tag != x[j].tag {
return x[i].tag
}
return byIndex(x).Less(i, j)
})
// Delete all fields that are hidden by the Go rules for embedded fields,
// except that fields with JSON tags are promoted.
// The fields are sorted in primary order of name, secondary order
// of field index length. Loop over names; for each name, delete
// hidden fields by choosing the one dominant field that survives.
out := fields[:0]
for advance, i := 0, 0; i < len(fields); i += advance {
// One iteration per name.
// Find the sequence of fields with the name of this first field.
fi := fields[i]
name := fi.name
for advance = 1; i+advance < len(fields); advance++ {
fj := fields[i+advance]
if fj.name != name {
break
}
}
if advance == 1 { // Only one field with this name
out = append(out, fi)
continue
}
dominant, ok := dominantField(fields[i : i+advance])
if ok {
out = append(out, dominant)
}
}
fields = out
sort.Sort(byIndex(fields))
return fields
}
// isExported reports whether the identifier is exported.
func isExported(id string) bool {
r, _ := utf8.DecodeRuneInString(id)
return unicode.IsUpper(r)
}
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// JSON tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
// The fields are sorted in increasing index-length order. The winner
// must therefore be one with the shortest index length. Drop all
// longer entries, which is easy: just truncate the slice.
length := len(fields[0].index)
tagged := -1 // Index of first tagged field.
for i, f := range fields {
if len(f.index) > length {
fields = fields[:i]
break
}
if f.tag {
if tagged >= 0 {
// Multiple tagged fields at the same level: conflict.
// Return no field.
return field{}, false
}
tagged = i
}
}
if tagged >= 0 {
return fields[tagged], true
}
// All remaining fields have the same length. If there's more than one,
// we have a conflict (two fields named "X" at the same level) and we
// return no field.
if len(fields) > 1 {
return field{}, false
}
return fields[0], true
}
var fieldCache struct {
value atomic.Value // map[reflect.Type][]field
mu sync.Mutex // used only by writers
}
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
m, _ := fieldCache.value.Load().(map[reflect.Type][]field)
f := m[t]
if f != nil {
return f
}
// Compute fields without lock.
// Might duplicate effort but won't hold other computations back.
f = typeFields(t)
if f == nil {
f = []field{}
}
fieldCache.mu.Lock()
m, _ = fieldCache.value.Load().(map[reflect.Type][]field)
newM := make(map[reflect.Type][]field, len(m)+1)
for k, v := range m {
newM[k] = v
}
newM[t] = f
fieldCache.value.Store(newM)
fieldCache.mu.Unlock()
return f
} }
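The new `Processor.Callback` hook above is consulted for every element; returning a non-nil `*reflect.Value` overrides the default handling for that element. A hypothetical sketch of using it (the redaction callback and the input struct are invented; only `Processor`, `ProcessCallback`, and `Process` come from the diff):

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/zmap/zgrab2/lib/output"
)

func main() {
	// Hypothetical input value.
	in := struct {
		Name  string `json:"name"`
		Count int    `json:"count"`
	}{Name: "secret", Count: 3}

	proc := output.NewProcessor()
	// The callback runs for every element; returning a non-nil value
	// short-circuits the default copy/strip logic for that element.
	proc.Callback = func(p *output.Processor, v reflect.Value) *reflect.Value {
		if v.Kind() == reflect.String {
			redacted := reflect.ValueOf("<redacted>")
			return &redacted
		}
		return nil // fall through to the default handling
	}
	out, err := proc.Process(in)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out) // {Name:<redacted> Count:3}
}
```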

View File

@ -1,5 +1,7 @@
package test
// FIXME: This is in its own package to work around import loops.
import (
"encoding/json"
"fmt"
@ -15,20 +17,26 @@ import (
"strings"
"io/ioutil"
"os/exec"
"github.com/sirupsen/logrus"
jsonKeys "github.com/zmap/zcrypto/json"
"github.com/zmap/zcrypto/tls"
"github.com/zmap/zcrypto/x509"
"github.com/zmap/zcrypto/x509/pkix"
"github.com/zmap/zgrab2"
"github.com/zmap/zgrab2/lib/output"
"github.com/zmap/zgrab2/lib/output/types"
)
const doFailDiffs = false
// The tests operate by manually constructing the stripped versions of the output.
type Strippable interface {
Stripped() string
}
// JSON encode the value, then decode it as a map[string]interface{}.
func toMap(v interface{}) map[string]interface{} {
ret, err := json.MarshalIndent(v, "", " ")
if err != nil {
@ -42,25 +50,29 @@ func toMap(v interface{}) map[string]interface{} {
return *theMap
}
func mapPath(v interface{}, keys ...string) (interface{}, error) { // Get v[key0][key1]...[keyN], or return nil, error if any values along the way
// are nil / not present / not maps.
func mapPath(theMap interface{}, keys ...string) (interface{}, error) {
for i, key := range keys { for i, key := range keys {
cast, ok := v.(map[string]interface{}) cast, ok := theMap.(map[string]interface{})
if !ok { if !ok {
return nil, fmt.Errorf("%s in map is not a map", strings.Join(keys[0:i], ".")) return nil, fmt.Errorf("%s in map is not a map", strings.Join(keys[0:i], "."))
} }
v = cast theMap = cast
next, ok := cast[key] next, ok := cast[key]
if !ok { if !ok {
return nil, fmt.Errorf("map does not contain %s", strings.Join(keys[0:i+1], ".")) return nil, fmt.Errorf("map does not contain %s", strings.Join(keys[0:i+1], "."))
} }
v = next theMap = next
} }
return v, nil return theMap, nil
} }
func nilOut(v map[string]interface{}, keys ...string) error { // Set theMap[key0][key1]...[keyN] = value, or return error if any values along
// the way are nil / not present / not maps.
func setMapValue(theMap map[string]interface{}, value interface{}, keys ...string) error {
lastIndex := len(keys) - 1 lastIndex := len(keys) - 1
out, err := mapPath(v, keys[0:lastIndex]...) out, err := mapPath(theMap, keys[0:lastIndex]...)
if err != nil { if err != nil {
return err return err
} }
@ -68,7 +80,23 @@ func nilOut(v map[string]interface{}, keys ...string) error {
if !ok { if !ok {
return fmt.Errorf("%s in map is not a map", strings.Join(keys[0:lastIndex], ".")) return fmt.Errorf("%s in map is not a map", strings.Join(keys[0:lastIndex], "."))
} }
cast[keys[lastIndex]] = nil cast[keys[lastIndex]] = value
return nil
}
// delete the value at theMap[key0][key1]...[keyN], or return an error if any
// values along the way are nil / not present / not maps.
func delOut(theMap map[string]interface{}, keys ...string) error {
lastIndex := len(keys) - 1
out, err := mapPath(theMap, keys[0:lastIndex]...)
if err != nil {
return err
}
cast, ok := out.(map[string]interface{})
if !ok {
return fmt.Errorf("%s in map is not a map", strings.Join(keys[0:lastIndex], "."))
}
delete(cast, keys[lastIndex])
return nil
}
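For clarity, a hypothetical usage sketch of the three map helpers above (it would live in this same test package; the nested map loosely mirrors the TLS log structure used in TestMySQL below):

```go
// exampleHelpers is illustrative only; it is not part of the commit.
func exampleHelpers() {
	m := map[string]interface{}{
		"tls": map[string]interface{}{
			"handshake_log": map[string]interface{}{
				"client_hello": map[string]interface{}{"random": "AAAA"},
				"server_hello": map[string]interface{}{"random": "BBBB"},
			},
		},
	}
	// Read a nested value.
	if v, err := mapPath(m, "tls", "handshake_log", "server_hello"); err == nil {
		fmt.Println(v)
	}
	// Overwrite a nested value in place.
	_ = setMapValue(m, nil, "tls", "handshake_log", "server_hello")
	// Delete a nested key entirely -- the MySQL test below uses this to drop
	// the client_hello that the processor strips from the TLS log.
	_ = delOut(m, "tls", "handshake_log", "client_hello")
}
```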
@ -85,14 +113,20 @@ func marshal(v interface{}) string {
return string(realRet)
}
// Helper to process then marshal the input using the given processor. // Get the processed copy of v using the given verbosity value.
func process(verbose bool, v interface{}) string { func process(verbose bool, v interface{}) interface{} {
proc := output.NewOutputProcessor() proc := output.NewProcessor()
proc.Verbose = verbose proc.Verbose = verbose
theCopy, err := proc.Process(v) ret, err := proc.Process(v)
if err != nil { if err != nil {
logrus.Fatalf("Error processing: %v", err) panic(err)
} }
return ret
}
// Return the marshalled processed copy of v using the given verbosity value.
func strip(verbose bool, v interface{}) string {
theCopy := process(verbose, v)
return marshal(theCopy)
}
@ -793,8 +827,16 @@ func getDeepAnon(id string, depth int) *DeepAnon {
} }
return ret return ret
} }
func fail(t *testing.T, id string, expected string, actual string) {
t.Logf("%s: mismatch: expected %s, got %s", id, expected, actual)
if doFailDiffs {
ioutil.WriteFile(id+"-expected.json", []byte(expected), 0)
ioutil.WriteFile(id+"-actual.json", []byte(actual), 0)
cmd := exec.Command("diff", "-u", id+"-expected.json", id+"-actual.json")
ret, _ := cmd.Output()
ioutil.WriteFile(id+".diff", ret, 0)
}
t.Errorf("%s mismatch", id)
}
@ -802,13 +844,13 @@ func fail(t *testing.T, id string, expected string, actual string) {
func TestProcess(t *testing.T) {
tests := map[string]Strippable{
"flat": getFlat("flat"),
"deep": getDeep("deep", 1), "deep": getDeep("deep", 3),
"deepAnon": getDeepAnon("deepAnon", 1), "deepAnon": getDeepAnon("deepAnon", 3),
"deepArray": getDeepArray("deepArray", 1), "deepArray": getDeepArray("deepArray", 3),
"deepIface": getDeepIface("deepIface", 1), "deepIface": getDeepIface("deepIface", 3),
"deepIfaceArray": getDeepIfaceArray("deepIfaceArray", 1), "deepIfaceArray": getDeepIfaceArray("deepIfaceArray", 3),
"deepIfaceSlice": getDeepIfaceSlice("deepIfaceSlice", 1), "deepIfaceSlice": getDeepIfaceSlice("deepIfaceSlice", 3),
"deepSlice": getDeepSlice("deepSlice", 1), "deepSlice": getDeepSlice("deepSlice", 3),
}
doTest := func(verbose bool, id string, input Strippable) {
@ -824,7 +866,7 @@ func TestProcess(t *testing.T) {
} else {
expected = input.Stripped()
}
actual := process(verbose, input) actual := strip(verbose, input)
if expected != actual {
fail(t, testID, expected, actual)
}
@ -833,10 +875,12 @@ func TestProcess(t *testing.T) {
var done sync.WaitGroup
done.Add(len(tests))
for k, v := range tests {
//done.Add(1)
go func(id string, input Strippable) {
defer done.Done()
doTest(verbose, id, input)
}(k, v)
//done.Wait()
}
done.Wait()
}
@ -886,7 +930,7 @@ type fakeMySQLScanResults struct {
// CharacterSet is the identifier for the character set the server is
// using. Returned in the initial HandshakePacket.
CharacterSet byte `json:"character_set" zgrab:"debug"` CharacterSet byte `json:"character_set,omitempty" zgrab:"debug"`
// StatusFlags is the set of status flags the server returned in the
// initial HandshakePacket. Each true entry in the map corresponds to
@ -917,7 +961,7 @@ type fakeMySQLScanResults struct {
RawPackets []string `json:"raw_packets,omitempty"`
// TLSLog contains the usual shared TLS logs.
TLSLog *types.TLSLog `json:"tls,omitempty"` TLSLog *zgrab2.TLSLog `json:"tls,omitempty"`
}
// TestMySQL builds a bogus MySQL result, and then manually checks that the
@ -945,7 +989,7 @@ func TestMySQL(t *testing.T) {
results.StatusFlags = map[string]bool{
"SERVER_STATUS_AUTOCOMMIT": true,
}
results.TLSLog = new(types.TLSLog) results.TLSLog = new(zgrab2.TLSLog)
results.TLSLog.HandshakeLog = &tls.ServerHandshake{
ClientFinished: &tls.Finished{
VerifyData: []byte("not real data"),
@ -1047,14 +1091,9 @@ func TestMySQL(t *testing.T) {
mapVal := toMap(results)
mapVal["auth_plugin_data"] = nil
mapVal["connection_id"] = 0
delOut(mapVal, "tls", "handshake_log", "client_hello")
expected := marshal(mapVal)
p := output.NewOutputProcessor() actual := strip(false, results)
p.Verbose = false
output, err := p.Process(results)
if err != nil {
panic(err)
}
actual := marshal(output)
if actual != expected {
fail(t, "fake-mysql", expected, actual)
}

View File

@ -27,16 +27,16 @@ type ScanResults struct {
// ConnectionID is the server's internal identifier for this client's
// connection, sent in the initial HandshakePacket.
ConnectionID uint32 `json:"connection_id" zgrab:"debug"` ConnectionID uint32 `json:"connection_id,omitempty" zgrab:"debug"`
// AuthPluginData is optional plugin-specific data, whose meaning
// depends on the value of AuthPluginName. Returned in the initial
// HandshakePacket.
AuthPluginData []byte `json:"auth_plugin_data" zgrab:"debug"` AuthPluginData []byte `json:"auth_plugin_data,omitempty" zgrab:"debug"`
// CharacterSet is the identifier for the character set the server is
// using. Returned in the initial HandshakePacket.
CharacterSet byte `json:"character_set" zgrab:"debug"` CharacterSet byte `json:"character_set,omitempty" zgrab:"debug"`
// StatusFlags is the set of status flags the server returned in the
// initial HandshakePacket. Each true entry in the map corresponds to
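Why `omitempty` is added here: the output processor replaces debug fields with their zero values rather than deleting them, so without `omitempty` a stripped field would still show up in the JSON as `0`/`null`. A cut-down, hypothetical illustration (the struct is not part of the commit; the field names are borrowed from above):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/zmap/zgrab2/lib/output"
)

// fakeResults has one debug field with omitempty and one without.
type fakeResults struct {
	ConnectionID uint32 `json:"connection_id,omitempty" zgrab:"debug"`
	CharacterSet byte   `json:"character_set" zgrab:"debug"`
}

func main() {
	stripped, err := output.Process(fakeResults{ConnectionID: 42, CharacterSet: 8})
	if err != nil {
		panic(err)
	}
	b, _ := json.Marshal(stripped)
	// The processor zeroes both debug fields, but only the omitempty one
	// disappears from the JSON: {"character_set":0}
	fmt.Println(string(b))
}
```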

View File

@ -104,8 +104,14 @@ func grabTarget(input ScanTarget, m *Monitor) []byte {
ipstr = s
}
a := Grab{IP: ipstr, Domain: input.Domain, Data: moduleResult} raw := Grab{IP: ipstr, Domain: input.Domain, Data: moduleResult}
stripped, err := output.Process(a)
// TODO FIXME: Move verbosity to global level, or add a Verbosity() method to the Module interface.
stripped, err := output.Process(raw)
if err != nil {
log.Warnf("Error processing results: %v", err)
stripped = raw
}
result, err := json.Marshal(stripped)
if err != nil {