github.com/prysmaticlabs/prysm@v1.4.4/third_party/afl/afl-fuzz.c (about)

     1  /*
     2     american fuzzy lop - fuzzer code
     3     --------------------------------
     4  
     5     Written and maintained by Michal Zalewski <lcamtuf@google.com>
     6  
     7     Forkserver design by Jann Horn <jannhorn@googlemail.com>
     8  
     9     Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved.
    10  
    11     Licensed under the Apache License, Version 2.0 (the "License");
    12     you may not use this file except in compliance with the License.
    13     You may obtain a copy of the License at:
    14  
    15       http://www.apache.org/licenses/LICENSE-2.0
    16  
    17     This is the real deal: the program takes an instrumented binary and
    18     attempts a variety of basic fuzzing tricks, paying close attention to
    19     how they affect the execution path.
    20  
    21   */
    22  
    23  #define AFL_MAIN
    24  #define MESSAGES_TO_STDOUT
    25  
    26  #ifndef _GNU_SOURCE
    27  #define _GNU_SOURCE
    28  #endif
    29  #define _FILE_OFFSET_BITS 64
    30  
    31  #include "config.h"
    32  #include "types.h"
    33  #include "debug.h"
    34  #include "alloc-inl.h"
    35  #include "hash.h"
    36  
    37  #include <stdio.h>
    38  #include <unistd.h>
    39  #include <stdlib.h>
    40  #include <string.h>
    41  #include <time.h>
    42  #include <errno.h>
    43  #include <signal.h>
    44  #include <dirent.h>
    45  #include <ctype.h>
    46  #include <fcntl.h>
    47  #include <termios.h>
    48  #include <dlfcn.h>
    49  #include <sched.h>
    50  
    51  #include <sys/wait.h>
    52  #include <sys/time.h>
    53  #include <sys/shm.h>
    54  #include <sys/stat.h>
    55  #include <sys/types.h>
    56  #include <sys/resource.h>
    57  #include <sys/mman.h>
    58  #include <sys/ioctl.h>
    59  #include <sys/file.h>
    60  
    61  #if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__)
    62  #  include <sys/sysctl.h>
    63  #endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */
    64  
    65  /* For systems that have sched_setaffinity; right now just Linux, but one
    66     can hope... */
    67  
    68  #ifdef __linux__
    69  #  define HAVE_AFFINITY 1
    70  #endif /* __linux__ */
    71  
    72  /* A toggle to export some variables when building as a library. Not very
    73     useful for the general public. */
    74  
    75  #ifdef AFL_LIB
    76  #  define EXP_ST
    77  #else
    78  #  define EXP_ST static
    79  #endif /* ^AFL_LIB */
    80  
    81  /* Lots of globals, but mostly for the status UI and other things where it
    82     really makes no sense to haul them around as function parameters. */
    83  
    84  
/* Global fuzzer state. Kept at file scope rather than passed around because
   nearly every function touches some of it; EXP_ST expands to nothing when
   building as a library (AFL_LIB), exporting the marked symbols. */

EXP_ST u8 *in_dir,                    /* Input directory with test cases  */
          *out_file,                  /* File to fuzz, if any             */
          *out_dir,                   /* Working & output directory       */
          *sync_dir,                  /* Synchronization directory        */
          *sync_id,                   /* Fuzzer ID                        */
          *use_banner,                /* Display banner                   */
          *in_bitmap,                 /* Input bitmap                     */
          *doc_path,                  /* Path to documentation dir        */
          *target_path,               /* Path to target binary            */
          *orig_cmdline;              /* Original command line            */

EXP_ST u32 exec_tmout = EXEC_TIMEOUT; /* Configurable exec timeout (ms)   */
static u32 hang_tmout = EXEC_TIMEOUT; /* Timeout used for hang det (ms)   */

EXP_ST u64 mem_limit  = MEM_LIMIT;    /* Memory cap for child (MB)        */

static u32 stats_update_freq = 1;     /* Stats update frequency (execs)   */

EXP_ST u8  skip_deterministic,        /* Skip deterministic stages?       */
           force_deterministic,       /* Force deterministic stages?      */
           use_splicing,              /* Recombine input files?           */
           dumb_mode,                 /* Run in non-instrumented mode?    */
           score_changed,             /* Scoring for favorites changed?   */
           kill_signal,               /* Signal that killed the child     */
           resuming_fuzz,             /* Resuming an older fuzzing job?   */
           timeout_given,             /* Specific timeout given?          */
           not_on_tty,                /* stdout is not a tty              */
           term_too_small,            /* terminal dimensions too small    */
           uses_asan,                 /* Target uses ASAN?                */
           no_forkserver,             /* Disable forkserver?              */
           crash_mode,                /* Crash mode! Yeah!                */
           in_place_resume,           /* Attempt in-place resume?         */
           auto_changed,              /* Auto-generated tokens changed?   */
           no_cpu_meter_red,          /* Feng shui on the status screen   */
           no_arith,                  /* Skip most arithmetic ops         */
           shuffle_queue,             /* Shuffle input queue?             */
           bitmap_changed = 1,        /* Time to update bitmap?           */
           qemu_mode,                 /* Running in QEMU mode?            */
           skip_requested,            /* Skip request, via SIGUSR1        */
           run_over10m,               /* Run time over 10 minutes?        */
           persistent_mode,           /* Running in persistent mode?      */
           deferred_mode,             /* Deferred forkserver mode?        */
           fast_cal;                  /* Try to calibrate faster?         */

static s32 out_fd,                    /* Persistent fd for out_file       */
           dev_urandom_fd = -1,       /* Persistent fd for /dev/urandom   */
           dev_null_fd = -1,          /* Persistent fd for /dev/null      */
           fsrv_ctl_fd,               /* Fork server control pipe (write) */
           fsrv_st_fd;                /* Fork server status pipe (read)   */

static s32 forksrv_pid,               /* PID of the fork server           */
           child_pid = -1,            /* PID of the fuzzed program        */
           out_dir_fd = -1;           /* FD of the lock file              */

EXP_ST u8* trace_bits;                /* SHM with instrumentation bitmap  */

EXP_ST u8  virgin_bits[MAP_SIZE],     /* Regions yet untouched by fuzzing */
           virgin_tmout[MAP_SIZE],    /* Bits we haven't seen in tmouts   */
           virgin_crash[MAP_SIZE];    /* Bits we haven't seen in crashes  */

static u8  var_bytes[MAP_SIZE];       /* Bytes that appear to be variable */

static s32 shm_id;                    /* ID of the SHM region             */

/* Written from signal handlers / checked in loops, hence volatile. */

static volatile u8 stop_soon,         /* Ctrl-C pressed?                  */
                   clear_screen = 1,  /* Window resized?                  */
                   child_timed_out;   /* Traced process timed out?        */

EXP_ST u32 queued_paths,              /* Total number of queued testcases */
           queued_variable,           /* Testcases with variable behavior */
           queued_at_start,           /* Total number of initial inputs   */
           queued_discovered,         /* Items discovered during this run */
           queued_imported,           /* Items imported via -S            */
           queued_favored,            /* Paths deemed favorable           */
           queued_with_cov,           /* Paths with new coverage bytes    */
           pending_not_fuzzed,        /* Queued but not done yet          */
           pending_favored,           /* Pending favored paths            */
           cur_skipped_paths,         /* Abandoned inputs in cur cycle    */
           cur_depth,                 /* Current path depth               */
           max_depth,                 /* Max path depth                   */
           useless_at_start,          /* Number of useless starting paths */
           var_byte_count,            /* Bitmap bytes with var behavior   */
           current_entry,             /* Current queue entry ID           */
           havoc_div = 1;             /* Cycle count divisor for havoc    */

EXP_ST u64 total_crashes,             /* Total number of crashes          */
           unique_crashes,            /* Crashes with unique signatures   */
           total_tmouts,              /* Total number of timeouts         */
           unique_tmouts,             /* Timeouts with unique signatures  */
           unique_hangs,              /* Hangs with unique signatures     */
           total_execs,               /* Total execve() calls             */
           start_time,                /* Unix start time (ms)             */
           last_path_time,            /* Time for most recent path (ms)   */
           last_crash_time,           /* Time for most recent crash (ms)  */
           last_hang_time,            /* Time for most recent hang (ms)   */
           last_crash_execs,          /* Exec counter at last crash       */
           queue_cycle,               /* Queue round counter              */
           cycles_wo_finds,           /* Cycles without any new paths     */
           trim_execs,                /* Execs done to trim input files   */
           bytes_trim_in,             /* Bytes coming into the trimmer    */
           bytes_trim_out,            /* Bytes coming outa the trimmer    */
           blocks_eff_total,          /* Blocks subject to effector maps  */
           blocks_eff_select;         /* Blocks selected as fuzzable      */

static u32 subseq_tmouts;             /* Number of timeouts in a row      */

static u8 *stage_name = "init",       /* Name of the current fuzz stage   */
          *stage_short,               /* Short stage name                 */
          *syncing_party;             /* Currently syncing with...        */

static s32 stage_cur, stage_max;      /* Stage progression                */
static s32 splicing_with = -1;        /* Splicing with which test case?   */

static u32 master_id, master_max;     /* Master instance job splitting    */

static u32 syncing_case;              /* Syncing with case #...           */

static s32 stage_cur_byte,            /* Byte offset of current stage op  */
           stage_cur_val;             /* Value used for stage op          */

static u8  stage_val_type;            /* Value type (STAGE_VAL_*)         */

static u64 stage_finds[32],           /* Patterns found per fuzz stage    */
           stage_cycles[32];          /* Execs per fuzz stage             */

static u32 rand_cnt;                  /* Random number counter            */

static u64 total_cal_us,              /* Total calibration time (us)      */
           total_cal_cycles;          /* Total calibration cycles         */

static u64 total_bitmap_size,         /* Total bit count for all bitmaps  */
           total_bitmap_entries;      /* Number of bitmaps counted        */

static s32 cpu_core_count;            /* CPU core count                   */

#ifdef HAVE_AFFINITY

static s32 cpu_aff = -1;       	      /* Selected CPU core                */

#endif /* HAVE_AFFINITY */

static FILE* plot_file;               /* Gnuplot output file              */
   227  
/* A single corpus entry. Entries form a singly linked list (the fuzzing
   queue); next_100 additionally links every 100th element to speed up
   traversals of long queues. */

struct queue_entry {

  u8* fname;                          /* File name for the test case      */
  u32 len;                            /* Input length                     */

  u8  cal_failed,                     /* Calibration failed?              */
      trim_done,                      /* Trimmed?                         */
      was_fuzzed,                     /* Had any fuzzing done yet?        */
      passed_det,                     /* Deterministic stages passed?     */
      has_new_cov,                    /* Triggers new coverage?           */
      var_behavior,                   /* Variable behavior?               */
      favored,                        /* Currently favored?               */
      fs_redundant;                   /* Marked as redundant in the fs?   */

  u32 bitmap_size,                    /* Number of bits set in bitmap     */
      exec_cksum;                     /* Checksum of the execution trace  */

  u64 exec_us,                        /* Execution time (us)              */
      handicap,                       /* Number of queue cycles behind    */
      depth;                          /* Path depth                       */

  u8* trace_mini;                     /* Trace bytes, if kept             */
  u32 tc_ref;                         /* Trace bytes ref count            */

  struct queue_entry *next,           /* Next element, if any             */
                     *next_100;       /* 100 elements ahead               */

};
   256  
static struct queue_entry *queue,     /* Fuzzing queue (linked list)      */
                          *queue_cur, /* Current offset within the queue  */
                          *queue_top, /* Top of the list                  */
                          *q_prev100; /* Previous 100 marker              */

/* NOTE(review): maintained by scoring/culling code outside this chunk. */

static struct queue_entry*
  top_rated[MAP_SIZE];                /* Top entries for bitmap bytes     */

struct extra_data {
  u8* data;                           /* Dictionary token data            */
  u32 len;                            /* Dictionary token length          */
  u32 hit_cnt;                        /* Use count in the corpus          */
};

static struct extra_data* extras;     /* Extra tokens to fuzz with        */
static u32 extras_cnt;                /* Total number of tokens read      */

static struct extra_data* a_extras;   /* Automatically selected extras    */
static u32 a_extras_cnt;              /* Total number of tokens available */

/* Optional hook applied to each test case before execution; set elsewhere
   in this file (presumably loaded via dlopen - see <dlfcn.h> include). */

static u8* (*post_handler)(u8* buf, u32* len);

/* Interesting values, as per config.h */

static s8  interesting_8[]  = { INTERESTING_8 };
static s16 interesting_16[] = { INTERESTING_8, INTERESTING_16 };
static s32 interesting_32[] = { INTERESTING_8, INTERESTING_16, INTERESTING_32 };
   284  
/* Fuzzing stages. The numeric values double as array indexes, so the
   order must not change. */

enum {
  /* 00 */ STAGE_FLIP1,
  /* 01 */ STAGE_FLIP2,
  /* 02 */ STAGE_FLIP4,
  /* 03 */ STAGE_FLIP8,
  /* 04 */ STAGE_FLIP16,
  /* 05 */ STAGE_FLIP32,
  /* 06 */ STAGE_ARITH8,
  /* 07 */ STAGE_ARITH16,
  /* 08 */ STAGE_ARITH32,
  /* 09 */ STAGE_INTEREST8,
  /* 10 */ STAGE_INTEREST16,
  /* 11 */ STAGE_INTEREST32,
  /* 12 */ STAGE_EXTRAS_UO,
  /* 13 */ STAGE_EXTRAS_UI,
  /* 14 */ STAGE_EXTRAS_AO,
  /* 15 */ STAGE_HAVOC,
  /* 16 */ STAGE_SPLICE
};

/* Stage value types */

enum {
  /* 00 */ STAGE_VAL_NONE,
  /* 01 */ STAGE_VAL_LE,
  /* 02 */ STAGE_VAL_BE
};

/* Execution status fault codes */

enum {
  /* 00 */ FAULT_NONE,
  /* 01 */ FAULT_TMOUT,
  /* 02 */ FAULT_CRASH,
  /* 03 */ FAULT_ERROR,
  /* 04 */ FAULT_NOINST,
  /* 05 */ FAULT_NOBITS
};
   325  
   326  
   327  /* Get unix time in milliseconds */
   328  
   329  static u64 get_cur_time(void) {
   330  
   331    struct timeval tv;
   332    struct timezone tz;
   333  
   334    gettimeofday(&tv, &tz);
   335  
   336    return (tv.tv_sec * 1000ULL) + (tv.tv_usec / 1000);
   337  
   338  }
   339  
   340  
   341  /* Get unix time in microseconds */
   342  
   343  static u64 get_cur_time_us(void) {
   344  
   345    struct timeval tv;
   346    struct timezone tz;
   347  
   348    gettimeofday(&tv, &tz);
   349  
   350    return (tv.tv_sec * 1000000ULL) + tv.tv_usec;
   351  
   352  }
   353  
   354  
   355  /* Generate a random number (from 0 to limit - 1). This may
   356     have slight bias. */
   357  
   358  static inline u32 UR(u32 limit) {
   359  
   360    if (unlikely(!rand_cnt--)) {
   361  
   362      u32 seed[2];
   363  
   364      ck_read(dev_urandom_fd, &seed, sizeof(seed), "/dev/urandom");
   365  
   366      srandom(seed[0]);
   367      rand_cnt = (RESEED_RNG / 2) + (seed[1] % RESEED_RNG);
   368  
   369    }
   370  
   371    return random() % limit;
   372  
   373  }
   374  
   375  
   376  /* Shuffle an array of pointers. Might be slightly biased. */
   377  
   378  static void shuffle_ptrs(void** ptrs, u32 cnt) {
   379  
   380    u32 i;
   381  
   382    for (i = 0; i < cnt - 2; i++) {
   383  
   384      u32 j = i + UR(cnt - i);
   385      void *s = ptrs[i];
   386      ptrs[i] = ptrs[j];
   387      ptrs[j] = s;
   388  
   389    }
   390  
   391  }
   392  
   393  
   394  #ifdef HAVE_AFFINITY
   395  
   396  /* Build a list of processes bound to specific cores. Returns -1 if nothing
   397     can be found. Assumes an upper bound of 4k CPUs. */
   398  
static void bind_to_free_cpu(void) {

  DIR* d;
  struct dirent* de;
  cpu_set_t c;

  u8 cpu_used[4096] = { 0 };
  u32 i;

  /* Nothing to gain from pinning on single-core boxes. */

  if (cpu_core_count < 2) return;

  if (getenv("AFL_NO_AFFINITY")) {

    WARNF("Not binding to a CPU core (AFL_NO_AFFINITY set).");
    return;

  }

  d = opendir("/proc");

  if (!d) {

    WARNF("Unable to access /proc - can't scan for free CPU cores.");
    return;

  }

  ACTF("Checking CPU core loadout...");

  /* Introduce some jitter, in case multiple AFL tasks are doing the same
     thing at the same time... */

  usleep(R(1000) * 250);

  /* Scan all /proc/<pid>/status entries, checking for Cpus_allowed_list.
     Flag all processes bound to a specific CPU using cpu_used[]. This will
     fail for some exotic binding setups, but is likely good enough in almost
     all real-world use cases. */

  while ((de = readdir(d))) {

    u8* fn;
    FILE* f;
    u8 tmp[MAX_LINE];
    u8 has_vmsize = 0;

    /* Only numeric /proc entries are PIDs. */

    if (!isdigit(de->d_name[0])) continue;

    fn = alloc_printf("/proc/%s/status", de->d_name);

    if (!(f = fopen(fn, "r"))) {
      ck_free(fn);
      continue;
    }

    while (fgets(tmp, MAX_LINE, f)) {

      u32 hval;

      /* Processes without VmSize are probably kernel tasks. */

      if (!strncmp(tmp, "VmSize:\t", 8)) has_vmsize = 1;

      /* A single CPU number (no '-' range, no ',' list) means the process
         is pinned to exactly one core. */

      if (!strncmp(tmp, "Cpus_allowed_list:\t", 19) &&
          !strchr(tmp, '-') && !strchr(tmp, ',') &&
          sscanf(tmp + 19, "%u", &hval) == 1 && hval < sizeof(cpu_used) &&
          has_vmsize) {

        cpu_used[hval] = 1;
        break;

      }

    }

    ck_free(fn);
    fclose(f);

  }

  closedir(d);

  /* Pick the lowest-numbered core no scanned process is pinned to. */

  for (i = 0; i < cpu_core_count; i++) if (!cpu_used[i]) break;

  if (i == cpu_core_count) {

    SAYF("\n" cLRD "[-] " cRST
         "Uh-oh, looks like all %u CPU cores on your system are allocated to\n"
         "    other instances of afl-fuzz (or similar CPU-locked tasks). Starting\n"
         "    another fuzzer on this machine is probably a bad plan, but if you are\n"
         "    absolutely sure, you can set AFL_NO_AFFINITY and try again.\n",
         cpu_core_count);

    FATAL("No more free CPU cores");

  }

  OKF("Found a free CPU core, binding to #%u.", i);

  cpu_aff = i;

  CPU_ZERO(&c);
  CPU_SET(i, &c);

  /* NOTE(review): small TOCTOU window - another instance may claim this
     core between the /proc scan above and this call; the jitter sleep
     earlier only reduces the odds. */

  if (sched_setaffinity(0, sizeof(c), &c))
    PFATAL("sched_setaffinity failed");

}
   507  
   508  #endif /* HAVE_AFFINITY */
   509  
   510  #ifndef IGNORE_FINDS
   511  
   512  /* Helper function to compare buffers; returns first and last differing offset. We
   513     use this to find reasonable locations for splicing two files. */
   514  
   515  static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) {
   516  
   517    s32 f_loc = -1;
   518    s32 l_loc = -1;
   519    u32 pos;
   520  
   521    for (pos = 0; pos < len; pos++) {
   522  
   523      if (*(ptr1++) != *(ptr2++)) {
   524  
   525        if (f_loc == -1) f_loc = pos;
   526        l_loc = pos;
   527  
   528      }
   529  
   530    }
   531  
   532    *first = f_loc;
   533    *last = l_loc;
   534  
   535    return;
   536  
   537  }
   538  
   539  #endif /* !IGNORE_FINDS */
   540  
   541  
   542  /* Describe integer. Uses 12 cyclic static buffers for return values. The value
   543     returned should be five characters or less for all the integers we reasonably
   544     expect to see. */
   545  
static u8* DI(u64 val) {

  /* Rotate through 12 static buffers so up to a dozen results can
     coexist in one printf() call. */

  static u8 tmp[12][16];
  static u8 cur;

  cur = (cur + 1) % 12;

  /* Try successively coarser units; each check formats and returns as
     soon as val falls under its limit. NOTE: the macro is deliberately
     left defined - DMS() below reuses it and performs the #undef. */

#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast) do { \
    if (val < (_divisor) * (_limit_mult)) { \
      sprintf(tmp[cur], _fmt, ((_cast)val) / (_divisor)); \
      return tmp[cur]; \
    } \
  } while (0)

  /* 0-9999 */
  CHK_FORMAT(1, 10000, "%llu", u64);

  /* 10.0k - 99.9k */
  CHK_FORMAT(1000, 99.95, "%0.01fk", double);

  /* 100k - 999k */
  CHK_FORMAT(1000, 1000, "%lluk", u64);

  /* 1.00M - 9.99M */
  CHK_FORMAT(1000 * 1000, 9.995, "%0.02fM", double);

  /* 10.0M - 99.9M */
  CHK_FORMAT(1000 * 1000, 99.95, "%0.01fM", double);

  /* 100M - 999M */
  CHK_FORMAT(1000 * 1000, 1000, "%lluM", u64);

  /* 1.00G - 9.99G */
  CHK_FORMAT(1000LL * 1000 * 1000, 9.995, "%0.02fG", double);

  /* 10.0G - 99.9G */
  CHK_FORMAT(1000LL * 1000 * 1000, 99.95, "%0.01fG", double);

  /* 100G - 999G */
  CHK_FORMAT(1000LL * 1000 * 1000, 1000, "%lluG", u64);

  /* 1.00T - 9.99T */
  CHK_FORMAT(1000LL * 1000 * 1000 * 1000, 9.995, "%0.02fT", double);

  /* 10.0T - 99.9T */
  CHK_FORMAT(1000LL * 1000 * 1000 * 1000, 99.95, "%0.01fT", double);

  /* 100T+ */
  strcpy(tmp[cur], "infty");
  return tmp[cur];

}
   598  
   599  
   600  /* Describe float. Similar to the above, except with a single 
   601     static buffer. */
   602  
   603  static u8* DF(double val) {
   604  
   605    static u8 tmp[16];
   606  
   607    if (val < 99.995) {
   608      sprintf(tmp, "%0.02f", val);
   609      return tmp;
   610    }
   611  
   612    if (val < 999.95) {
   613      sprintf(tmp, "%0.01f", val);
   614      return tmp;
   615    }
   616  
   617    return DI((u64)val);
   618  
   619  }
   620  
   621  
   622  /* Describe integer as memory size. */
   623  
static u8* DMS(u64 val) {

  /* Same 12-buffer rotation scheme as DI(). */

  static u8 tmp[12][16];
  static u8 cur;

  cur = (cur + 1) % 12;

  /* Reuses the CHK_FORMAT macro defined in DI() above, with 1024-based
     divisors; the macro is #undef'd at the end of this function. */

  /* 0-9999 */
  CHK_FORMAT(1, 10000, "%llu B", u64);

  /* 10.0k - 99.9k */
  CHK_FORMAT(1024, 99.95, "%0.01f kB", double);

  /* 100k - 999k */
  CHK_FORMAT(1024, 1000, "%llu kB", u64);

  /* 1.00M - 9.99M */
  CHK_FORMAT(1024 * 1024, 9.995, "%0.02f MB", double);

  /* 10.0M - 99.9M */
  CHK_FORMAT(1024 * 1024, 99.95, "%0.01f MB", double);

  /* 100M - 999M */
  CHK_FORMAT(1024 * 1024, 1000, "%llu MB", u64);

  /* 1.00G - 9.99G */
  CHK_FORMAT(1024LL * 1024 * 1024, 9.995, "%0.02f GB", double);

  /* 10.0G - 99.9G */
  CHK_FORMAT(1024LL * 1024 * 1024, 99.95, "%0.01f GB", double);

  /* 100G - 999G */
  CHK_FORMAT(1024LL * 1024 * 1024, 1000, "%llu GB", u64);

  /* 1.00T - 9.99T */
  CHK_FORMAT(1024LL * 1024 * 1024 * 1024, 9.995, "%0.02f TB", double);

  /* 10.0T - 99.9T */
  CHK_FORMAT(1024LL * 1024 * 1024 * 1024, 99.95, "%0.01f TB", double);

#undef CHK_FORMAT

  /* 100T+ */
  strcpy(tmp[cur], "infty");
  return tmp[cur];

}
   671  
   672  
/* Describe time delta. Returns one static buffer, 34 chars or less. */
   674  
   675  static u8* DTD(u64 cur_ms, u64 event_ms) {
   676  
   677    static u8 tmp[64];
   678    u64 delta;
   679    s32 t_d, t_h, t_m, t_s;
   680  
   681    if (!event_ms) return "none seen yet";
   682  
   683    delta = cur_ms - event_ms;
   684  
   685    t_d = delta / 1000 / 60 / 60 / 24;
   686    t_h = (delta / 1000 / 60 / 60) % 24;
   687    t_m = (delta / 1000 / 60) % 60;
   688    t_s = (delta / 1000) % 60;
   689  
   690    sprintf(tmp, "%s days, %u hrs, %u min, %u sec", DI(t_d), t_h, t_m, t_s);
   691    return tmp;
   692  
   693  }
   694  
   695  
   696  /* Mark deterministic checks as done for a particular queue entry. We use the
   697     .state file to avoid repeating deterministic fuzzing when resuming aborted
   698     scans. */
   699  
   700  static void mark_as_det_done(struct queue_entry* q) {
   701  
   702    u8* fn = strrchr(q->fname, '/');
   703    s32 fd;
   704  
   705    fn = alloc_printf("%s/queue/.state/deterministic_done/%s", out_dir, fn + 1);
   706  
   707    fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
   708    if (fd < 0) PFATAL("Unable to create '%s'", fn);
   709    close(fd);
   710  
   711    ck_free(fn);
   712  
   713    q->passed_det = 1;
   714  
   715  }
   716  
   717  
   718  /* Mark as variable. Create symlinks if possible to make it easier to examine
   719     the files. */
   720  
   721  static void mark_as_variable(struct queue_entry* q) {
   722  
   723    u8 *fn = strrchr(q->fname, '/') + 1, *ldest;
   724  
   725    ldest = alloc_printf("../../%s", fn);
   726    fn = alloc_printf("%s/queue/.state/variable_behavior/%s", out_dir, fn);
   727  
   728    if (symlink(ldest, fn)) {
   729  
   730      s32 fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
   731      if (fd < 0) PFATAL("Unable to create '%s'", fn);
   732      close(fd);
   733  
   734    }
   735  
   736    ck_free(ldest);
   737    ck_free(fn);
   738  
   739    q->var_behavior = 1;
   740  
   741  }
   742  
   743  
   744  /* Mark / unmark as redundant (edge-only). This is not used for restoring state,
   745     but may be useful for post-processing datasets. */
   746  
   747  static void mark_as_redundant(struct queue_entry* q, u8 state) {
   748  
   749    u8* fn;
   750    s32 fd;
   751  
   752    if (state == q->fs_redundant) return;
   753  
   754    q->fs_redundant = state;
   755  
   756    fn = strrchr(q->fname, '/');
   757    fn = alloc_printf("%s/queue/.state/redundant_edges/%s", out_dir, fn + 1);
   758  
   759    if (state) {
   760  
   761      fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
   762      if (fd < 0) PFATAL("Unable to create '%s'", fn);
   763      close(fd);
   764  
   765    } else {
   766  
   767      if (unlink(fn)) PFATAL("Unable to remove '%s'", fn);
   768  
   769    }
   770  
   771    ck_free(fn);
   772  
   773  }
   774  
   775  
   776  /* Append new test case to the queue. */
   777  
   778  static void add_to_queue(u8* fname, u32 len, u8 passed_det) {
   779  
   780    struct queue_entry* q = ck_alloc(sizeof(struct queue_entry));
   781  
   782    q->fname        = fname;
   783    q->len          = len;
   784    q->depth        = cur_depth + 1;
   785    q->passed_det   = passed_det;
   786  
   787    if (q->depth > max_depth) max_depth = q->depth;
   788  
   789    if (queue_top) {
   790  
   791      queue_top->next = q;
   792      queue_top = q;
   793  
   794    } else q_prev100 = queue = queue_top = q;
   795  
   796    queued_paths++;
   797    pending_not_fuzzed++;
   798  
   799    cycles_wo_finds = 0;
   800  
   801    if (!(queued_paths % 100)) {
   802  
   803      q_prev100->next_100 = q;
   804      q_prev100 = q;
   805  
   806    }
   807  
   808    last_path_time = get_cur_time();
   809  
   810  }
   811  
   812  
   813  /* Destroy the entire queue. */
   814  
   815  EXP_ST void destroy_queue(void) {
   816  
   817    struct queue_entry *q = queue, *n;
   818  
   819    while (q) {
   820  
   821      n = q->next;
   822      ck_free(q->fname);
   823      ck_free(q->trace_mini);
   824      ck_free(q);
   825      q = n;
   826  
   827    }
   828  
   829  }
   830  
   831  
   832  /* Write bitmap to file. The bitmap is useful mostly for the secret
   833     -B option, to focus a separate fuzzing session on a particular
   834     interesting input without rediscovering all the others. */
   835  
   836  EXP_ST void write_bitmap(void) {
   837  
   838    u8* fname;
   839    s32 fd;
   840  
   841    if (!bitmap_changed) return;
   842    bitmap_changed = 0;
   843  
   844    fname = alloc_printf("%s/fuzz_bitmap", out_dir);
   845    fd = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0600);
   846  
   847    if (fd < 0) PFATAL("Unable to open '%s'", fname);
   848  
   849    ck_write(fd, virgin_bits, MAP_SIZE, fname);
   850  
   851    close(fd);
   852    ck_free(fname);
   853  
   854  }
   855  
   856  
   857  /* Read bitmap from file. This is for the -B option again. */
   858  
   859  EXP_ST void read_bitmap(u8* fname) {
   860  
   861    s32 fd = open(fname, O_RDONLY);
   862  
   863    if (fd < 0) PFATAL("Unable to open '%s'", fname);
   864  
   865    ck_read(fd, virgin_bits, MAP_SIZE, fname);
   866  
   867    close(fd);
   868  
   869  }
   870  
   871  
   872  /* Check if the current execution path brings anything new to the table.
   873     Update virgin bits to reflect the finds. Returns 1 if the only change is
   874     the hit-count for a particular tuple; 2 if there are new tuples seen. 
   875     Updates the map, so subsequent calls will always return 0.
   876  
   877     This function is called after every exec() on a fairly large buffer, so
   878     it needs to be fast. We do this in 32-bit and 64-bit flavors. */
   879  
static inline u8 has_new_bits(u8* virgin_map) {

#ifdef __x86_64__

  /* On 64-bit hosts, scan the maps one u64 word at a time. */

  u64* current = (u64*)trace_bits;
  u64* virgin  = (u64*)virgin_map;

  u32  i = (MAP_SIZE >> 3);

#else

  /* Otherwise, fall back to u32-sized strides. */

  u32* current = (u32*)trace_bits;
  u32* virgin  = (u32*)virgin_map;

  u32  i = (MAP_SIZE >> 2);

#endif /* ^__x86_64__ */

  /* 0 = nothing new; 1 = only new hit counts; 2 = brand-new tuples. */

  u8   ret = 0;

  while (i--) {

    /* Optimize for (*current & *virgin) == 0 - i.e., no bits in current bitmap
       that have not been already cleared from the virgin map - since this will
       almost always be the case. */

    if (unlikely(*current) && unlikely(*current & *virgin)) {

      if (likely(ret < 2)) {

        u8* cur = (u8*)current;
        u8* vir = (u8*)virgin;

        /* Looks like we have not found any new bytes yet; see if any non-zero
           bytes in current[] are pristine in virgin[]. */

#ifdef __x86_64__

        if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
            (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff) ||
            (cur[4] && vir[4] == 0xff) || (cur[5] && vir[5] == 0xff) ||
            (cur[6] && vir[6] == 0xff) || (cur[7] && vir[7] == 0xff)) ret = 2;
        else ret = 1;

#else

        if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
            (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff)) ret = 2;
        else ret = 1;

#endif /* ^__x86_64__ */

      }

      /* Clear the newly-seen bits from the virgin map, so subsequent calls
         with an identical trace return 0. */

      *virgin &= ~*current;

    }

    current++;
    virgin++;

  }

  /* Only flag the on-disk bitmap as dirty when the *global* virgin map
     changed - not for the crash/tmout maps passed by other callers. */

  if (ret && virgin_map == virgin_bits) bitmap_changed = 1;

  return ret;

}
   948  
   949  
   950  /* Count the number of bits set in the provided bitmap. Used for the status
   951     screen several times every second, does not have to be fast. */
   952  
   953  static u32 count_bits(u8* mem) {
   954  
   955    u32* ptr = (u32*)mem;
   956    u32  i   = (MAP_SIZE >> 2);
   957    u32  ret = 0;
   958  
   959    while (i--) {
   960  
   961      u32 v = *(ptr++);
   962  
   963      /* This gets called on the inverse, virgin bitmap; optimize for sparse
   964         data. */
   965  
   966      if (v == 0xffffffff) {
   967        ret += 32;
   968        continue;
   969      }
   970  
   971      v -= ((v >> 1) & 0x55555555);
   972      v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
   973      ret += (((v + (v >> 4)) & 0xF0F0F0F) * 0x01010101) >> 24;
   974  
   975    }
   976  
   977    return ret;
   978  
   979  }
   980  
   981  
   982  #define FF(_b)  (0xff << ((_b) << 3))
   983  
   984  /* Count the number of bytes set in the bitmap. Called fairly sporadically,
   985     mostly to update the status screen or calibrate and examine confirmed
   986     new paths. */
   987  
   988  static u32 count_bytes(u8* mem) {
   989  
   990    u32* ptr = (u32*)mem;
   991    u32  i   = (MAP_SIZE >> 2);
   992    u32  ret = 0;
   993  
   994    while (i--) {
   995  
   996      u32 v = *(ptr++);
   997  
   998      if (!v) continue;
   999      if (v & FF(0)) ret++;
  1000      if (v & FF(1)) ret++;
  1001      if (v & FF(2)) ret++;
  1002      if (v & FF(3)) ret++;
  1003  
  1004    }
  1005  
  1006    return ret;
  1007  
  1008  }
  1009  
  1010  
  1011  /* Count the number of non-255 bytes set in the bitmap. Used strictly for the
  1012     status screen, several calls per second or so. */
  1013  
  1014  static u32 count_non_255_bytes(u8* mem) {
  1015  
  1016    u32* ptr = (u32*)mem;
  1017    u32  i   = (MAP_SIZE >> 2);
  1018    u32  ret = 0;
  1019  
  1020    while (i--) {
  1021  
  1022      u32 v = *(ptr++);
  1023  
  1024      /* This is called on the virgin bitmap, so optimize for the most likely
  1025         case. */
  1026  
  1027      if (v == 0xffffffff) continue;
  1028      if ((v & FF(0)) != FF(0)) ret++;
  1029      if ((v & FF(1)) != FF(1)) ret++;
  1030      if ((v & FF(2)) != FF(2)) ret++;
  1031      if ((v & FF(3)) != FF(3)) ret++;
  1032  
  1033    }
  1034  
  1035    return ret;
  1036  
  1037  }
  1038  
  1039  
  1040  /* Destructively simplify trace by eliminating hit count information
  1041     and replacing it with 0x80 or 0x01 depending on whether the tuple
  1042     is hit or not. Called on every new crash or timeout, should be
  1043     reasonably fast. */
  1044  
static const u8 simplify_lookup[256] = { 

  [0]         = 1,   /* tuple not hit  -> 0x01 */
  [1 ... 255] = 128  /* tuple hit (any count) -> 0x80 */

};
  1051  
#ifdef __x86_64__

/* 64-bit variant: walks the map one u64 at a time; an all-zero word (the
   common case for sparse traces) is replaced by eight 0x01 bytes with a
   single store. */

static void simplify_trace(u64* mem) {

  u32 i = MAP_SIZE >> 3;

  while (i--) {

    /* Optimize for sparse bitmaps. */

    if (unlikely(*mem)) {

      u8* mem8 = (u8*)mem;

      mem8[0] = simplify_lookup[mem8[0]];
      mem8[1] = simplify_lookup[mem8[1]];
      mem8[2] = simplify_lookup[mem8[2]];
      mem8[3] = simplify_lookup[mem8[3]];
      mem8[4] = simplify_lookup[mem8[4]];
      mem8[5] = simplify_lookup[mem8[5]];
      mem8[6] = simplify_lookup[mem8[6]];
      mem8[7] = simplify_lookup[mem8[7]];

    } else *mem = 0x0101010101010101ULL;

    mem++;

  }

}

#else

/* 32-bit variant of the above, processing four map bytes per word. */

static void simplify_trace(u32* mem) {

  u32 i = MAP_SIZE >> 2;

  while (i--) {

    /* Optimize for sparse bitmaps. */

    if (unlikely(*mem)) {

      u8* mem8 = (u8*)mem;

      mem8[0] = simplify_lookup[mem8[0]];
      mem8[1] = simplify_lookup[mem8[1]];
      mem8[2] = simplify_lookup[mem8[2]];
      mem8[3] = simplify_lookup[mem8[3]];

    } else *mem = 0x01010101;

    mem++;
  }

}

#endif /* ^__x86_64__ */
  1110  
  1111  
  1112  /* Destructively classify execution counts in a trace. This is used as a
  1113     preprocessing step for any newly acquired traces. Called on every exec,
  1114     must be fast. */
  1115  
/* Bucketize raw hit counts into power-of-two classes so that small
   variations in loop iteration counts do not register as new behavior. */

static const u8 count_class_lookup8[256] = {

  [0]           = 0,
  [1]           = 1,
  [2]           = 2,
  [3]           = 4,
  [4 ... 7]     = 8,
  [8 ... 15]    = 16,
  [16 ... 31]   = 32,
  [32 ... 127]  = 64,
  [128 ... 255] = 128

};
  1129  
  1130  static u16 count_class_lookup16[65536];
  1131  
  1132  
  1133  EXP_ST void init_count_class16(void) {
  1134  
  1135    u32 b1, b2;
  1136  
  1137    for (b1 = 0; b1 < 256; b1++) 
  1138      for (b2 = 0; b2 < 256; b2++)
  1139        count_class_lookup16[(b1 << 8) + b2] = 
  1140          (count_class_lookup8[b1] << 8) |
  1141          count_class_lookup8[b2];
  1142  
  1143  }
  1144  
  1145  
#ifdef __x86_64__

/* 64-bit variant: bucketize hit counts via the u16 lookup table, four
   16-bit lookups per u64 word; zero words are skipped entirely. */

static inline void classify_counts(u64* mem) {

  u32 i = MAP_SIZE >> 3;

  while (i--) {

    /* Optimize for sparse bitmaps. */

    if (unlikely(*mem)) {

      u16* mem16 = (u16*)mem;

      mem16[0] = count_class_lookup16[mem16[0]];
      mem16[1] = count_class_lookup16[mem16[1]];
      mem16[2] = count_class_lookup16[mem16[2]];
      mem16[3] = count_class_lookup16[mem16[3]];

    }

    mem++;

  }

}

#else

/* 32-bit variant of the above, two 16-bit lookups per u32 word. */

static inline void classify_counts(u32* mem) {

  u32 i = MAP_SIZE >> 2;

  while (i--) {

    /* Optimize for sparse bitmaps. */

    if (unlikely(*mem)) {

      u16* mem16 = (u16*)mem;

      mem16[0] = count_class_lookup16[mem16[0]];
      mem16[1] = count_class_lookup16[mem16[1]];

    }

    mem++;

  }

}

#endif /* ^__x86_64__ */
  1199  
  1200  
  1201  /* Get rid of shared memory (atexit handler). */
  1202  
static void remove_shm(void) {

  /* Mark the SysV shared memory segment for deletion. Registered with
     atexit() in setup_shm(). */

  shmctl(shm_id, IPC_RMID, NULL);

}
  1208  
  1209  
  1210  /* Compact trace bytes into a smaller bitmap. We effectively just drop the
  1211     count information here. This is called only sporadically, for some
  1212     new paths. */
  1213  
  1214  static void minimize_bits(u8* dst, u8* src) {
  1215  
  1216    u32 i = 0;
  1217  
  1218    while (i < MAP_SIZE) {
  1219  
  1220      if (*(src++)) dst[i >> 3] |= 1 << (i & 7);
  1221      i++;
  1222  
  1223    }
  1224  
  1225  }
  1226  
  1227  
  1228  /* When we bump into a new path, we call this to see if the path appears
  1229     more "favorable" than any of the existing ones. The purpose of the
  1230     "favorables" is to have a minimal set of paths that trigger all the bits
  1231     seen in the bitmap so far, and focus on fuzzing them at the expense of
  1232     the rest.
  1233  
  1234     The first step of the process is to maintain a list of top_rated[] entries
  1235     for every byte in the bitmap. We win that slot if there is no previous
  1236     contender, or if the contender has a more favorable speed x size factor. */
  1237  
static void update_bitmap_score(struct queue_entry* q) {

  u32 i;

  /* Lower exec-time x file-size product means a more favorable entry. */

  u64 fav_factor = q->exec_us * q->len;

  /* For every byte set in trace_bits[], see if there is a previous winner,
     and how it compares to us. */

  for (i = 0; i < MAP_SIZE; i++)

    if (trace_bits[i]) {

       if (top_rated[i]) {

         /* Faster-executing or smaller test cases are favored. */

         if (fav_factor > top_rated[i]->exec_us * top_rated[i]->len) continue;

         /* Looks like we're going to win. Decrease ref count for the
            previous winner, discard its trace_bits[] if necessary. */

         if (!--top_rated[i]->tc_ref) {
           ck_free(top_rated[i]->trace_mini);
           top_rated[i]->trace_mini = 0;
         }

       }

       /* Insert ourselves as the new winner. */

       top_rated[i] = q;
       q->tc_ref++;

       /* Lazily build the compact bit-per-tuple trace the first time this
          entry wins any slot; reused for all later wins. */

       if (!q->trace_mini) {
         q->trace_mini = ck_alloc(MAP_SIZE >> 3);
         minimize_bits(q->trace_mini, trace_bits);
       }

       /* Tell cull_queue() that the favored set needs recomputing. */

       score_changed = 1;

     }

}
  1281  
  1282  
  1283  /* The second part of the mechanism discussed above is a routine that
  1284     goes over top_rated[] entries, and then sequentially grabs winners for
  1285     previously-unseen bytes (temp_v) and marks them as favored, at least
  1286     until the next run. The favored entries are given more air time during
  1287     all fuzzing steps. */
  1288  
static void cull_queue(void) {

  struct queue_entry* q;

  /* One bit per map byte: 1 = not yet covered by any favored entry. */

  static u8 temp_v[MAP_SIZE >> 3];
  u32 i;

  /* Nothing to do unless update_bitmap_score() flagged a change. */

  if (dumb_mode || !score_changed) return;

  score_changed = 0;

  memset(temp_v, 255, MAP_SIZE >> 3);

  queued_favored  = 0;
  pending_favored = 0;

  q = queue;

  while (q) {
    q->favored = 0;
    q = q->next;
  }

  /* Let's see if anything in the bitmap isn't captured in temp_v.
     If yes, and if it has a top_rated[] contender, let's use it. */

  for (i = 0; i < MAP_SIZE; i++)
    if (top_rated[i] && (temp_v[i >> 3] & (1 << (i & 7)))) {

      u32 j = MAP_SIZE >> 3;

      /* Remove all bits belonging to the current entry from temp_v. */

      while (j--) 
        if (top_rated[i]->trace_mini[j])
          temp_v[j] &= ~top_rated[i]->trace_mini[j];

      top_rated[i]->favored = 1;
      queued_favored++;

      if (!top_rated[i]->was_fuzzed) pending_favored++;

    }

  /* Everything not picked above is marked redundant for this cycle. */

  q = queue;

  while (q) {
    mark_as_redundant(q, !q->favored);
    q = q->next;
  }

}
  1340  
  1341  
  1342  /* Configure shared memory and virgin_bits. This is called at startup. */
  1343  
  1344  EXP_ST void setup_shm(void) {
  1345  
  1346    u8* shm_str;
  1347  
  1348    if (!in_bitmap) memset(virgin_bits, 255, MAP_SIZE);
  1349  
  1350    memset(virgin_tmout, 255, MAP_SIZE);
  1351    memset(virgin_crash, 255, MAP_SIZE);
  1352  
  1353    shm_id = shmget(IPC_PRIVATE, MAP_SIZE, IPC_CREAT | IPC_EXCL | 0600);
  1354  
  1355    if (shm_id < 0) PFATAL("shmget() failed");
  1356  
  1357    atexit(remove_shm);
  1358  
  1359    shm_str = alloc_printf("%d", shm_id);
  1360  
  1361    /* If somebody is asking us to fuzz instrumented binaries in dumb mode,
  1362       we don't want them to detect instrumentation, since we won't be sending
  1363       fork server commands. This should be replaced with better auto-detection
  1364       later on, perhaps? */
  1365  
  1366    if (!dumb_mode) setenv(SHM_ENV_VAR, shm_str, 1);
  1367  
  1368    ck_free(shm_str);
  1369  
  1370    trace_bits = shmat(shm_id, NULL, 0);
  1371    
  1372    if (!trace_bits) PFATAL("shmat() failed");
  1373  
  1374  }
  1375  
  1376  
  1377  /* Load postprocessor, if available. */
  1378  
  1379  static void setup_post(void) {
  1380  
  1381    void* dh;
  1382    u8* fn = getenv("AFL_POST_LIBRARY");
  1383    u32 tlen = 6;
  1384  
  1385    if (!fn) return;
  1386  
  1387    ACTF("Loading postprocessor from '%s'...", fn);
  1388  
  1389    dh = dlopen(fn, RTLD_NOW);
  1390    if (!dh) FATAL("%s", dlerror());
  1391  
  1392    post_handler = dlsym(dh, "afl_postprocess");
  1393    if (!post_handler) FATAL("Symbol 'afl_postprocess' not found.");
  1394  
  1395    /* Do a quick test. It's better to segfault now than later =) */
  1396  
  1397    post_handler("hello", &tlen);
  1398  
  1399    OKF("Postprocessor installed successfully.");
  1400  
  1401  }
  1402  
  1403  
  1404  /* Read all testcases from the input directory, then queue them for testing.
  1405     Called at startup. */
  1406  
static void read_testcases(void) {

  struct dirent **nl;
  s32 nl_cnt;
  u32 i;
  u8* fn;

  /* Auto-detect non-in-place resumption attempts. */

  fn = alloc_printf("%s/queue", in_dir);
  if (!access(fn, F_OK)) in_dir = fn; else ck_free(fn);

  ACTF("Scanning '%s'...", in_dir);

  /* We use scandir() + alphasort() rather than readdir() because otherwise,
     the ordering  of test cases would vary somewhat randomly and would be
     difficult to control. */

  nl_cnt = scandir(in_dir, &nl, NULL, alphasort);

  if (nl_cnt < 0) {

    if (errno == ENOENT || errno == ENOTDIR)

      SAYF("\n" cLRD "[-] " cRST
           "The input directory does not seem to be valid - try again. The fuzzer needs\n"
           "    one or more test case to start with - ideally, a small file under 1 kB\n"
           "    or so. The cases must be stored as regular files directly in the input\n"
           "    directory.\n");

    PFATAL("Unable to open '%s'", in_dir);

  }

  if (shuffle_queue && nl_cnt > 1) {

    ACTF("Shuffling queue...");
    shuffle_ptrs((void**)nl, nl_cnt);

  }

  for (i = 0; i < nl_cnt; i++) {

    struct stat st;

    u8* fn = alloc_printf("%s/%s", in_dir, nl[i]->d_name);
    u8* dfn = alloc_printf("%s/.state/deterministic_done/%s", in_dir, nl[i]->d_name);

    u8  passed_det = 0;

    /* scandir() entries come from plain libc malloc, hence free() rather
       than ck_free(). */

    free(nl[i]); /* not tracked */

    if (lstat(fn, &st) || access(fn, R_OK))
      PFATAL("Unable to access '%s'", fn);

    /* This also takes care of . and .. */

    if (!S_ISREG(st.st_mode) || !st.st_size || strstr(fn, "/README.txt")) {

      ck_free(fn);
      ck_free(dfn);
      continue;

    }

    if (st.st_size > MAX_FILE) 
      FATAL("Test case '%s' is too big (%s, limit is %s)", fn,
            DMS(st.st_size), DMS(MAX_FILE));

    /* Check for metadata that indicates that deterministic fuzzing
       is complete for this entry. We don't want to repeat deterministic
       fuzzing when resuming aborted scans, because it would be pointless
       and probably very time-consuming. */

    if (!access(dfn, F_OK)) passed_det = 1;
    ck_free(dfn);

    /* NOTE(review): fn is not freed here - ownership presumably passes
       to add_to_queue(); confirm against its definition. */

    add_to_queue(fn, st.st_size, passed_det);

  }

  free(nl); /* not tracked */

  if (!queued_paths) {

    SAYF("\n" cLRD "[-] " cRST
         "Looks like there are no valid test cases in the input directory! The fuzzer\n"
         "    needs one or more test case to start with - ideally, a small file under\n"
         "    1 kB or so. The cases must be stored as regular files directly in the\n"
         "    input directory.\n");

    FATAL("No usable test cases in '%s'", in_dir);

  }

  last_path_time = 0;
  queued_at_start = queued_paths;

}
  1506  
  1507  
  1508  /* Helper function for load_extras. */
  1509  
  1510  static int compare_extras_len(const void* p1, const void* p2) {
  1511    struct extra_data *e1 = (struct extra_data*)p1,
  1512                      *e2 = (struct extra_data*)p2;
  1513  
  1514    return e1->len - e2->len;
  1515  }
  1516  
  1517  static int compare_extras_use_d(const void* p1, const void* p2) {
  1518    struct extra_data *e1 = (struct extra_data*)p1,
  1519                      *e2 = (struct extra_data*)p2;
  1520  
  1521    return e2->hit_cnt - e1->hit_cnt;
  1522  }
  1523  
  1524  
  1525  /* Read extras from a file, sort by size. */
  1526  
static void load_extras_file(u8* fname, u32* min_len, u32* max_len,
                             u32 dict_level) {

  FILE* f;
  u8  buf[MAX_LINE];
  u8  *lptr;
  u32 cur_line = 0;

  f = fopen(fname, "r");

  if (!f) PFATAL("Unable to open '%s'", fname);

  /* Each line has the form:  label[@level] = "keyword"  - parse it in
     place, updating *min_len / *max_len as tokens are accepted. */

  while ((lptr = fgets(buf, MAX_LINE, f))) {

    u8 *rptr, *wptr;
    u32 klen = 0;

    cur_line++;

    /* Trim on left and right. */

    while (isspace(*lptr)) lptr++;

    rptr = lptr + strlen(lptr) - 1;
    while (rptr >= lptr && isspace(*rptr)) rptr--;
    rptr++;
    *rptr = 0;

    /* Skip empty lines and comments. */

    if (!*lptr || *lptr == '#') continue;

    /* All other lines must end with '"', which we can consume. */

    rptr--;

    if (rptr < lptr || *rptr != '"')
      FATAL("Malformed name=\"value\" pair in line %u.", cur_line);

    *rptr = 0;

    /* Skip alphanumerics and dashes (label). */

    while (isalnum(*lptr) || *lptr == '_') lptr++;

    /* If @number follows, parse that. Tokens tagged above the requested
       dict_level are skipped. */

    if (*lptr == '@') {

      lptr++;
      if (atoi(lptr) > dict_level) continue;
      while (isdigit(*lptr)) lptr++;

    }

    /* Skip whitespace and = signs. */

    while (isspace(*lptr) || *lptr == '=') lptr++;

    /* Consume opening '"'. */

    if (*lptr != '"')
      FATAL("Malformed name=\"keyword\" pair in line %u.", cur_line);

    lptr++;

    if (!*lptr) FATAL("Empty keyword in line %u.", cur_line);

    /* Okay, let's allocate memory and copy data between "...", handling
       \xNN escaping, \\, and \". */

    extras = ck_realloc_block(extras, (extras_cnt + 1) *
               sizeof(struct extra_data));

    /* rptr - lptr bounds the decoded length, since every escape sequence
       shrinks to a single output byte. */

    wptr = extras[extras_cnt].data = ck_alloc(rptr - lptr);

    while (*lptr) {

      char* hexdigits = "0123456789abcdef";

      switch (*lptr) {

        case 1 ... 31:
        case 128 ... 255:
          FATAL("Non-printable characters in line %u.", cur_line);

        case '\\':

          lptr++;

          if (*lptr == '\\' || *lptr == '"') {
            *(wptr++) = *(lptr++);
            klen++;
            break;
          }

          if (*lptr != 'x' || !isxdigit(lptr[1]) || !isxdigit(lptr[2]))
            FATAL("Invalid escaping (not \\xNN) in line %u.", cur_line);

          /* Decode \xNN by position within the hexdigits string. */

          *(wptr++) =
            ((strchr(hexdigits, tolower(lptr[1])) - hexdigits) << 4) |
            (strchr(hexdigits, tolower(lptr[2])) - hexdigits);

          lptr += 3;
          klen++;

          break;

        default:

          *(wptr++) = *(lptr++);
          klen++;

      }

    }

    extras[extras_cnt].len = klen;

    if (extras[extras_cnt].len > MAX_DICT_FILE)
      FATAL("Keyword too big in line %u (%s, limit is %s)", cur_line,
            DMS(klen), DMS(MAX_DICT_FILE));

    if (*min_len > klen) *min_len = klen;
    if (*max_len < klen) *max_len = klen;

    extras_cnt++;

  }

  fclose(f);

}
  1660  
  1661  
  1662  /* Read extras from the extras directory and sort them by size. */
  1663  
static void load_extras(u8* dir) {

  DIR* d;
  struct dirent* de;
  u32 min_len = MAX_DICT_FILE, max_len = 0, dict_level = 0;
  u8* x;

  /* If the name ends with @, extract level and continue. */

  if ((x = strchr(dir, '@'))) {

    *x = 0;
    dict_level = atoi(x + 1);

  }

  ACTF("Loading extra dictionary from '%s' (level %u)...", dir, dict_level);

  d = opendir(dir);

  if (!d) {

    /* Not a directory: treat the path as a single dictionary file
       instead, then jump straight to validation and sorting. */

    if (errno == ENOTDIR) {
      load_extras_file(dir, &min_len, &max_len, dict_level);
      goto check_and_sort;
    }

    PFATAL("Unable to open '%s'", dir);

  }

  if (x) FATAL("Dictionary levels not supported for directories.");

  /* Directory mode: every regular, non-empty file becomes one token. */

  while ((de = readdir(d))) {

    struct stat st;
    u8* fn = alloc_printf("%s/%s", dir, de->d_name);
    s32 fd;

    if (lstat(fn, &st) || access(fn, R_OK))
      PFATAL("Unable to access '%s'", fn);

    /* This also takes care of . and .. */
    if (!S_ISREG(st.st_mode) || !st.st_size) {

      ck_free(fn);
      continue;

    }

    if (st.st_size > MAX_DICT_FILE)
      FATAL("Extra '%s' is too big (%s, limit is %s)", fn,
            DMS(st.st_size), DMS(MAX_DICT_FILE));

    if (min_len > st.st_size) min_len = st.st_size;
    if (max_len < st.st_size) max_len = st.st_size;

    extras = ck_realloc_block(extras, (extras_cnt + 1) *
               sizeof(struct extra_data));

    extras[extras_cnt].data = ck_alloc(st.st_size);
    extras[extras_cnt].len  = st.st_size;

    fd = open(fn, O_RDONLY);

    if (fd < 0) PFATAL("Unable to open '%s'", fn);

    ck_read(fd, extras[extras_cnt].data, st.st_size, fn);

    close(fd);
    ck_free(fn);

    extras_cnt++;

  }

  closedir(d);

check_and_sort:

  if (!extras_cnt) FATAL("No usable files in '%s'", dir);

  /* Keep extras[] sorted by size - maybe_add_auto() relies on this for
     its early-exit duplicate scan. */

  qsort(extras, extras_cnt, sizeof(struct extra_data), compare_extras_len);

  OKF("Loaded %u extra tokens, size range %s to %s.", extras_cnt,
      DMS(min_len), DMS(max_len));

  if (max_len > 32)
    WARNF("Some tokens are relatively large (%s) - consider trimming.",
          DMS(max_len));

  if (extras_cnt > MAX_DET_EXTRAS)
    WARNF("More than %u tokens - will use them probabilistically.",
          MAX_DET_EXTRAS);

}
  1760  
  1761  
  1762  
  1763  
  1764  /* Helper function for maybe_add_auto() */
  1765  
  1766  static inline u8 memcmp_nocase(u8* m1, u8* m2, u32 len) {
  1767  
  1768    while (len--) if (tolower(*(m1++)) ^ tolower(*(m2++))) return 1;
  1769    return 0;
  1770  
  1771  }
  1772  
  1773  
  1774  /* Maybe add automatic extra. */
  1775  
static void maybe_add_auto(u8* mem, u32 len) {

  u32 i;

  /* Allow users to specify that they don't want auto dictionaries. */

  if (!MAX_AUTO_EXTRAS || !USE_AUTO_EXTRAS) return;

  /* Skip runs of identical bytes. */

  for (i = 1; i < len; i++)
    if (mem[0] ^ mem[i]) break;

  if (i == len) return;

  /* Reject builtin interesting values. */

  if (len == 2) {

    i = sizeof(interesting_16) >> 1;

    while (i--) 
      if (*((u16*)mem) == interesting_16[i] ||
          *((u16*)mem) == SWAP16(interesting_16[i])) return;

  }

  if (len == 4) {

    i = sizeof(interesting_32) >> 2;

    while (i--) 
      if (*((u32*)mem) == interesting_32[i] ||
          *((u32*)mem) == SWAP32(interesting_32[i])) return;

  }

  /* Reject anything that matches existing extras. Do a case-insensitive
     match. We optimize by exploiting the fact that extras[] are sorted
     by size. */

  for (i = 0; i < extras_cnt; i++)
    if (extras[i].len >= len) break;

  for (; i < extras_cnt && extras[i].len == len; i++)
    if (!memcmp_nocase(extras[i].data, mem, len)) return;

  /* Last but not least, check a_extras[] for matches. There are no
     guarantees of a particular sort order. */

  auto_changed = 1;

  for (i = 0; i < a_extras_cnt; i++) {

    /* Already known: just bump the usage counter and re-sort. */

    if (a_extras[i].len == len && !memcmp_nocase(a_extras[i].data, mem, len)) {

      a_extras[i].hit_cnt++;
      goto sort_a_extras;

    }

  }

  /* At this point, looks like we're dealing with a new entry. So, let's
     append it if we have room. Otherwise, let's randomly evict some other
     entry from the bottom half of the list. */

  if (a_extras_cnt < MAX_AUTO_EXTRAS) {

    a_extras = ck_realloc_block(a_extras, (a_extras_cnt + 1) *
                                sizeof(struct extra_data));

    a_extras[a_extras_cnt].data = ck_memdup(mem, len);
    a_extras[a_extras_cnt].len  = len;
    a_extras_cnt++;

  } else {

    /* Pick a victim index in [MAX_AUTO_EXTRAS/2, MAX_AUTO_EXTRAS) - the
       bottom (least-used) half after the sort below. */

    i = MAX_AUTO_EXTRAS / 2 +
        UR((MAX_AUTO_EXTRAS + 1) / 2);

    ck_free(a_extras[i].data);

    a_extras[i].data    = ck_memdup(mem, len);
    a_extras[i].len     = len;
    a_extras[i].hit_cnt = 0;

  }

sort_a_extras:

  /* First, sort all auto extras by use count, descending order. */

  qsort(a_extras, a_extras_cnt, sizeof(struct extra_data),
        compare_extras_use_d);

  /* Then, sort the top USE_AUTO_EXTRAS entries by size. */

  qsort(a_extras, MIN(USE_AUTO_EXTRAS, a_extras_cnt),
        sizeof(struct extra_data), compare_extras_len);

}
  1878  
  1879  
  1880  /* Save automatically generated extras. */
  1881  
  1882  static void save_auto(void) {
  1883  
  1884    u32 i;
  1885  
  1886    if (!auto_changed) return;
  1887    auto_changed = 0;
  1888  
  1889    for (i = 0; i < MIN(USE_AUTO_EXTRAS, a_extras_cnt); i++) {
  1890  
  1891      u8* fn = alloc_printf("%s/queue/.state/auto_extras/auto_%06u", out_dir, i);
  1892      s32 fd;
  1893  
  1894      fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600);
  1895  
  1896      if (fd < 0) PFATAL("Unable to create '%s'", fn);
  1897  
  1898      ck_write(fd, a_extras[i].data, a_extras[i].len, fn);
  1899  
  1900      close(fd);
  1901      ck_free(fn);
  1902  
  1903    }
  1904  
  1905  }
  1906  
  1907  
  1908  /* Load automatically generated extras. */
  1909  
static void load_auto(void) {

  u32 i;

  /* Files are numbered consecutively, so stop at the first missing one. */

  for (i = 0; i < USE_AUTO_EXTRAS; i++) {

    u8  tmp[MAX_AUTO_EXTRA + 1];
    u8* fn = alloc_printf("%s/.state/auto_extras/auto_%06u", in_dir, i);
    s32 fd, len;

    fd = open(fn, O_RDONLY, 0600);

    if (fd < 0) {

      /* ENOENT just marks the end of the saved set; anything else is a
         hard error. */

      if (errno != ENOENT) PFATAL("Unable to open '%s'", fn);
      ck_free(fn);
      break;

    }

    /* We read one byte more to cheaply detect tokens that are too
       long (and skip them). */

    len = read(fd, tmp, MAX_AUTO_EXTRA + 1);

    if (len < 0) PFATAL("Unable to read from '%s'", fn);

    if (len >= MIN_AUTO_EXTRA && len <= MAX_AUTO_EXTRA)
      maybe_add_auto(tmp, len);

    close(fd);
    ck_free(fn);

  }

  if (i) OKF("Loaded %u auto-discovered dictionary tokens.", i);
  else OKF("No auto-generated dictionary tokens to reuse.");

}
  1949  
  1950  
  1951  /* Destroy extras. */
  1952  
  1953  static void destroy_extras(void) {
  1954  
  1955    u32 i;
  1956  
  1957    for (i = 0; i < extras_cnt; i++) 
  1958      ck_free(extras[i].data);
  1959  
  1960    ck_free(extras);
  1961  
  1962    for (i = 0; i < a_extras_cnt; i++) 
  1963      ck_free(a_extras[i].data);
  1964  
  1965    ck_free(a_extras);
  1966  
  1967  }
  1968  
  1969  
  1970  /* Spin up fork server (instrumented mode only). The idea is explained here:
  1971  
  1972     http://lcamtuf.blogspot.com/2014/10/fuzzing-binaries-without-execve.html
  1973  
  1974     In essence, the instrumentation allows us to skip execve(), and just keep
  1975     cloning a stopped child. So, we just execute once, and then send commands
  1976     through a pipe. The other part of this logic is in afl-as.h. */
  1977  
EXP_ST void init_forkserver(char** argv) {

  static struct itimerval it;
  int st_pipe[2], ctl_pipe[2];
  int status;
  s32 rlen;

  ACTF("Spinning up the fork server...");

  if (pipe(st_pipe) || pipe(ctl_pipe)) PFATAL("pipe() failed");

  forksrv_pid = fork();

  if (forksrv_pid < 0) PFATAL("fork() failed");

  if (!forksrv_pid) {

    /* Child process: raise fd limits, cap memory, redirect descriptors,
       and exec the instrumented target, which then acts as the fork
       server. */

    struct rlimit r;

    /* Umpf. On OpenBSD, the default fd limit for root users is set to
       soft 128. Let's try to fix that... */

    if (!getrlimit(RLIMIT_NOFILE, &r) && r.rlim_cur < FORKSRV_FD + 2) {

      r.rlim_cur = FORKSRV_FD + 2;
      setrlimit(RLIMIT_NOFILE, &r); /* Ignore errors */

    }

    if (mem_limit) {

      r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;

#ifdef RLIMIT_AS

      setrlimit(RLIMIT_AS, &r); /* Ignore errors */

#else

      /* This takes care of OpenBSD, which doesn't have RLIMIT_AS, but
         according to reliable sources, RLIMIT_DATA covers anonymous
         maps - so we should be getting good protection against OOM bugs. */

      setrlimit(RLIMIT_DATA, &r); /* Ignore errors */

#endif /* ^RLIMIT_AS */


    }

    /* Dumping cores is slow and can lead to anomalies if SIGKILL is delivered
       before the dump is complete. */

    r.rlim_max = r.rlim_cur = 0;

    setrlimit(RLIMIT_CORE, &r); /* Ignore errors */

    /* Isolate the process and configure standard descriptors. If out_file is
       specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */

    setsid();

    dup2(dev_null_fd, 1);
    dup2(dev_null_fd, 2);

    if (out_file) {

      dup2(dev_null_fd, 0);

    } else {

      dup2(out_fd, 0);
      close(out_fd);

    }

    /* Set up control and status pipes, close the unneeded original fds. */

    if (dup2(ctl_pipe[0], FORKSRV_FD) < 0) PFATAL("dup2() failed");
    if (dup2(st_pipe[1], FORKSRV_FD + 1) < 0) PFATAL("dup2() failed");

    close(ctl_pipe[0]);
    close(ctl_pipe[1]);
    close(st_pipe[0]);
    close(st_pipe[1]);

    close(out_dir_fd);
    close(dev_null_fd);
    close(dev_urandom_fd);
    close(fileno(plot_file));

    /* This should improve performance a bit, since it stops the linker from
       doing extra work post-fork(). */

    if (!getenv("LD_BIND_LAZY")) setenv("LD_BIND_NOW", "1", 0);

    /* Set sane defaults for ASAN if nothing else specified. */

    setenv("ASAN_OPTIONS", "abort_on_error=1:"
                           "detect_leaks=0:"
                           "symbolize=0:"
                           "allocator_may_return_null=1", 0);

    /* MSAN is tricky, because it doesn't support abort_on_error=1 at this
       point. So, we do this in a very hacky way. */

    setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
                           "symbolize=0:"
                           "abort_on_error=1:"
                           "allocator_may_return_null=1:"
                           "msan_track_origins=0", 0);

    execv(target_path, argv);

    /* Use a distinctive bitmap signature to tell the parent about execv()
       falling through. */

    *(u32*)trace_bits = EXEC_FAIL_SIG;
    exit(0);

  }

  /* Close the unneeded endpoints. */

  close(ctl_pipe[0]);
  close(st_pipe[1]);

  fsrv_ctl_fd = ctl_pipe[1];
  fsrv_st_fd  = st_pipe[0];

  /* Wait for the fork server to come up, but don't wait too long. */

  it.it_value.tv_sec = ((exec_tmout * FORK_WAIT_MULT) / 1000);
  it.it_value.tv_usec = ((exec_tmout * FORK_WAIT_MULT) % 1000) * 1000;

  setitimer(ITIMER_REAL, &it, NULL);

  rlen = read(fsrv_st_fd, &status, 4);

  /* Disarm the timer regardless of how the read went. */

  it.it_value.tv_sec = 0;
  it.it_value.tv_usec = 0;

  setitimer(ITIMER_REAL, &it, NULL);

  /* If we have a four-byte "hello" message from the server, we're all set.
     Otherwise, try to figure out what went wrong. */

  if (rlen == 4) {
    OKF("All right - fork server is up.");
    return;
  }

  if (child_timed_out)
    FATAL("Timeout while initializing fork server (adjusting -t may help)");

  if (waitpid(forksrv_pid, &status, 0) <= 0)
    PFATAL("waitpid() failed");

  if (WIFSIGNALED(status)) {

    /* The server died before the handshake; print context-specific
       diagnostics (ASAN + tight -m, no -m, restrictive -m) before
       bailing out. */

    if (mem_limit && mem_limit < 500 && uses_asan) {

      SAYF("\n" cLRD "[-] " cRST
           "Whoops, the target binary crashed suddenly, before receiving any input\n"
           "    from the fuzzer! Since it seems to be built with ASAN and you have a\n"
           "    restrictive memory limit configured, this is expected; please read\n"
           "    %s/notes_for_asan.txt for help.\n", doc_path);

    } else if (!mem_limit) {

      SAYF("\n" cLRD "[-] " cRST
           "Whoops, the target binary crashed suddenly, before receiving any input\n"
           "    from the fuzzer! There are several probable explanations:\n\n"

           "    - The binary is just buggy and explodes entirely on its own. If so, you\n"
           "      need to fix the underlying problem or find a better replacement.\n\n"

#ifdef __APPLE__

           "    - On MacOS X, the semantics of fork() syscalls are non-standard and may\n"
           "      break afl-fuzz performance optimizations when running platform-specific\n"
           "      targets. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n"

#endif /* __APPLE__ */

           "    - Less likely, there is a horrible bug in the fuzzer. If other options\n"
           "      fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n");

    } else {

      SAYF("\n" cLRD "[-] " cRST
           "Whoops, the target binary crashed suddenly, before receiving any input\n"
           "    from the fuzzer! There are several probable explanations:\n\n"

           "    - The current memory limit (%s) is too restrictive, causing the\n"
           "      target to hit an OOM condition in the dynamic linker. Try bumping up\n"
           "      the limit with the -m setting in the command line. A simple way confirm\n"
           "      this diagnosis would be:\n\n"

#ifdef RLIMIT_AS
           "      ( ulimit -Sv $[%llu << 10]; /path/to/fuzzed_app )\n\n"
#else
           "      ( ulimit -Sd $[%llu << 10]; /path/to/fuzzed_app )\n\n"
#endif /* ^RLIMIT_AS */

           "      Tip: you can use http://jwilk.net/software/recidivm to quickly\n"
           "      estimate the required amount of virtual memory for the binary.\n\n"

           "    - The binary is just buggy and explodes entirely on its own. If so, you\n"
           "      need to fix the underlying problem or find a better replacement.\n\n"

#ifdef __APPLE__

           "    - On MacOS X, the semantics of fork() syscalls are non-standard and may\n"
           "      break afl-fuzz performance optimizations when running platform-specific\n"
           "      targets. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n"

#endif /* __APPLE__ */

           "    - Less likely, there is a horrible bug in the fuzzer. If other options\n"
           "      fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n",
           DMS(mem_limit << 20), mem_limit - 1);

    }

    FATAL("Fork server crashed with signal %d", WTERMSIG(status));

  }

  /* The child wrote EXEC_FAIL_SIG into the shared bitmap if execv()
     fell through. */

  if (*(u32*)trace_bits == EXEC_FAIL_SIG)
    FATAL("Unable to execute target application ('%s')", argv[0]);

  if (mem_limit && mem_limit < 500 && uses_asan) {

    SAYF("\n" cLRD "[-] " cRST
           "Hmm, looks like the target binary terminated before we could complete a\n"
           "    handshake with the injected code. Since it seems to be built with ASAN and\n"
           "    you have a restrictive memory limit configured, this is expected; please\n"
           "    read %s/notes_for_asan.txt for help.\n", doc_path);

  } else if (!mem_limit) {

    SAYF("\n" cLRD "[-] " cRST
         "Hmm, looks like the target binary terminated before we could complete a\n"
         "    handshake with the injected code. Perhaps there is a horrible bug in the\n"
         "    fuzzer. Poke <lcamtuf@coredump.cx> for troubleshooting tips.\n");

  } else {

    SAYF("\n" cLRD "[-] " cRST
         "Hmm, looks like the target binary terminated before we could complete a\n"
         "    handshake with the injected code. There are %s probable explanations:\n\n"

         "%s"
         "    - The current memory limit (%s) is too restrictive, causing an OOM\n"
         "      fault in the dynamic linker. This can be fixed with the -m option. A\n"
         "      simple way to confirm the diagnosis may be:\n\n"

#ifdef RLIMIT_AS
         "      ( ulimit -Sv $[%llu << 10]; /path/to/fuzzed_app )\n\n"
#else
         "      ( ulimit -Sd $[%llu << 10]; /path/to/fuzzed_app )\n\n"
#endif /* ^RLIMIT_AS */

         "      Tip: you can use http://jwilk.net/software/recidivm to quickly\n"
         "      estimate the required amount of virtual memory for the binary.\n\n"

         "    - Less likely, there is a horrible bug in the fuzzer. If other options\n"
         "      fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n",
         getenv(DEFER_ENV_VAR) ? "three" : "two",
         getenv(DEFER_ENV_VAR) ?
         "    - You are using deferred forkserver, but __AFL_INIT() is never\n"
         "      reached before the program terminates.\n\n" : "",
         DMS(mem_limit << 20), mem_limit - 1);

  }

  FATAL("Fork server handshake failed");

}
  2258  
  2259  
  2260  /* Execute target application, monitoring for timeouts. Return status
  2261     information. The called program will update trace_bits[]. */
  2262  
static u8 run_target(char** argv, u32 timeout) {

  static struct itimerval it;
  static u32 prev_timed_out = 0;

  int status = 0;
  u32 tb4;

  child_timed_out = 0;

  /* After this memset, trace_bits[] are effectively volatile, so we
     must prevent any earlier operations from venturing into that
     territory. */

  memset(trace_bits, 0, MAP_SIZE);
  MEM_BARRIER();

  /* If we're running in "dumb" mode, we can't rely on the fork server
     logic compiled into the target program, so we will just keep calling
     execve(). There is a bit of code duplication between here and 
     init_forkserver(), but c'est la vie. */

  if (dumb_mode == 1 || no_forkserver) {

    child_pid = fork();

    if (child_pid < 0) PFATAL("fork() failed");

    if (!child_pid) {

      /* Child: replicate init_forkserver()'s resource limits and fd
         setup, then exec the target directly. */

      struct rlimit r;

      if (mem_limit) {

        r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;

#ifdef RLIMIT_AS

        setrlimit(RLIMIT_AS, &r); /* Ignore errors */

#else

        setrlimit(RLIMIT_DATA, &r); /* Ignore errors */

#endif /* ^RLIMIT_AS */

      }

      r.rlim_max = r.rlim_cur = 0;

      setrlimit(RLIMIT_CORE, &r); /* Ignore errors */

      /* Isolate the process and configure standard descriptors. If out_file is
         specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */

      setsid();

      dup2(dev_null_fd, 1);
      dup2(dev_null_fd, 2);

      if (out_file) {

        dup2(dev_null_fd, 0);

      } else {

        dup2(out_fd, 0);
        close(out_fd);

      }

      /* On Linux, would be faster to use O_CLOEXEC. Maybe TODO. */

      close(dev_null_fd);
      close(out_dir_fd);
      close(dev_urandom_fd);
      close(fileno(plot_file));

      /* Set sane defaults for ASAN if nothing else specified. */

      setenv("ASAN_OPTIONS", "abort_on_error=1:"
                             "detect_leaks=0:"
                             "symbolize=0:"
                             "allocator_may_return_null=1", 0);

      setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
                             "symbolize=0:"
                             "msan_track_origins=0", 0);

      execv(target_path, argv);

      /* Use a distinctive bitmap value to tell the parent about execv()
         falling through. */

      *(u32*)trace_bits = EXEC_FAIL_SIG;
      exit(0);

    }

  } else {

    s32 res;

    /* In non-dumb mode, we have the fork server up and running, so simply
       tell it to have at it, and then read back PID. */

    if ((res = write(fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {

      if (stop_soon) return 0;
      RPFATAL(res, "Unable to request new process from fork server (OOM?)");

    }

    if ((res = read(fsrv_st_fd, &child_pid, 4)) != 4) {

      if (stop_soon) return 0;
      RPFATAL(res, "Unable to request new process from fork server (OOM?)");

    }

    if (child_pid <= 0) FATAL("Fork server is misbehaving (OOM?)");

  }

  /* Configure timeout, as requested by user, then wait for child to terminate. */

  it.it_value.tv_sec = (timeout / 1000);
  it.it_value.tv_usec = (timeout % 1000) * 1000;

  setitimer(ITIMER_REAL, &it, NULL);

  /* The SIGALRM handler simply kills the child_pid and sets child_timed_out. */

  if (dumb_mode == 1 || no_forkserver) {

    if (waitpid(child_pid, &status, 0) <= 0) PFATAL("waitpid() failed");

  } else {

    s32 res;

    if ((res = read(fsrv_st_fd, &status, 4)) != 4) {

      if (stop_soon) return 0;
      RPFATAL(res, "Unable to communicate with fork server (OOM?)");

    }

  }

  /* Forget the child PID unless the process was merely stopped - i.e.,
     it is still alive. */

  if (!WIFSTOPPED(status)) child_pid = 0;

  /* Disarm the timeout. */

  it.it_value.tv_sec = 0;
  it.it_value.tv_usec = 0;

  setitimer(ITIMER_REAL, &it, NULL);

  total_execs++;

  /* Any subsequent operations on trace_bits must not be moved by the
     compiler below this point. Past this location, trace_bits[] behave
     very normally and do not have to be treated as volatile. */

  MEM_BARRIER();

  /* Snapshot the first dword *before* classification, so that the
     EXEC_FAIL_SIG comparison at the bottom sees the raw value. */

  tb4 = *(u32*)trace_bits;

#ifdef __x86_64__
  classify_counts((u64*)trace_bits);
#else
  classify_counts((u32*)trace_bits);
#endif /* ^__x86_64__ */

  prev_timed_out = child_timed_out;

  /* Report outcome to caller. */

  if (WIFSIGNALED(status) && !stop_soon) {

    kill_signal = WTERMSIG(status);

    /* A SIGKILL while child_timed_out is set means our timeout handler
       killed the child - report a timeout, not a crash. */

    if (child_timed_out && kill_signal == SIGKILL) return FAULT_TMOUT;

    return FAULT_CRASH;

  }

  /* A somewhat nasty hack for MSAN, which doesn't support abort_on_error and
     must use a special exit code. */

  if (uses_asan && WEXITSTATUS(status) == MSAN_ERROR) {
    kill_signal = 0;
    return FAULT_CRASH;
  }

  if ((dumb_mode == 1 || no_forkserver) && tb4 == EXEC_FAIL_SIG)
    return FAULT_ERROR;

  return FAULT_NONE;

}
  2464  
  2465  
  2466  /* Write modified data to file for testing. If out_file is set, the old file
  2467     is unlinked and a new one is created. Otherwise, out_fd is rewound and
  2468     truncated. */
  2469  
  2470  static void write_to_testcase(void* mem, u32 len) {
  2471  
  2472    s32 fd = out_fd;
  2473  
  2474    if (out_file) {
  2475  
  2476      unlink(out_file); /* Ignore errors. */
  2477  
  2478      fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
  2479  
  2480      if (fd < 0) PFATAL("Unable to create '%s'", out_file);
  2481  
  2482    } else lseek(fd, 0, SEEK_SET);
  2483  
  2484    ck_write(fd, mem, len, out_file);
  2485  
  2486    if (!out_file) {
  2487  
  2488      if (ftruncate(fd, len)) PFATAL("ftruncate() failed");
  2489      lseek(fd, 0, SEEK_SET);
  2490  
  2491    } else close(fd);
  2492  
  2493  }
  2494  
  2495  
  2496  /* The same, but with an adjustable gap. Used for trimming. */
  2497  
  2498  static void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) {
  2499  
  2500    s32 fd = out_fd;
  2501    u32 tail_len = len - skip_at - skip_len;
  2502  
  2503    if (out_file) {
  2504  
  2505      unlink(out_file); /* Ignore errors. */
  2506  
  2507      fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
  2508  
  2509      if (fd < 0) PFATAL("Unable to create '%s'", out_file);
  2510  
  2511    } else lseek(fd, 0, SEEK_SET);
  2512  
  2513    if (skip_at) ck_write(fd, mem, skip_at, out_file);
  2514  
  2515    if (tail_len) ck_write(fd, mem + skip_at + skip_len, tail_len, out_file);
  2516  
  2517    if (!out_file) {
  2518  
  2519      if (ftruncate(fd, len - skip_len)) PFATAL("ftruncate() failed");
  2520      lseek(fd, 0, SEEK_SET);
  2521  
  2522    } else close(fd);
  2523  
  2524  }
  2525  
  2526  
  2527  static void show_stats(void);
  2528  
  2529  /* Calibrate a new test case. This is done when processing the input directory
  2530     to warn about flaky or otherwise problematic test cases early on; and when
  2531     new paths are discovered to detect variable behavior and so on. */
  2532  
static u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,
                         u32 handicap, u8 from_queue) {

  static u8 first_trace[MAP_SIZE];

  u8  fault = 0, new_bits = 0, var_detected = 0,
      first_run = (q->exec_cksum == 0);

  u64 start_us, stop_us;

  /* Stash the global stage descriptors so they can be restored on exit. */

  s32 old_sc = stage_cur, old_sm = stage_max;
  u32 use_tmout = exec_tmout;
  u8* old_sn = stage_name;

  /* Be a bit more generous about timeouts when resuming sessions, or when
     trying to calibrate already-added finds. This helps avoid trouble due
     to intermittent latency. */

  if (!from_queue || resuming_fuzz)
    use_tmout = MAX(exec_tmout + CAL_TMOUT_ADD,
                    exec_tmout * CAL_TMOUT_PERC / 100);

  /* Assume failure up front; cleared below only if all cycles complete. */

  q->cal_failed++;

  stage_name = "calibration";
  stage_max  = fast_cal ? 3 : CAL_CYCLES;

  /* Make sure the forkserver is up before we do anything, and let's not
     count its spin-up time toward binary calibration. */

  if (dumb_mode != 1 && !no_forkserver && !forksrv_pid)
    init_forkserver(argv);

  if (q->exec_cksum) memcpy(first_trace, trace_bits, MAP_SIZE);

  start_us = get_cur_time_us();

  for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {

    u32 cksum;

    if (!first_run && !(stage_cur % stats_update_freq)) show_stats();

    write_to_testcase(use_mem, q->len);

    fault = run_target(argv, use_tmout);

    /* stop_soon is set by the handler for Ctrl+C. When it's pressed,
       we want to bail out quickly. */

    if (stop_soon || fault != crash_mode) goto abort_calibration;

    /* An entirely empty bitmap on the very first cycle means the binary
       carries no instrumentation at all. */

    if (!dumb_mode && !stage_cur && !count_bytes(trace_bits)) {
      fault = FAULT_NOINST;
      goto abort_calibration;
    }

    cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);

    if (q->exec_cksum != cksum) {

      u8 hnb = has_new_bits(virgin_bits);
      if (hnb > new_bits) new_bits = hnb;

      if (q->exec_cksum) {

        /* Checksum changed between runs of the same input: record which
           bytes vary and extend calibration to get a better sample. */

        u32 i;

        for (i = 0; i < MAP_SIZE; i++) {

          if (!var_bytes[i] && first_trace[i] != trace_bits[i]) {

            var_bytes[i] = 1;
            stage_max    = CAL_CYCLES_LONG;

          }

        }

        var_detected = 1;

      } else {

        /* First observed checksum for this entry; remember the trace for
           later variability comparisons. */

        q->exec_cksum = cksum;
        memcpy(first_trace, trace_bits, MAP_SIZE);

      }

    }

  }

  stop_us = get_cur_time_us();

  total_cal_us     += stop_us - start_us;
  total_cal_cycles += stage_max;

  /* OK, let's collect some stats about the performance of this test case.
     This is used for fuzzing air time calculations in calculate_score(). */

  q->exec_us     = (stop_us - start_us) / stage_max;
  q->bitmap_size = count_bytes(trace_bits);
  q->handicap    = handicap;
  q->cal_failed  = 0;

  total_bitmap_size += q->bitmap_size;
  total_bitmap_entries++;

  update_bitmap_score(q);

  /* If this case didn't result in new output from the instrumentation, tell
     parent. This is a non-critical problem, but something to warn the user
     about. */

  if (!dumb_mode && first_run && !fault && !new_bits) fault = FAULT_NOBITS;

abort_calibration:

  if (new_bits == 2 && !q->has_new_cov) {
    q->has_new_cov = 1;
    queued_with_cov++;
  }

  /* Mark variable paths. */

  if (var_detected) {

    var_byte_count = count_bytes(var_bytes);

    if (!q->var_behavior) {
      mark_as_variable(q);
      queued_variable++;
    }

  }

  /* Restore the stage descriptors saved on entry. */

  stage_name = old_sn;
  stage_cur  = old_sc;
  stage_max  = old_sm;

  if (!first_run) show_stats();

  return fault;

}
  2678  
  2679  
  2680  /* Examine map coverage. Called once, for first test case. */
  2681  
  2682  static void check_map_coverage(void) {
  2683  
  2684    u32 i;
  2685  
  2686    if (count_bytes(trace_bits) < 100) return;
  2687  
  2688    for (i = (1 << (MAP_SIZE_POW2 - 1)); i < MAP_SIZE; i++)
  2689      if (trace_bits[i]) return;
  2690  
  2691    WARNF("Recompile binary with newer version of afl to improve coverage!");
  2692  
  2693  }
  2694  
  2695  
  2696  /* Perform dry run of all test cases to confirm that the app is working as
  2697     expected. This is done only for the initial inputs, and only once. */
  2698  
  2699  static void perform_dry_run(char** argv) {
  2700  
  2701    struct queue_entry* q = queue;
  2702    u32 cal_failures = 0;
  2703    u8* skip_crashes = getenv("AFL_SKIP_CRASHES");
  2704  
  2705    while (q) {
  2706  
  2707      u8* use_mem;
  2708      u8  res;
  2709      s32 fd;
  2710  
  2711      u8* fn = strrchr(q->fname, '/') + 1;
  2712  
  2713      ACTF("Attempting dry run with '%s'...", fn);
  2714  
  2715      fd = open(q->fname, O_RDONLY);
  2716      if (fd < 0) PFATAL("Unable to open '%s'", q->fname);
  2717  
  2718      use_mem = ck_alloc_nozero(q->len);
  2719  
  2720      if (read(fd, use_mem, q->len) != q->len)
  2721        FATAL("Short read from '%s'", q->fname);
  2722  
  2723      close(fd);
  2724  
  2725      res = calibrate_case(argv, q, use_mem, 0, 1);
  2726      ck_free(use_mem);
  2727  
  2728      if (stop_soon) return;
  2729  
  2730      if (res == crash_mode || res == FAULT_NOBITS)
  2731        SAYF(cGRA "    len = %u, map size = %u, exec speed = %llu us\n" cRST, 
  2732             q->len, q->bitmap_size, q->exec_us);
  2733  
  2734      switch (res) {
  2735  
  2736        case FAULT_NONE:
  2737  
  2738          if (q == queue) check_map_coverage();
  2739  
  2740          if (crash_mode) FATAL("Test case '%s' does *NOT* crash", fn);
  2741  
  2742          break;
  2743  
  2744        case FAULT_TMOUT:
  2745  
  2746          if (timeout_given) {
  2747  
  2748            /* The -t nn+ syntax in the command line sets timeout_given to '2' and
  2749               instructs afl-fuzz to tolerate but skip queue entries that time
  2750               out. */
  2751  
  2752            if (timeout_given > 1) {
  2753              WARNF("Test case results in a timeout (skipping)");
  2754              q->cal_failed = CAL_CHANCES;
  2755              cal_failures++;
  2756              break;
  2757            }
  2758  
  2759            SAYF("\n" cLRD "[-] " cRST
  2760                 "The program took more than %u ms to process one of the initial test cases.\n"
  2761                 "    Usually, the right thing to do is to relax the -t option - or to delete it\n"
  2762                 "    altogether and allow the fuzzer to auto-calibrate. That said, if you know\n"
  2763                 "    what you are doing and want to simply skip the unruly test cases, append\n"
  2764                 "    '+' at the end of the value passed to -t ('-t %u+').\n", exec_tmout,
  2765                 exec_tmout);
  2766  
  2767            FATAL("Test case '%s' results in a timeout", fn);
  2768  
  2769          } else {
  2770  
  2771            SAYF("\n" cLRD "[-] " cRST
  2772                 "The program took more than %u ms to process one of the initial test cases.\n"
  2773                 "    This is bad news; raising the limit with the -t option is possible, but\n"
  2774                 "    will probably make the fuzzing process extremely slow.\n\n"
  2775  
  2776                 "    If this test case is just a fluke, the other option is to just avoid it\n"
  2777                 "    altogether, and find one that is less of a CPU hog.\n", exec_tmout);
  2778  
  2779            FATAL("Test case '%s' results in a timeout", fn);
  2780  
  2781          }
  2782  
  2783        case FAULT_CRASH:  
  2784  
  2785          if (crash_mode) break;
  2786  
  2787          if (skip_crashes) {
  2788            WARNF("Test case results in a crash (skipping)");
  2789            q->cal_failed = CAL_CHANCES;
  2790            cal_failures++;
  2791            break;
  2792          }
  2793  
  2794          if (mem_limit) {
  2795  
  2796            SAYF("\n" cLRD "[-] " cRST
  2797                 "Oops, the program crashed with one of the test cases provided. There are\n"
  2798                 "    several possible explanations:\n\n"
  2799  
  2800                 "    - The test case causes known crashes under normal working conditions. If\n"
  2801                 "      so, please remove it. The fuzzer should be seeded with interesting\n"
  2802                 "      inputs - but not ones that cause an outright crash.\n\n"
  2803  
  2804                 "    - The current memory limit (%s) is too low for this program, causing\n"
  2805                 "      it to die due to OOM when parsing valid files. To fix this, try\n"
  2806                 "      bumping it up with the -m setting in the command line. If in doubt,\n"
  2807                 "      try something along the lines of:\n\n"
  2808  
  2809  #ifdef RLIMIT_AS
  2810                 "      ( ulimit -Sv $[%llu << 10]; /path/to/binary [...] <testcase )\n\n"
  2811  #else
  2812                 "      ( ulimit -Sd $[%llu << 10]; /path/to/binary [...] <testcase )\n\n"
  2813  #endif /* ^RLIMIT_AS */
  2814  
  2815                 "      Tip: you can use http://jwilk.net/software/recidivm to quickly\n"
  2816                 "      estimate the required amount of virtual memory for the binary. Also,\n"
  2817                 "      if you are using ASAN, see %s/notes_for_asan.txt.\n\n"
  2818  
  2819  #ifdef __APPLE__
  2820    
  2821                 "    - On MacOS X, the semantics of fork() syscalls are non-standard and may\n"
  2822                 "      break afl-fuzz performance optimizations when running platform-specific\n"
  2823                 "      binaries. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n"
  2824  
  2825  #endif /* __APPLE__ */
  2826  
  2827                 "    - Least likely, there is a horrible bug in the fuzzer. If other options\n"
  2828                 "      fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n",
  2829                 DMS(mem_limit << 20), mem_limit - 1, doc_path);
  2830  
  2831          } else {
  2832  
  2833            SAYF("\n" cLRD "[-] " cRST
  2834                 "Oops, the program crashed with one of the test cases provided. There are\n"
  2835                 "    several possible explanations:\n\n"
  2836  
  2837                 "    - The test case causes known crashes under normal working conditions. If\n"
  2838                 "      so, please remove it. The fuzzer should be seeded with interesting\n"
  2839                 "      inputs - but not ones that cause an outright crash.\n\n"
  2840  
  2841  #ifdef __APPLE__
  2842    
  2843                 "    - On MacOS X, the semantics of fork() syscalls are non-standard and may\n"
  2844                 "      break afl-fuzz performance optimizations when running platform-specific\n"
  2845                 "      binaries. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n"
  2846  
  2847  #endif /* __APPLE__ */
  2848  
  2849                 "    - Least likely, there is a horrible bug in the fuzzer. If other options\n"
  2850                 "      fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n");
  2851  
  2852          }
  2853  
  2854          FATAL("Test case '%s' results in a crash", fn);
  2855  
  2856        case FAULT_ERROR:
  2857  
  2858          FATAL("Unable to execute target application ('%s')", argv[0]);
  2859  
  2860        case FAULT_NOINST:
  2861  
  2862          FATAL("No instrumentation detected");
  2863  
  2864        case FAULT_NOBITS: 
  2865  
  2866          useless_at_start++;
  2867  
  2868          if (!in_bitmap && !shuffle_queue)
  2869            WARNF("No new instrumentation output, test case may be useless.");
  2870  
  2871          break;
  2872  
  2873      }
  2874  
  2875      if (q->var_behavior) WARNF("Instrumentation output varies across runs.");
  2876  
  2877      q = q->next;
  2878  
  2879    }
  2880  
  2881    if (cal_failures) {
  2882  
  2883      if (cal_failures == queued_paths)
  2884        FATAL("All test cases time out%s, giving up!",
  2885              skip_crashes ? " or crash" : "");
  2886  
  2887      WARNF("Skipped %u test cases (%0.02f%%) due to timeouts%s.", cal_failures,
  2888            ((double)cal_failures) * 100 / queued_paths,
  2889            skip_crashes ? " or crashes" : "");
  2890  
  2891      if (cal_failures * 5 > queued_paths)
  2892        WARNF(cLRD "High percentage of rejected test cases, check settings!");
  2893  
  2894    }
  2895  
  2896    OKF("All test cases processed.");
  2897  
  2898  }
  2899  
  2900  
  2901  /* Helper function: link() if possible, copy otherwise. */
  2902  
  2903  static void link_or_copy(u8* old_path, u8* new_path) {
  2904  
  2905    s32 i = link(old_path, new_path);
  2906    s32 sfd, dfd;
  2907    u8* tmp;
  2908  
  2909    if (!i) return;
  2910  
  2911    sfd = open(old_path, O_RDONLY);
  2912    if (sfd < 0) PFATAL("Unable to open '%s'", old_path);
  2913  
  2914    dfd = open(new_path, O_WRONLY | O_CREAT | O_EXCL, 0600);
  2915    if (dfd < 0) PFATAL("Unable to create '%s'", new_path);
  2916  
  2917    tmp = ck_alloc(64 * 1024);
  2918  
  2919    while ((i = read(sfd, tmp, 64 * 1024)) > 0) 
  2920      ck_write(dfd, tmp, i, new_path);
  2921  
  2922    if (i < 0) PFATAL("read() failed");
  2923  
  2924    ck_free(tmp);
  2925    close(sfd);
  2926    close(dfd);
  2927  
  2928  }
  2929  
  2930  
  2931  static void nuke_resume_dir(void);
  2932  
  2933  /* Create hard links for input test cases in the output directory, choosing
  2934     good names and pivoting accordingly. */
  2935  
static void pivot_inputs(void) {

  struct queue_entry* q = queue;
  u32 id = 0;                       /* Sequential ID we assign to each entry. */

  ACTF("Creating hard links for all input files...");

  while (q) {

    u8  *nfn, *rsl = strrchr(q->fname, '/');
    u32 orig_id;

    /* rsl ends up pointing at the basename of the original file. */

    if (!rsl) rsl = q->fname; else rsl++;

    /* If the original file name conforms to the syntax and the recorded
       ID matches the one we'd assign, just use the original file name.
       This is valuable for resuming fuzzing runs. */

#ifndef SIMPLE_FILES
#  define CASE_PREFIX "id:"
#else
#  define CASE_PREFIX "id_"
#endif /* ^!SIMPLE_FILES */

    if (!strncmp(rsl, CASE_PREFIX, 3) &&
        sscanf(rsl + 3, "%06u", &orig_id) == 1 && orig_id == id) {

      u8* src_str;
      u32 src_id;

      resuming_fuzz = 1;
      nfn = alloc_printf("%s/queue/%s", out_dir, rsl);

      /* Since we're at it, let's also try to find parent and figure out the
         appropriate depth for this entry. */

      src_str = strchr(rsl + 3, ':');

      if (src_str && sscanf(src_str + 1, "%06u", &src_id) == 1) {

        /* Walk the queue to the src_id-th entry; this entry sits one
           level deeper than its parent. */

        struct queue_entry* s = queue;
        while (src_id-- && s) s = s->next;
        if (s) q->depth = s->depth + 1;

        if (max_depth < q->depth) max_depth = q->depth;

      }

    } else {

      /* No dice - invent a new name, capturing the original one as a
         substring. */

#ifndef SIMPLE_FILES

      /* If the name already embeds an ",orig:" tag (e.g. from a previous
         run), keep only the part after it to avoid nesting tags. */

      u8* use_name = strstr(rsl, ",orig:");

      if (use_name) use_name += 6; else use_name = rsl;
      nfn = alloc_printf("%s/queue/id:%06u,orig:%s", out_dir, id, use_name);

#else

      nfn = alloc_printf("%s/queue/id_%06u", out_dir, id);

#endif /* ^!SIMPLE_FILES */

    }

    /* Pivot to the new queue entry. */

    link_or_copy(q->fname, nfn);
    ck_free(q->fname);
    q->fname = nfn;

    /* Make sure that the passed_det value carries over, too. */

    if (q->passed_det) mark_as_det_done(q);

    q = q->next;
    id++;

  }

  /* The old queue/ was renamed to _resume/ for in-place resume; now that
     everything is imported, it can go away. */

  if (in_place_resume) nuke_resume_dir();

}
  3022  
  3023  
  3024  #ifndef SIMPLE_FILES
  3025  
  3026  /* Construct a file name for a new test case, capturing the operation
  3027     that led to its discovery. Uses a static buffer. */
  3028  
  3029  static u8* describe_op(u8 hnb) {
  3030  
  3031    static u8 ret[256];
  3032  
  3033    if (syncing_party) {
  3034  
  3035      sprintf(ret, "sync:%s,src:%06u", syncing_party, syncing_case);
  3036  
  3037    } else {
  3038  
  3039      sprintf(ret, "src:%06u", current_entry);
  3040  
  3041      if (splicing_with >= 0)
  3042        sprintf(ret + strlen(ret), "+%06u", splicing_with);
  3043  
  3044      sprintf(ret + strlen(ret), ",op:%s", stage_short);
  3045  
  3046      if (stage_cur_byte >= 0) {
  3047  
  3048        sprintf(ret + strlen(ret), ",pos:%u", stage_cur_byte);
  3049  
  3050        if (stage_val_type != STAGE_VAL_NONE)
  3051          sprintf(ret + strlen(ret), ",val:%s%+d", 
  3052                  (stage_val_type == STAGE_VAL_BE) ? "be:" : "",
  3053                  stage_cur_val);
  3054  
  3055      } else sprintf(ret + strlen(ret), ",rep:%u", stage_cur_val);
  3056  
  3057    }
  3058  
  3059    if (hnb == 2) strcat(ret, ",+cov");
  3060  
  3061    return ret;
  3062  
  3063  }
  3064  
  3065  #endif /* !SIMPLE_FILES */
  3066  
  3067  
  3068  /* Write a message accompanying the crash directory :-) */
  3069  
static void write_crash_readme(void) {

  u8* fn = alloc_printf("%s/crashes/README.txt", out_dir);
  s32 fd;
  FILE* f;

  /* O_EXCL: only write the README once; bail quietly if it exists. */

  fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
  ck_free(fn);

  /* Do not die on errors here - that would be impolite. */

  if (fd < 0) return;

  f = fdopen(fd, "w");

  if (!f) {
    close(fd);
    return;
  }

  fprintf(f, "Command line used to find this crash:\n\n"

             "%s\n\n"

             "If you can't reproduce a bug outside of afl-fuzz, be sure to set the same\n"
             "memory limit. The limit used for this fuzzing session was %s.\n\n"

             "Need a tool to minimize test cases before investigating the crashes or sending\n"
             "them to a vendor? Check out the afl-tmin that comes with the fuzzer!\n\n"

             "Found any cool bugs in open-source tools using afl-fuzz? If yes, please drop\n"
             "me a mail at <lcamtuf@coredump.cx> once the issues are fixed - I'd love to\n"
             "add your finds to the gallery at:\n\n"

             "  http://lcamtuf.coredump.cx/afl/\n\n"

             "Thanks :-)\n",

             orig_cmdline, DMS(mem_limit << 20)); /* ignore errors */

  fclose(f);

}
  3113  
  3114  
  3115  /* Check if the result of an execve() during routine fuzzing is interesting,
  3116     save or queue the input test case for further analysis if so. Returns 1 if
  3117     entry is saved, 0 otherwise. */
  3118  
static u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {

  u8  *fn = "";
  u8  hnb;                 /* has_new_bits() result; 2 flags new coverage. */
  s32 fd;
  u8  keeping = 0, res;

  /* fault == crash_mode covers the "expected outcome" path: in regular mode
     that is a clean run, in crash-exploration mode a crash. */

  if (fault == crash_mode) {

    /* Keep only if there are new bits in the map, add to queue for
       future fuzzing, etc. */

    if (!(hnb = has_new_bits(virgin_bits))) {
      if (crash_mode) total_crashes++;
      return 0;
    }    

#ifndef SIMPLE_FILES

    fn = alloc_printf("%s/queue/id:%06u,%s", out_dir, queued_paths,
                      describe_op(hnb));

#else

    fn = alloc_printf("%s/queue/id_%06u", out_dir, queued_paths);

#endif /* ^!SIMPLE_FILES */

    add_to_queue(fn, len, 0);

    if (hnb == 2) {
      queue_top->has_new_cov = 1;
      queued_with_cov++;
    }

    queue_top->exec_cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);

    /* Try to calibrate inline; this also calls update_bitmap_score() when
       successful. */

    res = calibrate_case(argv, queue_top, mem, queue_cycle - 1, 0);

    if (res == FAULT_ERROR)
      FATAL("Unable to execute target application");

    /* Persist the test case under the name just added to the queue. */

    fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
    if (fd < 0) PFATAL("Unable to create '%s'", fn);
    ck_write(fd, mem, len, fn);
    close(fd);

    keeping = 1;

  }

  switch (fault) {

    case FAULT_TMOUT:

      /* Timeouts are not very interesting, but we're still obliged to keep
         a handful of samples. We use the presence of new bits in the
         hang-specific bitmap as a signal of uniqueness. In "dumb" mode, we
         just keep everything. */

      total_tmouts++;

      if (unique_hangs >= KEEP_UNIQUE_HANG) return keeping;

      if (!dumb_mode) {

#ifdef __x86_64__
        simplify_trace((u64*)trace_bits);
#else
        simplify_trace((u32*)trace_bits);
#endif /* ^__x86_64__ */

        if (!has_new_bits(virgin_tmout)) return keeping;

      }

      unique_tmouts++;

      /* Before saving, we make sure that it's a genuine hang by re-running
         the target with a more generous timeout (unless the default timeout
         is already generous). */

      if (exec_tmout < hang_tmout) {

        u8 new_fault;
        write_to_testcase(mem, len);
        new_fault = run_target(argv, hang_tmout);

        /* A corner case that one user reported bumping into: increasing the
           timeout actually uncovers a crash. Make sure we don't discard it if
           so. */

        if (!stop_soon && new_fault == FAULT_CRASH) goto keep_as_crash;

        if (stop_soon || new_fault != FAULT_TMOUT) return keeping;

      }

#ifndef SIMPLE_FILES

      fn = alloc_printf("%s/hangs/id:%06llu,%s", out_dir,
                        unique_hangs, describe_op(0));

#else

      fn = alloc_printf("%s/hangs/id_%06llu", out_dir,
                        unique_hangs);

#endif /* ^!SIMPLE_FILES */

      unique_hangs++;

      last_hang_time = get_cur_time();

      break;

    case FAULT_CRASH:

keep_as_crash:

      /* This is handled in a manner roughly similar to timeouts,
         except for slightly different limits and no need to re-run test
         cases. */

      total_crashes++;

      if (unique_crashes >= KEEP_UNIQUE_CRASH) return keeping;

      if (!dumb_mode) {

#ifdef __x86_64__
        simplify_trace((u64*)trace_bits);
#else
        simplify_trace((u32*)trace_bits);
#endif /* ^__x86_64__ */

        if (!has_new_bits(virgin_crash)) return keeping;

      }

      /* First unique crash: drop a README into the crashes/ directory. */

      if (!unique_crashes) write_crash_readme();

#ifndef SIMPLE_FILES

      fn = alloc_printf("%s/crashes/id:%06llu,sig:%02u,%s", out_dir,
                        unique_crashes, kill_signal, describe_op(0));

#else

      fn = alloc_printf("%s/crashes/id_%06llu_%02u", out_dir, unique_crashes,
                        kill_signal);

#endif /* ^!SIMPLE_FILES */

      unique_crashes++;

      last_crash_time = get_cur_time();
      last_crash_execs = total_execs;

      break;

    case FAULT_ERROR: FATAL("Unable to execute target application");

    default: return keeping;

  }

  /* If we're here, we apparently want to save the crash or hang
     test case, too. */

  fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
  if (fd < 0) PFATAL("Unable to create '%s'", fn);
  ck_write(fd, mem, len, fn);
  close(fd);

  ck_free(fn);

  return keeping;

}
  3302  
  3303  
  3304  /* When resuming, try to find the queue position to start from. This makes sense
  3305     only when resuming, and when we can find the original fuzzer_stats. */
  3306  
static u32 find_start_position(void) {

  static u8 tmp[4096]; /* Ought to be enough for anybody. */

  u8  *fn, *off;
  s32 fd, i;
  u32 ret;

  /* Only meaningful when resuming an earlier session. */

  if (!resuming_fuzz) return 0;

  /* In-place resume keeps fuzzer_stats in out_dir; the '-i -' style resume
     reads it from one level above the input directory. */

  if (in_place_resume) fn = alloc_printf("%s/fuzzer_stats", out_dir);
  else fn = alloc_printf("%s/../fuzzer_stats", in_dir);

  fd = open(fn, O_RDONLY);
  ck_free(fn);

  if (fd < 0) return 0;

  /* tmp is static (zero-filled) and we read at most sizeof(tmp) - 1 bytes,
     so the buffer stays NUL-terminated for strstr()/atoi() below. */

  i = read(fd, tmp, sizeof(tmp) - 1); (void)i; /* Ignore errors */
  close(fd);

  /* The label below must match the exact field padding used by
     write_stats_file(). */

  off = strstr(tmp, "cur_path          : ");
  if (!off) return 0;

  ret = atoi(off + 20);
  if (ret >= queued_paths) ret = 0;
  return ret;

}
  3336  
  3337  
  3338  /* The same, but for timeouts. The idea is that when resuming sessions without
  3339     -t given, we don't want to keep auto-scaling the timeout over and over
  3340     again to prevent it from growing due to random flukes. */
  3341  
  3342  static void find_timeout(void) {
  3343  
  3344    static u8 tmp[4096]; /* Ought to be enough for anybody. */
  3345  
  3346    u8  *fn, *off;
  3347    s32 fd, i;
  3348    u32 ret;
  3349  
  3350    if (!resuming_fuzz) return;
  3351  
  3352    if (in_place_resume) fn = alloc_printf("%s/fuzzer_stats", out_dir);
  3353    else fn = alloc_printf("%s/../fuzzer_stats", in_dir);
  3354  
  3355    fd = open(fn, O_RDONLY);
  3356    ck_free(fn);
  3357  
  3358    if (fd < 0) return;
  3359  
  3360    i = read(fd, tmp, sizeof(tmp) - 1); (void)i; /* Ignore errors */
  3361    close(fd);
  3362  
  3363    off = strstr(tmp, "exec_timeout   : ");
  3364    if (!off) return;
  3365  
  3366    ret = atoi(off + 17);
  3367    if (ret <= 4) return;
  3368  
  3369    exec_tmout = ret;
  3370    timeout_given = 3;
  3371  
  3372  }
  3373  
  3374  
  3375  /* Update stats file for unattended monitoring. */
  3376  
static void write_stats_file(double bitmap_cvg, double stability, double eps) {

  static double last_bcvg, last_stab, last_eps;

  u8* fn = alloc_printf("%s/fuzzer_stats", out_dir);
  s32 fd;
  FILE* f;

  /* O_TRUNC: the stats file is rewritten from scratch on every update. */

  fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600);

  if (fd < 0) PFATAL("Unable to create '%s'", fn);

  ck_free(fn);

  f = fdopen(fd, "w");

  if (!f) PFATAL("fdopen() failed");

  /* Keep last values in case we're called from another context
     where exec/sec stats and such are not readily available. */

  if (!bitmap_cvg && !stability && !eps) {
    bitmap_cvg = last_bcvg;
    stability  = last_stab;
    eps        = last_eps;
  } else {
    last_bcvg = bitmap_cvg;
    last_stab = stability;
    last_eps  = eps;
  }

  /* NOTE: the field labels and their padding below are load-bearing:
     find_start_position() and find_timeout() locate values on resume by
     matching these exact strings. Do not re-align them casually. */

  fprintf(f, "start_time        : %llu\n"
             "last_update       : %llu\n"
             "fuzzer_pid        : %u\n"
             "cycles_done       : %llu\n"
             "execs_done        : %llu\n"
             "execs_per_sec     : %0.02f\n"
             "paths_total       : %u\n"
             "paths_favored     : %u\n"
             "paths_found       : %u\n"
             "paths_imported    : %u\n"
             "max_depth         : %u\n"
             "cur_path          : %u\n" /* Must match find_start_position() */
             "pending_favs      : %u\n"
             "pending_total     : %u\n"
             "variable_paths    : %u\n"
             "stability         : %0.02f%%\n"
             "bitmap_cvg        : %0.02f%%\n"
             "unique_crashes    : %llu\n"
             "unique_hangs      : %llu\n"
             "last_path         : %llu\n"
             "last_crash        : %llu\n"
             "last_hang         : %llu\n"
             "execs_since_crash : %llu\n"
             "exec_timeout      : %u\n"
             "afl_banner        : %s\n"
             "afl_version       : " VERSION "\n"
             "target_mode       : %s%s%s%s%s%s%s\n"
             "command_line      : %s\n",
             start_time / 1000, get_cur_time() / 1000, getpid(),
             queue_cycle ? (queue_cycle - 1) : 0, total_execs, eps,
             queued_paths, queued_favored, queued_discovered, queued_imported,
             max_depth, current_entry, pending_favored, pending_not_fuzzed,
             queued_variable, stability, bitmap_cvg, unique_crashes,
             unique_hangs, last_path_time / 1000, last_crash_time / 1000,
             last_hang_time / 1000, total_execs - last_crash_execs,
             exec_tmout, use_banner,
             qemu_mode ? "qemu " : "", dumb_mode ? " dumb " : "",
             no_forkserver ? "no_forksrv " : "", crash_mode ? "crash " : "",
             persistent_mode ? "persistent " : "", deferred_mode ? "deferred " : "",
             (qemu_mode || dumb_mode || no_forkserver || crash_mode ||
              persistent_mode || deferred_mode) ? "" : "default",
             orig_cmdline);
             /* ignore errors */

  fclose(f);

}
  3455  
  3456  
  3457  /* Update the plot file if there is a reason to. */
  3458  
  3459  static void maybe_update_plot_file(double bitmap_cvg, double eps) {
  3460  
  3461    static u32 prev_qp, prev_pf, prev_pnf, prev_ce, prev_md;
  3462    static u64 prev_qc, prev_uc, prev_uh;
  3463  
  3464    if (prev_qp == queued_paths && prev_pf == pending_favored && 
  3465        prev_pnf == pending_not_fuzzed && prev_ce == current_entry &&
  3466        prev_qc == queue_cycle && prev_uc == unique_crashes &&
  3467        prev_uh == unique_hangs && prev_md == max_depth) return;
  3468  
  3469    prev_qp  = queued_paths;
  3470    prev_pf  = pending_favored;
  3471    prev_pnf = pending_not_fuzzed;
  3472    prev_ce  = current_entry;
  3473    prev_qc  = queue_cycle;
  3474    prev_uc  = unique_crashes;
  3475    prev_uh  = unique_hangs;
  3476    prev_md  = max_depth;
  3477  
  3478    /* Fields in the file:
  3479  
  3480       unix_time, cycles_done, cur_path, paths_total, paths_not_fuzzed,
  3481       favored_not_fuzzed, unique_crashes, unique_hangs, max_depth,
  3482       execs_per_sec */
  3483  
  3484    fprintf(plot_file, 
  3485            "%llu, %llu, %u, %u, %u, %u, %0.02f%%, %llu, %llu, %u, %0.02f\n",
  3486            get_cur_time() / 1000, queue_cycle - 1, current_entry, queued_paths,
  3487            pending_not_fuzzed, pending_favored, bitmap_cvg, unique_crashes,
  3488            unique_hangs, max_depth, eps); /* ignore errors */
  3489  
  3490    fflush(plot_file);
  3491  
  3492  }
  3493  
  3494  
  3495  
  3496  /* A helper function for maybe_delete_out_dir(), deleting all prefixed
  3497     files in a directory. */
  3498  
  3499  static u8 delete_files(u8* path, u8* prefix) {
  3500  
  3501    DIR* d;
  3502    struct dirent* d_ent;
  3503  
  3504    d = opendir(path);
  3505  
  3506    if (!d) return 0;
  3507  
  3508    while ((d_ent = readdir(d))) {
  3509  
  3510      if (d_ent->d_name[0] != '.' && (!prefix ||
  3511          !strncmp(d_ent->d_name, prefix, strlen(prefix)))) {
  3512  
  3513        u8* fname = alloc_printf("%s/%s", path, d_ent->d_name);
  3514        if (unlink(fname)) PFATAL("Unable to delete '%s'", fname);
  3515        ck_free(fname);
  3516  
  3517      }
  3518  
  3519    }
  3520  
  3521    closedir(d);
  3522  
  3523    return !!rmdir(path);
  3524  
  3525  }
  3526  
  3527  
  3528  /* Get the number of runnable processes, with some simple smoothing. */
  3529  
static double get_runnable_processes(void) {

  static double res;   /* Smoothed value carried across calls. */

#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__)

  /* I don't see any portable sysctl or so that would quickly give us the
     number of runnable processes; the 1-minute load average can be a
     semi-decent approximation, though. */

  if (getloadavg(&res, 1) != 1) return 0;

#else

  /* On Linux, /proc/stat is probably the best way; load averages are
     computed in funny ways and sometimes don't reflect extremely short-lived
     processes well. */

  FILE* f = fopen("/proc/stat", "r");
  u8 tmp[1024];
  u32 val = 0;

  if (!f) return 0;

  while (fgets(tmp, sizeof(tmp), f)) {

    /* Sum the runnable and I/O-blocked process counts. */

    if (!strncmp(tmp, "procs_running ", 14) ||
        !strncmp(tmp, "procs_blocked ", 14)) val += atoi(tmp + 14);

  }
 
  fclose(f);

  if (!res) {

    /* First sample: use it as-is. */

    res = val;

  } else {

    /* Exponential moving average, weighting the new sample by
       1/AVG_SMOOTHING. */

    res = res * (1.0 - 1.0 / AVG_SMOOTHING) +
          ((double)val) * (1.0 / AVG_SMOOTHING);

  }

#endif /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */

  return res;

}
  3579  
  3580  
  3581  /* Delete the temporary directory used for in-place session resume. */
  3582  
  3583  static void nuke_resume_dir(void) {
  3584  
  3585    u8* fn;
  3586  
  3587    fn = alloc_printf("%s/_resume/.state/deterministic_done", out_dir);
  3588    if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
  3589    ck_free(fn);
  3590  
  3591    fn = alloc_printf("%s/_resume/.state/auto_extras", out_dir);
  3592    if (delete_files(fn, "auto_")) goto dir_cleanup_failed;
  3593    ck_free(fn);
  3594  
  3595    fn = alloc_printf("%s/_resume/.state/redundant_edges", out_dir);
  3596    if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
  3597    ck_free(fn);
  3598  
  3599    fn = alloc_printf("%s/_resume/.state/variable_behavior", out_dir);
  3600    if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
  3601    ck_free(fn);
  3602  
  3603    fn = alloc_printf("%s/_resume/.state", out_dir);
  3604    if (rmdir(fn) && errno != ENOENT) goto dir_cleanup_failed;
  3605    ck_free(fn);
  3606  
  3607    fn = alloc_printf("%s/_resume", out_dir);
  3608    if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
  3609    ck_free(fn);
  3610  
  3611    return;
  3612  
  3613  dir_cleanup_failed:
  3614  
  3615    FATAL("_resume directory cleanup failed");
  3616  
  3617  }
  3618  
  3619  
  3620  /* Delete fuzzer output directory if we recognize it as ours, if the fuzzer
  3621     is not currently running, and if the last run time isn't too great. */
  3622  
  3623  static void maybe_delete_out_dir(void) {
  3624  
  3625    FILE* f;
  3626    u8 *fn = alloc_printf("%s/fuzzer_stats", out_dir);
  3627  
  3628    /* See if the output directory is locked. If yes, bail out. If not,
  3629       create a lock that will persist for the lifetime of the process
  3630       (this requires leaving the descriptor open).*/
  3631  
  3632    out_dir_fd = open(out_dir, O_RDONLY);
  3633    if (out_dir_fd < 0) PFATAL("Unable to open '%s'", out_dir);
  3634  
  3635  #ifndef __sun
  3636  
  3637    if (flock(out_dir_fd, LOCK_EX | LOCK_NB) && errno == EWOULDBLOCK) {
  3638  
  3639      SAYF("\n" cLRD "[-] " cRST
  3640           "Looks like the job output directory is being actively used by another\n"
  3641           "    instance of afl-fuzz. You will need to choose a different %s\n"
  3642           "    or stop the other process first.\n",
  3643           sync_id ? "fuzzer ID" : "output location");
  3644  
  3645      FATAL("Directory '%s' is in use", out_dir);
  3646  
  3647    }
  3648  
  3649  #endif /* !__sun */
  3650  
  3651    f = fopen(fn, "r");
  3652  
  3653    if (f) {
  3654  
  3655      u64 start_time, last_update;
  3656  
  3657      if (fscanf(f, "start_time     : %llu\n"
  3658                    "last_update    : %llu\n", &start_time, &last_update) != 2)
  3659        FATAL("Malformed data in '%s'", fn);
  3660  
  3661      fclose(f);
  3662  
  3663      /* Let's see how much work is at stake. */
  3664  
  3665      if (!in_place_resume && last_update - start_time > OUTPUT_GRACE * 60) {
  3666  
  3667        SAYF("\n" cLRD "[-] " cRST
  3668             "The job output directory already exists and contains the results of more\n"
  3669             "    than %u minutes worth of fuzzing. To avoid data loss, afl-fuzz will *NOT*\n"
  3670             "    automatically delete this data for you.\n\n"
  3671  
  3672             "    If you wish to start a new session, remove or rename the directory manually,\n"
  3673             "    or specify a different output location for this job. To resume the old\n"
  3674             "    session, put '-' as the input directory in the command line ('-i -') and\n"
  3675             "    try again.\n", OUTPUT_GRACE);
  3676  
  3677         FATAL("At-risk data found in '%s'", out_dir);
  3678  
  3679      }
  3680  
  3681    }
  3682  
  3683    ck_free(fn);
  3684  
  3685    /* The idea for in-place resume is pretty simple: we temporarily move the old
  3686       queue/ to a new location that gets deleted once import to the new queue/
  3687       is finished. If _resume/ already exists, the current queue/ may be
  3688       incomplete due to an earlier abort, so we want to use the old _resume/
  3689       dir instead, and we let rename() fail silently. */
  3690  
  3691    if (in_place_resume) {
  3692  
  3693      u8* orig_q = alloc_printf("%s/queue", out_dir);
  3694  
  3695      in_dir = alloc_printf("%s/_resume", out_dir);
  3696  
  3697      rename(orig_q, in_dir); /* Ignore errors */
  3698  
  3699      OKF("Output directory exists, will attempt session resume.");
  3700  
  3701      ck_free(orig_q);
  3702  
  3703    } else {
  3704  
  3705      OKF("Output directory exists but deemed OK to reuse.");
  3706  
  3707    }
  3708  
  3709    ACTF("Deleting old session data...");
  3710  
  3711    /* Okay, let's get the ball rolling! First, we need to get rid of the entries
  3712       in <out_dir>/.synced/.../id:*, if any are present. */
  3713  
  3714    if (!in_place_resume) {
  3715  
  3716      fn = alloc_printf("%s/.synced", out_dir);
  3717      if (delete_files(fn, NULL)) goto dir_cleanup_failed;
  3718      ck_free(fn);
  3719  
  3720    }
  3721  
  3722    /* Next, we need to clean up <out_dir>/queue/.state/ subdirectories: */
  3723  
  3724    fn = alloc_printf("%s/queue/.state/deterministic_done", out_dir);
  3725    if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
  3726    ck_free(fn);
  3727  
  3728    fn = alloc_printf("%s/queue/.state/auto_extras", out_dir);
  3729    if (delete_files(fn, "auto_")) goto dir_cleanup_failed;
  3730    ck_free(fn);
  3731  
  3732    fn = alloc_printf("%s/queue/.state/redundant_edges", out_dir);
  3733    if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
  3734    ck_free(fn);
  3735  
  3736    fn = alloc_printf("%s/queue/.state/variable_behavior", out_dir);
  3737    if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
  3738    ck_free(fn);
  3739  
  3740    /* Then, get rid of the .state subdirectory itself (should be empty by now)
  3741       and everything matching <out_dir>/queue/id:*. */
  3742  
  3743    fn = alloc_printf("%s/queue/.state", out_dir);
  3744    if (rmdir(fn) && errno != ENOENT) goto dir_cleanup_failed;
  3745    ck_free(fn);
  3746  
  3747    fn = alloc_printf("%s/queue", out_dir);
  3748    if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
  3749    ck_free(fn);
  3750  
  3751    /* All right, let's do <out_dir>/crashes/id:* and <out_dir>/hangs/id:*. */
  3752  
  3753    if (!in_place_resume) {
  3754  
  3755      fn = alloc_printf("%s/crashes/README.txt", out_dir);
  3756      unlink(fn); /* Ignore errors */
  3757      ck_free(fn);
  3758  
  3759    }
  3760  
  3761    fn = alloc_printf("%s/crashes", out_dir);
  3762  
  3763    /* Make backup of the crashes directory if it's not empty and if we're
  3764       doing in-place resume. */
  3765  
  3766    if (in_place_resume && rmdir(fn)) {
  3767  
  3768      time_t cur_t = time(0);
  3769      struct tm* t = localtime(&cur_t);
  3770  
  3771  #ifndef SIMPLE_FILES
  3772  
  3773      u8* nfn = alloc_printf("%s.%04u-%02u-%02u-%02u:%02u:%02u", fn,
  3774                             t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
  3775                             t->tm_hour, t->tm_min, t->tm_sec);
  3776  
  3777  #else
  3778  
  3779      u8* nfn = alloc_printf("%s_%04u%02u%02u%02u%02u%02u", fn,
  3780                             t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
  3781                             t->tm_hour, t->tm_min, t->tm_sec);
  3782  
  3783  #endif /* ^!SIMPLE_FILES */
  3784  
  3785      rename(fn, nfn); /* Ignore errors. */
  3786      ck_free(nfn);
  3787  
  3788    }
  3789  
  3790    if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
  3791    ck_free(fn);
  3792  
  3793    fn = alloc_printf("%s/hangs", out_dir);
  3794  
  3795    /* Backup hangs, too. */
  3796  
  3797    if (in_place_resume && rmdir(fn)) {
  3798  
  3799      time_t cur_t = time(0);
  3800      struct tm* t = localtime(&cur_t);
  3801  
  3802  #ifndef SIMPLE_FILES
  3803  
  3804      u8* nfn = alloc_printf("%s.%04u-%02u-%02u-%02u:%02u:%02u", fn,
  3805                             t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
  3806                             t->tm_hour, t->tm_min, t->tm_sec);
  3807  
  3808  #else
  3809  
  3810      u8* nfn = alloc_printf("%s_%04u%02u%02u%02u%02u%02u", fn,
  3811                             t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
  3812                             t->tm_hour, t->tm_min, t->tm_sec);
  3813  
  3814  #endif /* ^!SIMPLE_FILES */
  3815  
  3816      rename(fn, nfn); /* Ignore errors. */
  3817      ck_free(nfn);
  3818  
  3819    }
  3820  
  3821    if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
  3822    ck_free(fn);
  3823  
  3824    /* And now, for some finishing touches. */
  3825  
  3826    fn = alloc_printf("%s/.cur_input", out_dir);
  3827    if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
  3828    ck_free(fn);
  3829  
  3830    fn = alloc_printf("%s/fuzz_bitmap", out_dir);
  3831    if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
  3832    ck_free(fn);
  3833  
  3834    if (!in_place_resume) {
  3835      fn  = alloc_printf("%s/fuzzer_stats", out_dir);
  3836      if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
  3837      ck_free(fn);
  3838    }
  3839  
  3840    fn = alloc_printf("%s/plot_data", out_dir);
  3841    if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
  3842    ck_free(fn);
  3843  
  3844    OKF("Output dir cleanup successful.");
  3845  
  3846    /* Wow... is that all? If yes, celebrate! */
  3847  
  3848    return;
  3849  
  3850  dir_cleanup_failed:
  3851  
  3852    SAYF("\n" cLRD "[-] " cRST
  3853         "Whoops, the fuzzer tried to reuse your output directory, but bumped into\n"
  3854         "    some files that shouldn't be there or that couldn't be removed - so it\n"
  3855         "    decided to abort! This happened while processing this path:\n\n"
  3856  
  3857         "    %s\n\n"
  3858         "    Please examine and manually delete the files, or specify a different\n"
  3859         "    output location for the tool.\n", fn);
  3860  
  3861    FATAL("Output directory cleanup failed");
  3862  
  3863  }
  3864  
  3865  
  3866  static void check_term_size(void);
  3867  
  3868  
  3869  /* A spiffy retro stats screen! This is called every stats_update_freq
  3870     execve() calls, plus in several other circumstances. */
  3871  
static void show_stats(void) {

  /* State preserved across calls: timestamps (ms) of the last stats-file
     write, plot update, and UI refresh, plus the exec count and smoothed
     exec speed as of the last refresh. */

  static u64 last_stats_ms, last_plot_ms, last_ms, last_execs;
  static double avg_exec;
  double t_byte_ratio, stab_ratio;

  u64 cur_ms;
  u32 t_bytes, t_bits;

  u32 banner_len, banner_pad;
  u8  tmp[256];  /* Scratch buffer, reused for every formatted field. */

  cur_ms = get_cur_time();

  /* If not enough time has passed since last UI update, bail out. */

  if (cur_ms - last_ms < 1000 / UI_TARGET_HZ) return;

  /* Check if we're past the 10 minute mark. */

  if (cur_ms - start_time > 10 * 60 * 1000) run_over10m = 1;

  /* Calculate smoothed exec speed stats. */

  if (!last_execs) {

    /* First call: seed the average from the whole-session rate. */

    avg_exec = ((double)total_execs) * 1000 / (cur_ms - start_time);

  } else {

    double cur_avg = ((double)(total_execs - last_execs)) * 1000 /
                     (cur_ms - last_ms);

    /* If there is a dramatic (5x+) jump in speed, reset the indicator
       more quickly. */

    if (cur_avg * 5 < avg_exec || cur_avg / 5 > avg_exec)
      avg_exec = cur_avg;

    /* Exponential moving average with a 1/AVG_SMOOTHING update weight. */

    avg_exec = avg_exec * (1.0 - 1.0 / AVG_SMOOTHING) +
               cur_avg * (1.0 / AVG_SMOOTHING);

  }

  last_ms = cur_ms;
  last_execs = total_execs;

  /* Tell the callers when to contact us (as measured in execs). */

  stats_update_freq = avg_exec / (UI_TARGET_HZ * 10);
  if (!stats_update_freq) stats_update_freq = 1;

  /* Do some bitmap stats. */

  t_bytes = count_non_255_bytes(virgin_bits);
  t_byte_ratio = ((double)t_bytes * 100) / MAP_SIZE;

  /* Stability = share of touched tuple bytes that did NOT exhibit
     variable behavior across runs. */

  if (t_bytes) 
    stab_ratio = 100 - ((double)var_byte_count) * 100 / t_bytes;
  else
    stab_ratio = 100;

  /* Roughly every minute, update fuzzer stats and save auto tokens. */

  if (cur_ms - last_stats_ms > STATS_UPDATE_SEC * 1000) {

    last_stats_ms = cur_ms;
    write_stats_file(t_byte_ratio, stab_ratio, avg_exec);
    save_auto();
    write_bitmap();

  }

  /* Every now and then, write plot data. */

  if (cur_ms - last_plot_ms > PLOT_UPDATE_SEC * 1000) {

    last_plot_ms = cur_ms;
    maybe_update_plot_file(t_byte_ratio, avg_exec);
 
  }

  /* Honor AFL_EXIT_WHEN_DONE and AFL_BENCH_UNTIL_CRASH. */

  if (!dumb_mode && cycles_wo_finds > 100 && !pending_not_fuzzed &&
      getenv("AFL_EXIT_WHEN_DONE")) stop_soon = 2;

  if (total_crashes && getenv("AFL_BENCH_UNTIL_CRASH")) stop_soon = 2;

  /* If we're not on TTY, bail out. */

  if (not_on_tty) return;

  /* Compute some mildly useful bitmap stats. */

  t_bits = (MAP_SIZE << 3) - count_bits(virgin_bits);

  /* Now, for the visuals... */

  if (clear_screen) {

    SAYF(TERM_CLEAR CURSOR_HIDE);
    clear_screen = 0;

    check_term_size();

  }

  SAYF(TERM_HOME);

  if (term_too_small) {

    SAYF(cBRI "Your terminal is too small to display the UI.\n"
         "Please resize terminal window to at least 80x25.\n" cRST);

    return;

  }

  /* Let's start by drawing a centered banner. */

  banner_len = (crash_mode ? 24 : 22) + strlen(VERSION) + strlen(use_banner);
  banner_pad = (80 - banner_len) / 2;
  memset(tmp, ' ', banner_pad);

  sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN
          " (%s)",  crash_mode ? cPIN "peruvian were-rabbit" : 
          cYEL "american fuzzy lop", use_banner);

  SAYF("\n%s\n\n", tmp);

  /* "Handy" shortcuts for drawing boxes... */

#define bSTG    bSTART cGRA
#define bH2     bH bH
#define bH5     bH2 bH2 bH
#define bH10    bH5 bH5
#define bH20    bH10 bH10
#define bH30    bH20 bH10
#define SP5     "     "
#define SP10    SP5 SP5
#define SP20    SP10 SP10

  /* Lord, forgive me this. */

  SAYF(SET_G1 bSTG bLT bH bSTOP cCYA " process timing " bSTG bH30 bH5 bH2 bHB
       bH bSTOP cCYA " overall results " bSTG bH5 bRT "\n");

  /* Pick a color for the "cycles done" counter; it doubles as a subtle
     hint about whether it's OK to stop fuzzing. */

  if (dumb_mode) {

    strcpy(tmp, cRST);

  } else {

    u64 min_wo_finds = (cur_ms - last_path_time) / 1000 / 60;

    /* First queue cycle: don't stop now! */
    if (queue_cycle == 1 || min_wo_finds < 15) strcpy(tmp, cMGN); else

    /* Subsequent cycles, but we're still making finds. */
    if (cycles_wo_finds < 25 || min_wo_finds < 30) strcpy(tmp, cYEL); else

    /* No finds for a long time and no test cases to try. */
    if (cycles_wo_finds > 100 && !pending_not_fuzzed && min_wo_finds > 120)
      strcpy(tmp, cLGN);

    /* Default: cautiously OK to stop? */
    else strcpy(tmp, cLBL);

  }

  SAYF(bV bSTOP "        run time : " cRST "%-34s " bSTG bV bSTOP
       "  cycles done : %s%-5s  " bSTG bV "\n",
       DTD(cur_ms, start_time), tmp, DI(queue_cycle - 1));

  /* We want to warn people about not seeing new paths after a full cycle,
     except when resuming fuzzing or running in non-instrumented mode. */

  if (!dumb_mode && (last_path_time || resuming_fuzz || queue_cycle == 1 ||
      in_bitmap || crash_mode)) {

    SAYF(bV bSTOP "   last new path : " cRST "%-34s ",
         DTD(cur_ms, last_path_time));

  } else {

    if (dumb_mode)

      SAYF(bV bSTOP "   last new path : " cPIN "n/a" cRST 
           " (non-instrumented mode)        ");

     else

      SAYF(bV bSTOP "   last new path : " cRST "none yet " cLRD
           "(odd, check syntax!)      ");

  }

  SAYF(bSTG bV bSTOP "  total paths : " cRST "%-5s  " bSTG bV "\n",
       DI(queued_paths));

  /* Highlight crashes in red if found, denote going over the KEEP_UNIQUE_CRASH
     limit with a '+' appended to the count. */

  sprintf(tmp, "%s%s", DI(unique_crashes),
          (unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : "");

  SAYF(bV bSTOP " last uniq crash : " cRST "%-34s " bSTG bV bSTOP
       " uniq crashes : %s%-6s " bSTG bV "\n",
       DTD(cur_ms, last_crash_time), unique_crashes ? cLRD : cRST,
       tmp);

  sprintf(tmp, "%s%s", DI(unique_hangs),
         (unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");

  SAYF(bV bSTOP "  last uniq hang : " cRST "%-34s " bSTG bV bSTOP 
       "   uniq hangs : " cRST "%-6s " bSTG bV "\n",
       DTD(cur_ms, last_hang_time), tmp);

  SAYF(bVR bH bSTOP cCYA " cycle progress " bSTG bH20 bHB bH bSTOP cCYA
       " map coverage " bSTG bH bHT bH20 bH2 bH bVL "\n");

  /* This gets funny because we want to print several variable-length variables
     together, but then cram them into a fixed-width field - so we need to
     put them in a temporary buffer first. */

  sprintf(tmp, "%s%s (%0.02f%%)", DI(current_entry),
          queue_cur->favored ? "" : "*",
          ((double)current_entry * 100) / queued_paths);

  SAYF(bV bSTOP "  now processing : " cRST "%-17s " bSTG bV bSTOP, tmp);

  sprintf(tmp, "%0.02f%% / %0.02f%%", ((double)queue_cur->bitmap_size) * 
          100 / MAP_SIZE, t_byte_ratio);

  SAYF("    map density : %s%-21s " bSTG bV "\n", t_byte_ratio > 70 ? cLRD : 
       ((t_bytes < 200 && !dumb_mode) ? cPIN : cRST), tmp);

  sprintf(tmp, "%s (%0.02f%%)", DI(cur_skipped_paths),
          ((double)cur_skipped_paths * 100) / queued_paths);

  SAYF(bV bSTOP " paths timed out : " cRST "%-17s " bSTG bV, tmp);

  sprintf(tmp, "%0.02f bits/tuple",
          t_bytes ? (((double)t_bits) / t_bytes) : 0);

  SAYF(bSTOP " count coverage : " cRST "%-21s " bSTG bV "\n", tmp);

  SAYF(bVR bH bSTOP cCYA " stage progress " bSTG bH20 bX bH bSTOP cCYA
       " findings in depth " bSTG bH20 bVL "\n");

  sprintf(tmp, "%s (%0.02f%%)", DI(queued_favored),
          ((double)queued_favored) * 100 / queued_paths);

  /* Yeah... it's still going on... halp? */

  SAYF(bV bSTOP "  now trying : " cRST "%-21s " bSTG bV bSTOP 
       " favored paths : " cRST "%-22s " bSTG bV "\n", stage_name, tmp);

  if (!stage_max) {

    sprintf(tmp, "%s/-", DI(stage_cur));

  } else {

    sprintf(tmp, "%s/%s (%0.02f%%)", DI(stage_cur), DI(stage_max),
            ((double)stage_cur) * 100 / stage_max);

  }

  SAYF(bV bSTOP " stage execs : " cRST "%-21s " bSTG bV bSTOP, tmp);

  sprintf(tmp, "%s (%0.02f%%)", DI(queued_with_cov),
          ((double)queued_with_cov) * 100 / queued_paths);

  SAYF("  new edges on : " cRST "%-22s " bSTG bV "\n", tmp);

  sprintf(tmp, "%s (%s%s unique)", DI(total_crashes), DI(unique_crashes),
          (unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : "");

  /* In crash mode every saved case IS a crash, so the label changes. */

  if (crash_mode) {

    SAYF(bV bSTOP " total execs : " cRST "%-21s " bSTG bV bSTOP
         "   new crashes : %s%-22s " bSTG bV "\n", DI(total_execs),
         unique_crashes ? cLRD : cRST, tmp);

  } else {

    SAYF(bV bSTOP " total execs : " cRST "%-21s " bSTG bV bSTOP
         " total crashes : %s%-22s " bSTG bV "\n", DI(total_execs),
         unique_crashes ? cLRD : cRST, tmp);

  }

  /* Show a warning about slow execution. */

  if (avg_exec < 100) {

    sprintf(tmp, "%s/sec (%s)", DF(avg_exec), avg_exec < 20 ?
            "zzzz..." : "slow!");

    SAYF(bV bSTOP "  exec speed : " cLRD "%-21s ", tmp);

  } else {

    sprintf(tmp, "%s/sec", DF(avg_exec));
    SAYF(bV bSTOP "  exec speed : " cRST "%-21s ", tmp);

  }

  sprintf(tmp, "%s (%s%s unique)", DI(total_tmouts), DI(unique_tmouts),
          (unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");

  SAYF (bSTG bV bSTOP "  total tmouts : " cRST "%-22s " bSTG bV "\n", tmp);

  /* Aaaalmost there... hold on! */

  SAYF(bVR bH cCYA bSTOP " fuzzing strategy yields " bSTG bH10 bH bHT bH10
       bH5 bHB bH bSTOP cCYA " path geometry " bSTG bH5 bH2 bH bVL "\n");

  if (skip_deterministic) {

    strcpy(tmp, "n/a, n/a, n/a");

  } else {

    sprintf(tmp, "%s/%s, %s/%s, %s/%s",
            DI(stage_finds[STAGE_FLIP1]), DI(stage_cycles[STAGE_FLIP1]),
            DI(stage_finds[STAGE_FLIP2]), DI(stage_cycles[STAGE_FLIP2]),
            DI(stage_finds[STAGE_FLIP4]), DI(stage_cycles[STAGE_FLIP4]));

  }

  SAYF(bV bSTOP "   bit flips : " cRST "%-37s " bSTG bV bSTOP "    levels : "
       cRST "%-10s " bSTG bV "\n", tmp, DI(max_depth));

  /* NOTE: when skip_deterministic is set, the rows below reuse the
     "n/a, n/a, n/a" string left in tmp by the branch above. */

  if (!skip_deterministic)
    sprintf(tmp, "%s/%s, %s/%s, %s/%s",
            DI(stage_finds[STAGE_FLIP8]), DI(stage_cycles[STAGE_FLIP8]),
            DI(stage_finds[STAGE_FLIP16]), DI(stage_cycles[STAGE_FLIP16]),
            DI(stage_finds[STAGE_FLIP32]), DI(stage_cycles[STAGE_FLIP32]));

  SAYF(bV bSTOP "  byte flips : " cRST "%-37s " bSTG bV bSTOP "   pending : "
       cRST "%-10s " bSTG bV "\n", tmp, DI(pending_not_fuzzed));

  if (!skip_deterministic)
    sprintf(tmp, "%s/%s, %s/%s, %s/%s",
            DI(stage_finds[STAGE_ARITH8]), DI(stage_cycles[STAGE_ARITH8]),
            DI(stage_finds[STAGE_ARITH16]), DI(stage_cycles[STAGE_ARITH16]),
            DI(stage_finds[STAGE_ARITH32]), DI(stage_cycles[STAGE_ARITH32]));

  SAYF(bV bSTOP " arithmetics : " cRST "%-37s " bSTG bV bSTOP "  pend fav : "
       cRST "%-10s " bSTG bV "\n", tmp, DI(pending_favored));

  if (!skip_deterministic)
    sprintf(tmp, "%s/%s, %s/%s, %s/%s",
            DI(stage_finds[STAGE_INTEREST8]), DI(stage_cycles[STAGE_INTEREST8]),
            DI(stage_finds[STAGE_INTEREST16]), DI(stage_cycles[STAGE_INTEREST16]),
            DI(stage_finds[STAGE_INTEREST32]), DI(stage_cycles[STAGE_INTEREST32]));

  SAYF(bV bSTOP "  known ints : " cRST "%-37s " bSTG bV bSTOP " own finds : "
       cRST "%-10s " bSTG bV "\n", tmp, DI(queued_discovered));

  if (!skip_deterministic)
    sprintf(tmp, "%s/%s, %s/%s, %s/%s",
            DI(stage_finds[STAGE_EXTRAS_UO]), DI(stage_cycles[STAGE_EXTRAS_UO]),
            DI(stage_finds[STAGE_EXTRAS_UI]), DI(stage_cycles[STAGE_EXTRAS_UI]),
            DI(stage_finds[STAGE_EXTRAS_AO]), DI(stage_cycles[STAGE_EXTRAS_AO]));

  SAYF(bV bSTOP "  dictionary : " cRST "%-37s " bSTG bV bSTOP
       "  imported : " cRST "%-10s " bSTG bV "\n", tmp,
       sync_id ? DI(queued_imported) : (u8*)"n/a");

  sprintf(tmp, "%s/%s, %s/%s",
          DI(stage_finds[STAGE_HAVOC]), DI(stage_cycles[STAGE_HAVOC]),
          DI(stage_finds[STAGE_SPLICE]), DI(stage_cycles[STAGE_SPLICE]));

  SAYF(bV bSTOP "       havoc : " cRST "%-37s " bSTG bV bSTOP, tmp);

  if (t_bytes) sprintf(tmp, "%0.02f%%", stab_ratio);
    else strcpy(tmp, "n/a");

  SAYF(" stability : %s%-10s " bSTG bV "\n", (stab_ratio < 85 && var_byte_count > 40) 
       ? cLRD : ((queued_variable && (!persistent_mode || var_byte_count > 20))
       ? cMGN : cRST), tmp);

  /* Trim stats: "saved%/execs, block-effector%". The first half goes
     into tmp, the second is strcat'd on below. */

  if (!bytes_trim_out) {

    sprintf(tmp, "n/a, ");

  } else {

    sprintf(tmp, "%0.02f%%/%s, ",
            ((double)(bytes_trim_in - bytes_trim_out)) * 100 / bytes_trim_in,
            DI(trim_execs));

  }

  if (!blocks_eff_total) {

    u8 tmp2[128];

    sprintf(tmp2, "n/a");
    strcat(tmp, tmp2);

  } else {

    u8 tmp2[128];

    sprintf(tmp2, "%0.02f%%",
            ((double)(blocks_eff_total - blocks_eff_select)) * 100 /
            blocks_eff_total);

    strcat(tmp, tmp2);

  }

  SAYF(bV bSTOP "        trim : " cRST "%-37s " bSTG bVR bH20 bH2 bH2 bRB "\n"
       bLB bH30 bH20 bH2 bH bRB bSTOP cRST RESET_G1, tmp);

  /* Provide some CPU utilization stats. */

  if (cpu_core_count) {

    double cur_runnable = get_runnable_processes();
    u32 cur_utilization = cur_runnable * 100 / cpu_core_count;

    u8* cpu_color = cCYA;

    /* If we could still run one or more processes, use green. */

    if (cpu_core_count > 1 && cur_runnable + 1 <= cpu_core_count)
      cpu_color = cLGN;

    /* If we're clearly oversubscribed, use red. */

    if (!no_cpu_meter_red && cur_utilization >= 150) cpu_color = cLRD;

#ifdef HAVE_AFFINITY

    if (cpu_aff >= 0) {

      SAYF(SP10 cGRA "[cpu%03u:%s%3u%%" cGRA "]\r" cRST, 
           MIN(cpu_aff, 999), cpu_color,
           MIN(cur_utilization, 999));

    } else {

      SAYF(SP10 cGRA "   [cpu:%s%3u%%" cGRA "]\r" cRST,
           cpu_color, MIN(cur_utilization, 999));
 
   }

#else

    SAYF(SP10 cGRA "   [cpu:%s%3u%%" cGRA "]\r" cRST,
         cpu_color, MIN(cur_utilization, 999));

#endif /* ^HAVE_AFFINITY */

  } else SAYF("\r");

  /* Hallelujah! */

  fflush(0);

}
  4339  
  4340  
  4341  /* Display quick statistics at the end of processing the input directory,
  4342     plus a bunch of warnings. Some calibration stuff also ended up here,
  4343     along with several hardcoded constants. Maybe clean up eventually. */
  4344  
static void show_init_stats(void) {

  struct queue_entry* q = queue;
  u32 min_bits = 0, max_bits = 0;
  u64 min_us = 0, max_us = 0;
  u64 avg_us = 0;
  u32 max_len = 0;

  /* Average exec time comes from the calibration phase totals. */

  if (total_cal_cycles) avg_us = total_cal_us / total_cal_cycles;

  /* Walk the whole queue to collect min/max exec time, bitmap size,
     and the largest input length. */

  while (q) {

    if (!min_us || q->exec_us < min_us) min_us = q->exec_us;
    if (q->exec_us > max_us) max_us = q->exec_us;

    if (!min_bits || q->bitmap_size < min_bits) min_bits = q->bitmap_size;
    if (q->bitmap_size > max_bits) max_bits = q->bitmap_size;

    if (q->len > max_len) max_len = q->len;

    q = q->next;

  }

  SAYF("\n");

  /* The "slow" threshold is relaxed 5x under QEMU mode. */

  if (avg_us > (qemu_mode ? 50000 : 10000)) 
    WARNF(cLRD "The target binary is pretty slow! See %s/perf_tips.txt.",
          doc_path);

  /* Let's keep things moving with slow binaries. */

  if (avg_us > 50000) havoc_div = 10;     /* 0-19 execs/sec   */
  else if (avg_us > 20000) havoc_div = 5; /* 20-49 execs/sec  */
  else if (avg_us > 10000) havoc_div = 2; /* 50-100 execs/sec */

  /* Input-corpus nagging is suppressed when resuming an old session. */

  if (!resuming_fuzz) {

    if (max_len > 50 * 1024)
      WARNF(cLRD "Some test cases are huge (%s) - see %s/perf_tips.txt!",
            DMS(max_len), doc_path);
    else if (max_len > 10 * 1024)
      WARNF("Some test cases are big (%s) - see %s/perf_tips.txt.",
            DMS(max_len), doc_path);

    if (useless_at_start && !in_bitmap)
      WARNF(cLRD "Some test cases look useless. Consider using a smaller set.");

    if (queued_paths > 100)
      WARNF(cLRD "You probably have far too many input files! Consider trimming down.");
    else if (queued_paths > 20)
      WARNF("You have lots of input files; try starting small.");

  }

  OKF("Here are some useful stats:\n\n"

      cGRA "    Test case count : " cRST "%u favored, %u variable, %u total\n"
      cGRA "       Bitmap range : " cRST "%u to %u bits (average: %0.02f bits)\n"
      cGRA "        Exec timing : " cRST "%s to %s us (average: %s us)\n",
      queued_favored, queued_variable, queued_paths, min_bits, max_bits, 
      ((double)total_bitmap_size) / (total_bitmap_entries ? total_bitmap_entries : 1),
      DI(min_us), DI(max_us), DI(avg_us));

  if (!timeout_given) {

    /* Figure out the appropriate timeout. The basic idea is: 5x average or
       1x max, rounded up to EXEC_TM_ROUND ms and capped at 1 second.

       If the program is slow, the multiplier is lowered to 2x or 3x, because
       random scheduler jitter is less likely to have any impact, and because
       our patience is wearing thin =) */

    if (avg_us > 50000) exec_tmout = avg_us * 2 / 1000;
    else if (avg_us > 10000) exec_tmout = avg_us * 3 / 1000;
    else exec_tmout = avg_us * 5 / 1000;

    exec_tmout = MAX(exec_tmout, max_us / 1000);
    exec_tmout = (exec_tmout + EXEC_TM_ROUND) / EXEC_TM_ROUND * EXEC_TM_ROUND;

    if (exec_tmout > EXEC_TIMEOUT) exec_tmout = EXEC_TIMEOUT;

    ACTF("No -t option specified, so I'll use exec timeout of %u ms.", 
         exec_tmout);

    timeout_given = 1;

  } else if (timeout_given == 3) {

    /* timeout_given == 3 marks a value restored from a resumed session. */

    ACTF("Applying timeout settings from resumed session (%u ms).", exec_tmout);

  }

  /* In dumb mode, re-running every timing out test case with a generous time
     limit is very expensive, so let's select a more conservative default. */

  if (dumb_mode && !getenv("AFL_HANG_TMOUT"))
    hang_tmout = MIN(EXEC_TIMEOUT, exec_tmout * 2 + 100);

  OKF("All set and ready to roll!");

}
  4447  
  4448  
  4449  /* Find first power of two greater or equal to val (assuming val under
  4450     2^31). */
  4451  
  4452  static u32 next_p2(u32 val) {
  4453  
  4454    u32 ret = 1;
  4455    while (val > ret) ret <<= 1;
  4456    return ret;
  4457  
  4458  } 
  4459  
  4460  
  4461  /* Trim all new test cases to save cycles when doing deterministic checks. The
  4462     trimmer uses power-of-two increments somewhere between 1/16 and 1/1024 of
  4463     file size, to keep the stage short and sweet. */
  4464  
static u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {

  /* tmp doubles as the stage name shown in the UI; clean_trace caches the
     last trace that matched q->exec_cksum. Both are static, so this
     function is not reentrant. */

  static u8 tmp[64];
  static u8 clean_trace[MAP_SIZE];

  u8  needs_write = 0, fault = 0;   /* Fault code of the last run (returned). */
  u32 trim_exec = 0;                /* Execs since last UI refresh. */
  u32 remove_len;                   /* Current deletion chunk size. */
  u32 len_p2;                       /* q->len rounded up to a power of two. */

  /* Although the trimmer will be less useful when variable behavior is
     detected, it will still work to some extent, so we don't check for
     this. */

  if (q->len < 5) return 0;

  stage_name = tmp;
  bytes_trim_in += q->len;

  /* Select initial chunk len, starting with large steps. */

  len_p2 = next_p2(q->len);

  remove_len = MAX(len_p2 / TRIM_START_STEPS, TRIM_MIN_BYTES);

  /* Continue until the number of steps gets too high or the stepover
     gets too small. */

  while (remove_len >= MAX(len_p2 / TRIM_END_STEPS, TRIM_MIN_BYTES)) {

    /* Start at remove_len, not 0: the first chunk is never deleted. */

    u32 remove_pos = remove_len;

    sprintf(tmp, "trim %s/%s", DI(remove_len), DI(remove_len));

    stage_cur = 0;
    stage_max = q->len / remove_len;

    while (remove_pos < q->len) {

      u32 trim_avail = MIN(remove_len, q->len - remove_pos);
      u32 cksum;

      /* Run the target on the input with [remove_pos, remove_pos +
         trim_avail) omitted. */

      write_with_gap(in_buf, q->len, remove_pos, trim_avail);

      fault = run_target(argv, exec_tmout);
      trim_execs++;

      if (stop_soon || fault == FAULT_ERROR) goto abort_trimming;

      /* Note that we don't keep track of crashes or hangs here; maybe TODO? */

      cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);

      /* If the deletion had no impact on the trace, make it permanent. This
         isn't perfect for variable-path inputs, but we're just making a
         best-effort pass, so it's not a big deal if we end up with false
         negatives every now and then. */

      if (cksum == q->exec_cksum) {

        u32 move_tail = q->len - remove_pos - trim_avail;

        q->len -= trim_avail;
        len_p2  = next_p2(q->len);

        /* Close the gap in place; remove_pos intentionally stays put so
           the next chunk is examined at the same offset. */

        memmove(in_buf + remove_pos, in_buf + remove_pos + trim_avail, 
                move_tail);

        /* Let's save a clean trace, which will be needed by
           update_bitmap_score once we're done with the trimming stuff. */

        if (!needs_write) {

          needs_write = 1;
          memcpy(clean_trace, trace_bits, MAP_SIZE);

        }

      } else remove_pos += remove_len;

      /* Since this can be slow, update the screen every now and then. */

      if (!(trim_exec++ % stats_update_freq)) show_stats();
      stage_cur++;

    }

    /* Halve the chunk size for the next, finer-grained pass. */

    remove_len >>= 1;

  }

  /* If we have made changes to in_buf, we also need to update the on-disk
     version of the test case. */

  if (needs_write) {

    s32 fd;

    unlink(q->fname); /* ignore errors */

    fd = open(q->fname, O_WRONLY | O_CREAT | O_EXCL, 0600);

    if (fd < 0) PFATAL("Unable to create '%s'", q->fname);

    ck_write(fd, in_buf, q->len, q->fname);
    close(fd);

    /* Restore the trace of the trimmed input before re-scoring, since
       trace_bits currently holds the last (possibly rejected) run. */

    memcpy(trace_bits, clean_trace, MAP_SIZE);
    update_bitmap_score(q);

  }

abort_trimming:

  bytes_trim_out += q->len;
  return fault;

}
  4583  
  4584  
  4585  /* Write a modified test case, run program, process results. Handle
  4586     error conditions, returning 1 if it's time to bail out. This is
  4587     a helper function for fuzz_one(). */
  4588  
EXP_ST u8 common_fuzz_stuff(char** argv, u8* out_buf, u32 len) {

  u8 fault;

  /* Give the optional postprocessor a chance to rewrite the test case;
     a NULL buffer or zero length means "skip this input" (not an error). */

  if (post_handler) {

    out_buf = post_handler(out_buf, &len);
    if (!out_buf || !len) return 0;

  }

  write_to_testcase(out_buf, len);

  fault = run_target(argv, exec_tmout);

  if (stop_soon) return 1;

  if (fault == FAULT_TMOUT) {

    /* Too many consecutive timeouts: abandon the current queue entry.
       The counter resets on any non-timeout outcome below. */

    if (subseq_tmouts++ > TMOUT_LIMIT) {
      cur_skipped_paths++;
      return 1;
    }

  } else subseq_tmouts = 0;

  /* Users can hit us with SIGUSR1 to request the current input
     to be abandoned. */

  if (skip_requested) {

     skip_requested = 0;
     cur_skipped_paths++;
     return 1;

  }

  /* This handles FAULT_ERROR for us: */

  queued_discovered += save_if_interesting(argv, out_buf, len, fault);

  /* Refresh the UI periodically, and always on the last stage exec. */

  if (!(stage_cur % stats_update_freq) || stage_cur + 1 == stage_max)
    show_stats();

  return 0;

}
  4636  
  4637  
  4638  /* Helper to choose random block len for block operations in fuzz_one().
  4639     Doesn't return zero, provided that max_len is > 0. */
  4640  
  4641  static u32 choose_block_len(u32 limit) {
  4642  
  4643    u32 min_value, max_value;
  4644    u32 rlim = MIN(queue_cycle, 3);
  4645  
  4646    if (!run_over10m) rlim = 1;
  4647  
  4648    switch (UR(rlim)) {
  4649  
  4650      case 0:  min_value = 1;
  4651               max_value = HAVOC_BLK_SMALL;
  4652               break;
  4653  
  4654      case 1:  min_value = HAVOC_BLK_SMALL;
  4655               max_value = HAVOC_BLK_MEDIUM;
  4656               break;
  4657  
  4658      default: 
  4659  
  4660               if (UR(10)) {
  4661  
  4662                 min_value = HAVOC_BLK_MEDIUM;
  4663                 max_value = HAVOC_BLK_LARGE;
  4664  
  4665               } else {
  4666  
  4667                 min_value = HAVOC_BLK_LARGE;
  4668                 max_value = HAVOC_BLK_XL;
  4669  
  4670               }
  4671  
  4672    }
  4673  
  4674    if (min_value >= limit) min_value = 1;
  4675  
  4676    return min_value + UR(MIN(max_value, limit) - min_value + 1);
  4677  
  4678  }
  4679  
  4680  
  4681  /* Calculate case desirability score to adjust the length of havoc fuzzing.
  4682     A helper function for fuzz_one(). Maybe some of these constants should
  4683     go into config.h. */
  4684  
  4685  static u32 calculate_score(struct queue_entry* q) {
  4686  
  4687    u32 avg_exec_us = total_cal_us / total_cal_cycles;
  4688    u32 avg_bitmap_size = total_bitmap_size / total_bitmap_entries;
  4689    u32 perf_score = 100;
  4690  
  4691    /* Adjust score based on execution speed of this path, compared to the
  4692       global average. Multiplier ranges from 0.1x to 3x. Fast inputs are
  4693       less expensive to fuzz, so we're giving them more air time. */
  4694  
  4695    if (q->exec_us * 0.1 > avg_exec_us) perf_score = 10;
  4696    else if (q->exec_us * 0.25 > avg_exec_us) perf_score = 25;
  4697    else if (q->exec_us * 0.5 > avg_exec_us) perf_score = 50;
  4698    else if (q->exec_us * 0.75 > avg_exec_us) perf_score = 75;
  4699    else if (q->exec_us * 4 < avg_exec_us) perf_score = 300;
  4700    else if (q->exec_us * 3 < avg_exec_us) perf_score = 200;
  4701    else if (q->exec_us * 2 < avg_exec_us) perf_score = 150;
  4702  
  4703    /* Adjust score based on bitmap size. The working theory is that better
  4704       coverage translates to better targets. Multiplier from 0.25x to 3x. */
  4705  
  4706    if (q->bitmap_size * 0.3 > avg_bitmap_size) perf_score *= 3;
  4707    else if (q->bitmap_size * 0.5 > avg_bitmap_size) perf_score *= 2;
  4708    else if (q->bitmap_size * 0.75 > avg_bitmap_size) perf_score *= 1.5;
  4709    else if (q->bitmap_size * 3 < avg_bitmap_size) perf_score *= 0.25;
  4710    else if (q->bitmap_size * 2 < avg_bitmap_size) perf_score *= 0.5;
  4711    else if (q->bitmap_size * 1.5 < avg_bitmap_size) perf_score *= 0.75;
  4712  
  4713    /* Adjust score based on handicap. Handicap is proportional to how late
  4714       in the game we learned about this path. Latecomers are allowed to run
  4715       for a bit longer until they catch up with the rest. */
  4716  
  4717    if (q->handicap >= 4) {
  4718  
  4719      perf_score *= 4;
  4720      q->handicap -= 4;
  4721  
  4722    } else if (q->handicap) {
  4723  
  4724      perf_score *= 2;
  4725      q->handicap--;
  4726  
  4727    }
  4728  
  4729    /* Final adjustment based on input depth, under the assumption that fuzzing
  4730       deeper test cases is more likely to reveal stuff that can't be
  4731       discovered with traditional fuzzers. */
  4732  
  4733    switch (q->depth) {
  4734  
  4735      case 0 ... 3:   break;
  4736      case 4 ... 7:   perf_score *= 2; break;
  4737      case 8 ... 13:  perf_score *= 3; break;
  4738      case 14 ... 25: perf_score *= 4; break;
  4739      default:        perf_score *= 5;
  4740  
  4741    }
  4742  
  4743    /* Make sure that we don't go over limit. */
  4744  
  4745    if (perf_score > HAVOC_MAX_MULT * 100) perf_score = HAVOC_MAX_MULT * 100;
  4746  
  4747    return perf_score;
  4748  
  4749  }
  4750  
  4751  
  4752  /* Helper function to see if a particular change (xor_val = old ^ new) could
  4753     be a product of deterministic bit flips with the lengths and stepovers
  4754     attempted by afl-fuzz. This is used to avoid dupes in some of the
  4755     deterministic fuzzing operations that follow bit flips. We also
  4756     return 1 if xor_val is zero, which implies that the old and attempted new
  4757     values are identical and the exec would be a waste of time. */
  4758  
  4759  static u8 could_be_bitflip(u32 xor_val) {
  4760  
  4761    u32 sh = 0;
  4762  
  4763    if (!xor_val) return 1;
  4764  
  4765    /* Shift left until first bit set. */
  4766  
  4767    while (!(xor_val & 1)) { sh++; xor_val >>= 1; }
  4768  
  4769    /* 1-, 2-, and 4-bit patterns are OK anywhere. */
  4770  
  4771    if (xor_val == 1 || xor_val == 3 || xor_val == 15) return 1;
  4772  
  4773    /* 8-, 16-, and 32-bit patterns are OK only if shift factor is
  4774       divisible by 8, since that's the stepover for these ops. */
  4775  
  4776    if (sh & 7) return 0;
  4777  
  4778    if (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff)
  4779      return 1;
  4780  
  4781    return 0;
  4782  
  4783  }
  4784  
  4785  
/* Helper function to see if a particular value is reachable through
   arithmetic operations. Used for similar purposes.

   Returns 1 if new_val could be produced from old_val by adding or
   subtracting at most ARITH_MAX to/from a single byte, a 16-bit word
   (either endianness), or - when blen == 4 - the whole dword (either
   endianness). This lets the deterministic stages skip execs that the
   arith stage would generate anyway. */

static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) {

  u32 i, ov = 0, nv = 0, diffs = 0;

  /* Identical values - the exec would be a waste of time. */

  if (old_val == new_val) return 1;

  /* See if one-byte adjustments to any byte could produce this result. */

  for (i = 0; i < blen; i++) {

    u8 a = old_val >> (8 * i),
       b = new_val >> (8 * i);

    /* Track the last differing byte pair; it is only meaningful if
       exactly one byte differs (checked below). */

    if (a != b) { diffs++; ov = a; nv = b; }

  }

  /* If only one byte differs and the values are within range, return 1. */

  if (diffs == 1) {

    if ((u8)(ov - nv) <= ARITH_MAX ||
        (u8)(nv - ov) <= ARITH_MAX) return 1;

  }

  if (blen == 1) return 0;

  /* See if two-byte adjustments to any byte would produce this result. */

  diffs = 0;

  for (i = 0; i < blen / 2; i++) {

    u16 a = old_val >> (16 * i),
        b = new_val >> (16 * i);

    if (a != b) { diffs++; ov = a; nv = b; }

  }

  /* If only one word differs and the values are within range, return 1. */

  if (diffs == 1) {

    if ((u16)(ov - nv) <= ARITH_MAX ||
        (u16)(nv - ov) <= ARITH_MAX) return 1;

    /* Retry with the word byte-swapped, covering the big-endian
       variants of the arith stage. */

    ov = SWAP16(ov); nv = SWAP16(nv);

    if ((u16)(ov - nv) <= ARITH_MAX ||
        (u16)(nv - ov) <= ARITH_MAX) return 1;

  }

  /* Finally, let's do the same thing for dwords. */

  if (blen == 4) {

    if ((u32)(old_val - new_val) <= ARITH_MAX ||
        (u32)(new_val - old_val) <= ARITH_MAX) return 1;

    /* Big-endian variant. */

    new_val = SWAP32(new_val);
    old_val = SWAP32(old_val);

    if ((u32)(old_val - new_val) <= ARITH_MAX ||
        (u32)(new_val - old_val) <= ARITH_MAX) return 1;

  }

  return 0;

}
  4862  
  4863  
  4864  /* Last but not least, a similar helper to see if insertion of an 
  4865     interesting integer is redundant given the insertions done for
  4866     shorter blen. The last param (check_le) is set if the caller
  4867     already executed LE insertion for current blen and wants to see
  4868     if BE variant passed in new_val is unique. */
  4869  
  4870  static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) {
  4871  
  4872    u32 i, j;
  4873  
  4874    if (old_val == new_val) return 1;
  4875  
  4876    /* See if one-byte insertions from interesting_8 over old_val could
  4877       produce new_val. */
  4878  
  4879    for (i = 0; i < blen; i++) {
  4880  
  4881      for (j = 0; j < sizeof(interesting_8); j++) {
  4882  
  4883        u32 tval = (old_val & ~(0xff << (i * 8))) |
  4884                   (((u8)interesting_8[j]) << (i * 8));
  4885  
  4886        if (new_val == tval) return 1;
  4887  
  4888      }
  4889  
  4890    }
  4891  
  4892    /* Bail out unless we're also asked to examine two-byte LE insertions
  4893       as a preparation for BE attempts. */
  4894  
  4895    if (blen == 2 && !check_le) return 0;
  4896  
  4897    /* See if two-byte insertions over old_val could give us new_val. */
  4898  
  4899    for (i = 0; i < blen - 1; i++) {
  4900  
  4901      for (j = 0; j < sizeof(interesting_16) / 2; j++) {
  4902  
  4903        u32 tval = (old_val & ~(0xffff << (i * 8))) |
  4904                   (((u16)interesting_16[j]) << (i * 8));
  4905  
  4906        if (new_val == tval) return 1;
  4907  
  4908        /* Continue here only if blen > 2. */
  4909  
  4910        if (blen > 2) {
  4911  
  4912          tval = (old_val & ~(0xffff << (i * 8))) |
  4913                 (SWAP16(interesting_16[j]) << (i * 8));
  4914  
  4915          if (new_val == tval) return 1;
  4916  
  4917        }
  4918  
  4919      }
  4920  
  4921    }
  4922  
  4923    if (blen == 4 && check_le) {
  4924  
  4925      /* See if four-byte insertions could produce the same result
  4926         (LE only). */
  4927  
  4928      for (j = 0; j < sizeof(interesting_32) / 4; j++)
  4929        if (new_val == (u32)interesting_32[j]) return 1;
  4930  
  4931    }
  4932  
  4933    return 0;
  4934  
  4935  }
  4936  
  4937  
  4938  /* Take the current entry from the queue, fuzz it for a while. This
  4939     function is a tad too long... returns 0 if fuzzed successfully, 1 if
  4940     skipped or bailed out. */
  4941  
  4942  static u8 fuzz_one(char** argv) {
  4943  
  4944    s32 len, fd, temp_len, i, j;
  4945    u8  *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
  4946    u64 havoc_queued,  orig_hit_cnt, new_hit_cnt;
  4947    u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1;
  4948  
  4949    u8  ret_val = 1, doing_det = 0;
  4950  
  4951    u8  a_collect[MAX_AUTO_EXTRA];
  4952    u32 a_len = 0;
  4953  
  4954  #ifdef IGNORE_FINDS
  4955  
  4956    /* In IGNORE_FINDS mode, skip any entries that weren't in the
  4957       initial data set. */
  4958  
  4959    if (queue_cur->depth > 1) return 1;
  4960  
  4961  #else
  4962  
  4963    if (pending_favored) {
  4964  
  4965      /* If we have any favored, non-fuzzed new arrivals in the queue,
  4966         possibly skip to them at the expense of already-fuzzed or non-favored
  4967         cases. */
  4968  
  4969      if ((queue_cur->was_fuzzed || !queue_cur->favored) &&
  4970          UR(100) < SKIP_TO_NEW_PROB) return 1;
  4971  
  4972    } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) {
  4973  
  4974      /* Otherwise, still possibly skip non-favored cases, albeit less often.
  4975         The odds of skipping stuff are higher for already-fuzzed inputs and
  4976         lower for never-fuzzed entries. */
  4977  
  4978      if (queue_cycle > 1 && !queue_cur->was_fuzzed) {
  4979  
  4980        if (UR(100) < SKIP_NFAV_NEW_PROB) return 1;
  4981  
  4982      } else {
  4983  
  4984        if (UR(100) < SKIP_NFAV_OLD_PROB) return 1;
  4985  
  4986      }
  4987  
  4988    }
  4989  
  4990  #endif /* ^IGNORE_FINDS */
  4991  
  4992    if (not_on_tty) {
  4993      ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...",
  4994           current_entry, queued_paths, unique_crashes);
  4995      fflush(stdout);
  4996    }
  4997  
  4998    /* Map the test case into memory. */
  4999  
  5000    fd = open(queue_cur->fname, O_RDONLY);
  5001  
  5002    if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname);
  5003  
  5004    len = queue_cur->len;
  5005  
  5006    orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  5007  
  5008    if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname);
  5009  
  5010    close(fd);
  5011  
  5012    /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every
  5013       single byte anyway, so it wouldn't give us any performance or memory usage
  5014       benefits. */
  5015  
  5016    out_buf = ck_alloc_nozero(len);
  5017  
  5018    subseq_tmouts = 0;
  5019  
  5020    cur_depth = queue_cur->depth;
  5021  
  5022    /*******************************************
  5023     * CALIBRATION (only if failed earlier on) *
  5024     *******************************************/
  5025  
  5026    if (queue_cur->cal_failed) {
  5027  
  5028      u8 res = FAULT_TMOUT;
  5029  
  5030      if (queue_cur->cal_failed < CAL_CHANCES) {
  5031  
  5032        res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0);
  5033  
  5034        if (res == FAULT_ERROR)
  5035          FATAL("Unable to execute target application");
  5036  
  5037      }
  5038  
  5039      if (stop_soon || res != crash_mode) {
  5040        cur_skipped_paths++;
  5041        goto abandon_entry;
  5042      }
  5043  
  5044    }
  5045  
  5046    /************
  5047     * TRIMMING *
  5048     ************/
  5049  
  5050    if (!dumb_mode && !queue_cur->trim_done) {
  5051  
  5052      u8 res = trim_case(argv, queue_cur, in_buf);
  5053  
  5054      if (res == FAULT_ERROR)
  5055        FATAL("Unable to execute target application");
  5056  
  5057      if (stop_soon) {
  5058        cur_skipped_paths++;
  5059        goto abandon_entry;
  5060      }
  5061  
  5062      /* Don't retry trimming, even if it failed. */
  5063  
  5064      queue_cur->trim_done = 1;
  5065  
  5066      if (len != queue_cur->len) len = queue_cur->len;
  5067  
  5068    }
  5069  
  5070    memcpy(out_buf, in_buf, len);
  5071  
  5072    /*********************
  5073     * PERFORMANCE SCORE *
  5074     *********************/
  5075  
  5076    orig_perf = perf_score = calculate_score(queue_cur);
  5077  
  5078    /* Skip right away if -d is given, if we have done deterministic fuzzing on
  5079       this entry ourselves (was_fuzzed), or if it has gone through deterministic
  5080       testing in earlier, resumed runs (passed_det). */
  5081  
  5082    if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det)
  5083      goto havoc_stage;
  5084  
  5085    /* Skip deterministic fuzzing if exec path checksum puts this out of scope
  5086       for this master instance. */
  5087  
  5088    if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1)
  5089      goto havoc_stage;
  5090  
  5091    doing_det = 1;
  5092  
  5093    /*********************************************
  5094     * SIMPLE BITFLIP (+dictionary construction) *
  5095     *********************************************/
  5096  
  5097  #define FLIP_BIT(_ar, _b) do { \
  5098      u8* _arf = (u8*)(_ar); \
  5099      u32 _bf = (_b); \
  5100      _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \
  5101    } while (0)
  5102  
  5103    /* Single walking bit. */
  5104  
  5105    stage_short = "flip1";
  5106    stage_max   = len << 3;
  5107    stage_name  = "bitflip 1/1";
  5108  
  5109    stage_val_type = STAGE_VAL_NONE;
  5110  
  5111    orig_hit_cnt = queued_paths + unique_crashes;
  5112  
  5113    prev_cksum = queue_cur->exec_cksum;
  5114  
  5115    for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
  5116  
  5117      stage_cur_byte = stage_cur >> 3;
  5118  
  5119      FLIP_BIT(out_buf, stage_cur);
  5120  
  5121      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
  5122  
  5123      FLIP_BIT(out_buf, stage_cur);
  5124  
    /* While flipping the least significant bit in every byte, pull off an extra
  5126         trick to detect possible syntax tokens. In essence, the idea is that if
  5127         you have a binary blob like this:
  5128  
  5129         xxxxxxxxIHDRxxxxxxxx
  5130  
  5131         ...and changing the leading and trailing bytes causes variable or no
  5132         changes in program flow, but touching any character in the "IHDR" string
  5133         always produces the same, distinctive path, it's highly likely that
  5134         "IHDR" is an atomically-checked magic value of special significance to
  5135         the fuzzed format.
  5136  
  5137         We do this here, rather than as a separate stage, because it's a nice
  5138         way to keep the operation approximately "free" (i.e., no extra execs).
  5139         
  5140         Empirically, performing the check when flipping the least significant bit
  5141         is advantageous, compared to doing it at the time of more disruptive
  5142         changes, where the program flow may be affected in more violent ways.
  5143  
  5144         The caveat is that we won't generate dictionaries in the -d mode or -S
  5145         mode - but that's probably a fair trade-off.
  5146  
  5147         This won't work particularly well with paths that exhibit variable
  5148         behavior, but fails gracefully, so we'll carry out the checks anyway.
  5149  
  5150        */
  5151  
  5152      if (!dumb_mode && (stage_cur & 7) == 7) {
  5153  
  5154        u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
  5155  
  5156        if (stage_cur == stage_max - 1 && cksum == prev_cksum) {
  5157  
  5158          /* If at end of file and we are still collecting a string, grab the
  5159             final character and force output. */
  5160  
  5161          if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
  5162          a_len++;
  5163  
  5164          if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
  5165            maybe_add_auto(a_collect, a_len);
  5166  
  5167        } else if (cksum != prev_cksum) {
  5168  
  5169          /* Otherwise, if the checksum has changed, see if we have something
  5170             worthwhile queued up, and collect that if the answer is yes. */
  5171  
  5172          if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
  5173            maybe_add_auto(a_collect, a_len);
  5174  
  5175          a_len = 0;
  5176          prev_cksum = cksum;
  5177  
  5178        }
  5179  
  5180        /* Continue collecting string, but only if the bit flip actually made
  5181           any difference - we don't want no-op tokens. */
  5182  
  5183        if (cksum != queue_cur->exec_cksum) {
  5184  
  5185          if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];        
  5186          a_len++;
  5187  
  5188        }
  5189  
  5190      }
  5191  
  5192    }
  5193  
  5194    new_hit_cnt = queued_paths + unique_crashes;
  5195  
  5196    stage_finds[STAGE_FLIP1]  += new_hit_cnt - orig_hit_cnt;
  5197    stage_cycles[STAGE_FLIP1] += stage_max;
  5198  
  5199    /* Two walking bits. */
  5200  
  5201    stage_name  = "bitflip 2/1";
  5202    stage_short = "flip2";
  5203    stage_max   = (len << 3) - 1;
  5204  
  5205    orig_hit_cnt = new_hit_cnt;
  5206  
  5207    for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
  5208  
  5209      stage_cur_byte = stage_cur >> 3;
  5210  
  5211      FLIP_BIT(out_buf, stage_cur);
  5212      FLIP_BIT(out_buf, stage_cur + 1);
  5213  
  5214      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
  5215  
  5216      FLIP_BIT(out_buf, stage_cur);
  5217      FLIP_BIT(out_buf, stage_cur + 1);
  5218  
  5219    }
  5220  
  5221    new_hit_cnt = queued_paths + unique_crashes;
  5222  
  5223    stage_finds[STAGE_FLIP2]  += new_hit_cnt - orig_hit_cnt;
  5224    stage_cycles[STAGE_FLIP2] += stage_max;
  5225  
  5226    /* Four walking bits. */
  5227  
  5228    stage_name  = "bitflip 4/1";
  5229    stage_short = "flip4";
  5230    stage_max   = (len << 3) - 3;
  5231  
  5232    orig_hit_cnt = new_hit_cnt;
  5233  
  5234    for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
  5235  
  5236      stage_cur_byte = stage_cur >> 3;
  5237  
  5238      FLIP_BIT(out_buf, stage_cur);
  5239      FLIP_BIT(out_buf, stage_cur + 1);
  5240      FLIP_BIT(out_buf, stage_cur + 2);
  5241      FLIP_BIT(out_buf, stage_cur + 3);
  5242  
  5243      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
  5244  
  5245      FLIP_BIT(out_buf, stage_cur);
  5246      FLIP_BIT(out_buf, stage_cur + 1);
  5247      FLIP_BIT(out_buf, stage_cur + 2);
  5248      FLIP_BIT(out_buf, stage_cur + 3);
  5249  
  5250    }
  5251  
  5252    new_hit_cnt = queued_paths + unique_crashes;
  5253  
  5254    stage_finds[STAGE_FLIP4]  += new_hit_cnt - orig_hit_cnt;
  5255    stage_cycles[STAGE_FLIP4] += stage_max;
  5256  
  5257    /* Effector map setup. These macros calculate:
  5258  
  5259       EFF_APOS      - position of a particular file offset in the map.
  5260       EFF_ALEN      - length of a map with a particular number of bytes.
  5261       EFF_SPAN_ALEN - map span for a sequence of bytes.
  5262  
  5263     */
  5264  
  5265  #define EFF_APOS(_p)          ((_p) >> EFF_MAP_SCALE2)
  5266  #define EFF_REM(_x)           ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
  5267  #define EFF_ALEN(_l)          (EFF_APOS(_l) + !!EFF_REM(_l))
  5268  #define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1)
  5269  
  5270    /* Initialize effector map for the next step (see comments below). Always
  5271       flag first and last byte as doing something. */
  5272  
  5273    eff_map    = ck_alloc(EFF_ALEN(len));
  5274    eff_map[0] = 1;
  5275  
  5276    if (EFF_APOS(len - 1) != 0) {
  5277      eff_map[EFF_APOS(len - 1)] = 1;
  5278      eff_cnt++;
  5279    }
  5280  
  5281    /* Walking byte. */
  5282  
  5283    stage_name  = "bitflip 8/8";
  5284    stage_short = "flip8";
  5285    stage_max   = len;
  5286  
  5287    orig_hit_cnt = new_hit_cnt;
  5288  
  5289    for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
  5290  
  5291      stage_cur_byte = stage_cur;
  5292  
  5293      out_buf[stage_cur] ^= 0xFF;
  5294  
  5295      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
  5296  
  5297      /* We also use this stage to pull off a simple trick: we identify
  5298         bytes that seem to have no effect on the current execution path
  5299         even when fully flipped - and we skip them during more expensive
  5300         deterministic stages, such as arithmetics or known ints. */
  5301  
  5302      if (!eff_map[EFF_APOS(stage_cur)]) {
  5303  
  5304        u32 cksum;
  5305  
  5306        /* If in dumb mode or if the file is very short, just flag everything
  5307           without wasting time on checksums. */
  5308  
  5309        if (!dumb_mode && len >= EFF_MIN_LEN)
  5310          cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
  5311        else
  5312          cksum = ~queue_cur->exec_cksum;
  5313  
  5314        if (cksum != queue_cur->exec_cksum) {
  5315          eff_map[EFF_APOS(stage_cur)] = 1;
  5316          eff_cnt++;
  5317        }
  5318  
  5319      }
  5320  
  5321      out_buf[stage_cur] ^= 0xFF;
  5322  
  5323    }
  5324  
  5325    /* If the effector map is more than EFF_MAX_PERC dense, just flag the
  5326       whole thing as worth fuzzing, since we wouldn't be saving much time
  5327       anyway. */
  5328  
  5329    if (eff_cnt != EFF_ALEN(len) &&
  5330        eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) {
  5331  
  5332      memset(eff_map, 1, EFF_ALEN(len));
  5333  
  5334      blocks_eff_select += EFF_ALEN(len);
  5335  
  5336    } else {
  5337  
  5338      blocks_eff_select += eff_cnt;
  5339  
  5340    }
  5341  
  5342    blocks_eff_total += EFF_ALEN(len);
  5343  
  5344    new_hit_cnt = queued_paths + unique_crashes;
  5345  
  5346    stage_finds[STAGE_FLIP8]  += new_hit_cnt - orig_hit_cnt;
  5347    stage_cycles[STAGE_FLIP8] += stage_max;
  5348  
  5349    /* Two walking bytes. */
  5350  
  5351    if (len < 2) goto skip_bitflip;
  5352  
  5353    stage_name  = "bitflip 16/8";
  5354    stage_short = "flip16";
  5355    stage_cur   = 0;
  5356    stage_max   = len - 1;
  5357  
  5358    orig_hit_cnt = new_hit_cnt;
  5359  
  5360    for (i = 0; i < len - 1; i++) {
  5361  
  5362      /* Let's consult the effector map... */
  5363  
  5364      if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
  5365        stage_max--;
  5366        continue;
  5367      }
  5368  
  5369      stage_cur_byte = i;
  5370  
  5371      *(u16*)(out_buf + i) ^= 0xFFFF;
  5372  
  5373      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
  5374      stage_cur++;
  5375  
  5376      *(u16*)(out_buf + i) ^= 0xFFFF;
  5377  
  5378  
  5379    }
  5380  
  5381    new_hit_cnt = queued_paths + unique_crashes;
  5382  
  5383    stage_finds[STAGE_FLIP16]  += new_hit_cnt - orig_hit_cnt;
  5384    stage_cycles[STAGE_FLIP16] += stage_max;
  5385  
  5386    if (len < 4) goto skip_bitflip;
  5387  
  5388    /* Four walking bytes. */
  5389  
  5390    stage_name  = "bitflip 32/8";
  5391    stage_short = "flip32";
  5392    stage_cur   = 0;
  5393    stage_max   = len - 3;
  5394  
  5395    orig_hit_cnt = new_hit_cnt;
  5396  
  5397    for (i = 0; i < len - 3; i++) {
  5398  
  5399      /* Let's consult the effector map... */
  5400      if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
  5401          !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
  5402        stage_max--;
  5403        continue;
  5404      }
  5405  
  5406      stage_cur_byte = i;
  5407  
  5408      *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
  5409  
  5410      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
  5411      stage_cur++;
  5412  
  5413      *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
  5414  
  5415    }
  5416  
  5417    new_hit_cnt = queued_paths + unique_crashes;
  5418  
  5419    stage_finds[STAGE_FLIP32]  += new_hit_cnt - orig_hit_cnt;
  5420    stage_cycles[STAGE_FLIP32] += stage_max;
  5421  
  5422  skip_bitflip:
  5423  
  5424    if (no_arith) goto skip_arith;
  5425  
  5426    /**********************
  5427     * ARITHMETIC INC/DEC *
  5428     **********************/
  5429  
  5430    /* 8-bit arithmetics. */
  5431  
  5432    stage_name  = "arith 8/8";
  5433    stage_short = "arith8";
  5434    stage_cur   = 0;
  5435    stage_max   = 2 * len * ARITH_MAX;
  5436  
  5437    stage_val_type = STAGE_VAL_LE;
  5438  
  5439    orig_hit_cnt = new_hit_cnt;
  5440  
  5441    for (i = 0; i < len; i++) {
  5442  
  5443      u8 orig = out_buf[i];
  5444  
  5445      /* Let's consult the effector map... */
  5446  
  5447      if (!eff_map[EFF_APOS(i)]) {
  5448        stage_max -= 2 * ARITH_MAX;
  5449        continue;
  5450      }
  5451  
  5452      stage_cur_byte = i;
  5453  
  5454      for (j = 1; j <= ARITH_MAX; j++) {
  5455  
  5456        u8 r = orig ^ (orig + j);
  5457  
  5458        /* Do arithmetic operations only if the result couldn't be a product
  5459           of a bitflip. */
  5460  
  5461        if (!could_be_bitflip(r)) {
  5462  
  5463          stage_cur_val = j;
  5464          out_buf[i] = orig + j;
  5465  
  5466          if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
  5467          stage_cur++;
  5468  
  5469        } else stage_max--;
  5470  
  5471        r =  orig ^ (orig - j);
  5472  
  5473        if (!could_be_bitflip(r)) {
  5474  
  5475          stage_cur_val = -j;
  5476          out_buf[i] = orig - j;
  5477  
  5478          if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
  5479          stage_cur++;
  5480  
  5481        } else stage_max--;
  5482  
  5483        out_buf[i] = orig;
  5484  
  5485      }
  5486  
  5487    }
  5488  
  5489    new_hit_cnt = queued_paths + unique_crashes;
  5490  
  5491    stage_finds[STAGE_ARITH8]  += new_hit_cnt - orig_hit_cnt;
  5492    stage_cycles[STAGE_ARITH8] += stage_max;
  5493  
  5494    /* 16-bit arithmetics, both endians. */
  5495  
  5496    if (len < 2) goto skip_arith;
  5497  
  5498    stage_name  = "arith 16/8";
  5499    stage_short = "arith16";
  5500    stage_cur   = 0;
  5501    stage_max   = 4 * (len - 1) * ARITH_MAX;
  5502  
  5503    orig_hit_cnt = new_hit_cnt;
  5504  
  5505    for (i = 0; i < len - 1; i++) {
  5506  
  5507      u16 orig = *(u16*)(out_buf + i);
  5508  
  5509      /* Let's consult the effector map... */
  5510  
  5511      if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
  5512        stage_max -= 4 * ARITH_MAX;
  5513        continue;
  5514      }
  5515  
  5516      stage_cur_byte = i;
  5517  
  5518      for (j = 1; j <= ARITH_MAX; j++) {
  5519  
  5520        u16 r1 = orig ^ (orig + j),
  5521            r2 = orig ^ (orig - j),
  5522            r3 = orig ^ SWAP16(SWAP16(orig) + j),
  5523            r4 = orig ^ SWAP16(SWAP16(orig) - j);
  5524  
  5525        /* Try little endian addition and subtraction first. Do it only
  5526           if the operation would affect more than one byte (hence the 
  5527           & 0xff overflow checks) and if it couldn't be a product of
  5528           a bitflip. */
  5529  
  5530        stage_val_type = STAGE_VAL_LE; 
  5531  
  5532        if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) {
  5533  
  5534          stage_cur_val = j;
  5535          *(u16*)(out_buf + i) = orig + j;
  5536  
  5537          if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
  5538          stage_cur++;
  5539   
  5540        } else stage_max--;
  5541  
  5542        if ((orig & 0xff) < j && !could_be_bitflip(r2)) {
  5543  
  5544          stage_cur_val = -j;
  5545          *(u16*)(out_buf + i) = orig - j;
  5546  
  5547          if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
  5548          stage_cur++;
  5549  
  5550        } else stage_max--;
  5551  
  5552        /* Big endian comes next. Same deal. */
  5553  
  5554        stage_val_type = STAGE_VAL_BE;
  5555  
  5556  
  5557        if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) {
  5558  
  5559          stage_cur_val = j;
  5560          *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j);
  5561  
  5562          if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
  5563          stage_cur++;
  5564  
  5565        } else stage_max--;
  5566  
  5567        if ((orig >> 8) < j && !could_be_bitflip(r4)) {
  5568  
  5569          stage_cur_val = -j;
  5570          *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j);
  5571  
  5572          if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
  5573          stage_cur++;
  5574  
  5575        } else stage_max--;
  5576  
  5577        *(u16*)(out_buf + i) = orig;
  5578  
  5579      }
  5580  
  5581    }
  5582  
  5583    new_hit_cnt = queued_paths + unique_crashes;
  5584  
  5585    stage_finds[STAGE_ARITH16]  += new_hit_cnt - orig_hit_cnt;
  5586    stage_cycles[STAGE_ARITH16] += stage_max;
  5587  
  5588    /* 32-bit arithmetics, both endians. */
  5589  
  5590    if (len < 4) goto skip_arith;
  5591  
  5592    stage_name  = "arith 32/8";
  5593    stage_short = "arith32";
  5594    stage_cur   = 0;
  5595    stage_max   = 4 * (len - 3) * ARITH_MAX;
  5596  
  5597    orig_hit_cnt = new_hit_cnt;
  5598  
  5599    for (i = 0; i < len - 3; i++) {
  5600  
  5601      u32 orig = *(u32*)(out_buf + i);
  5602  
  5603      /* Let's consult the effector map... */
  5604  
  5605      if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
  5606          !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
  5607        stage_max -= 4 * ARITH_MAX;
  5608        continue;
  5609      }
  5610  
  5611      stage_cur_byte = i;
  5612  
  5613      for (j = 1; j <= ARITH_MAX; j++) {
  5614  
  5615        u32 r1 = orig ^ (orig + j),
  5616            r2 = orig ^ (orig - j),
  5617            r3 = orig ^ SWAP32(SWAP32(orig) + j),
  5618            r4 = orig ^ SWAP32(SWAP32(orig) - j);
  5619  
  5620        /* Little endian first. Same deal as with 16-bit: we only want to
  5621           try if the operation would have effect on more than two bytes. */
  5622  
  5623        stage_val_type = STAGE_VAL_LE;
  5624  
  5625        if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) {
  5626  
  5627          stage_cur_val = j;
  5628          *(u32*)(out_buf + i) = orig + j;
  5629  
  5630          if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
  5631          stage_cur++;
  5632  
  5633        } else stage_max--;
  5634  
  5635        if ((orig & 0xffff) < j && !could_be_bitflip(r2)) {
  5636  
  5637          stage_cur_val = -j;
  5638          *(u32*)(out_buf + i) = orig - j;
  5639  
  5640          if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
  5641          stage_cur++;
  5642  
  5643        } else stage_max--;
  5644  
  5645        /* Big endian next. */
  5646  
  5647        stage_val_type = STAGE_VAL_BE;
  5648  
  5649        if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) {
  5650  
  5651          stage_cur_val = j;
  5652          *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j);
  5653  
  5654          if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
  5655          stage_cur++;
  5656  
  5657        } else stage_max--;
  5658  
  5659        if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) {
  5660  
  5661          stage_cur_val = -j;
  5662          *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j);
  5663  
  5664          if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
  5665          stage_cur++;
  5666  
  5667        } else stage_max--;
  5668  
  5669        *(u32*)(out_buf + i) = orig;
  5670  
  5671      }
  5672  
  5673    }
  5674  
  5675    new_hit_cnt = queued_paths + unique_crashes;
  5676  
  5677    stage_finds[STAGE_ARITH32]  += new_hit_cnt - orig_hit_cnt;
  5678    stage_cycles[STAGE_ARITH32] += stage_max;
  5679  
  5680  skip_arith:
  5681  
  5682    /**********************
  5683     * INTERESTING VALUES *
  5684     **********************/
  5685  
  5686    stage_name  = "interest 8/8";
  5687    stage_short = "int8";
  5688    stage_cur   = 0;
  5689    stage_max   = len * sizeof(interesting_8);
  5690  
  5691    stage_val_type = STAGE_VAL_LE;
  5692  
  5693    orig_hit_cnt = new_hit_cnt;
  5694  
  5695    /* Setting 8-bit integers. */
  5696  
  5697    for (i = 0; i < len; i++) {
  5698  
  5699      u8 orig = out_buf[i];
  5700  
  5701      /* Let's consult the effector map... */
  5702  
  5703      if (!eff_map[EFF_APOS(i)]) {
  5704        stage_max -= sizeof(interesting_8);
  5705        continue;
  5706      }
  5707  
  5708      stage_cur_byte = i;
  5709  
  5710      for (j = 0; j < sizeof(interesting_8); j++) {
  5711  
  5712        /* Skip if the value could be a product of bitflips or arithmetics. */
  5713  
  5714        if (could_be_bitflip(orig ^ (u8)interesting_8[j]) ||
  5715            could_be_arith(orig, (u8)interesting_8[j], 1)) {
  5716          stage_max--;
  5717          continue;
  5718        }
  5719  
  5720        stage_cur_val = interesting_8[j];
  5721        out_buf[i] = interesting_8[j];
  5722  
  5723        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
  5724  
  5725        out_buf[i] = orig;
  5726        stage_cur++;
  5727  
  5728      }
  5729  
  5730    }
  5731  
  5732    new_hit_cnt = queued_paths + unique_crashes;
  5733  
  5734    stage_finds[STAGE_INTEREST8]  += new_hit_cnt - orig_hit_cnt;
  5735    stage_cycles[STAGE_INTEREST8] += stage_max;
  5736  
  5737    /* Setting 16-bit integers, both endians. */
  5738  
  5739    if (no_arith || len < 2) goto skip_interest;
  5740  
  5741    stage_name  = "interest 16/8";
  5742    stage_short = "int16";
  5743    stage_cur   = 0;
  5744    stage_max   = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
  5745  
  5746    orig_hit_cnt = new_hit_cnt;
  5747  
  5748    for (i = 0; i < len - 1; i++) {
  5749  
  5750      u16 orig = *(u16*)(out_buf + i);
  5751  
  5752      /* Let's consult the effector map... */
  5753  
  5754      if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
  5755        stage_max -= sizeof(interesting_16);
  5756        continue;
  5757      }
  5758  
  5759      stage_cur_byte = i;
  5760  
  5761      for (j = 0; j < sizeof(interesting_16) / 2; j++) {
  5762  
  5763        stage_cur_val = interesting_16[j];
  5764  
  5765        /* Skip if this could be a product of a bitflip, arithmetics,
  5766           or single-byte interesting value insertion. */
  5767  
  5768        if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) &&
  5769            !could_be_arith(orig, (u16)interesting_16[j], 2) &&
  5770            !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) {
  5771  
  5772          stage_val_type = STAGE_VAL_LE;
  5773  
  5774          *(u16*)(out_buf + i) = interesting_16[j];
  5775  
  5776          if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
  5777          stage_cur++;
  5778  
  5779        } else stage_max--;
  5780  
  5781        if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) &&
  5782            !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) &&
  5783            !could_be_arith(orig, SWAP16(interesting_16[j]), 2) &&
  5784            !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) {
  5785  
  5786          stage_val_type = STAGE_VAL_BE;
  5787  
  5788          *(u16*)(out_buf + i) = SWAP16(interesting_16[j]);
  5789          if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
  5790          stage_cur++;
  5791  
  5792        } else stage_max--;
  5793  
  5794      }
  5795  
  5796      *(u16*)(out_buf + i) = orig;
  5797  
  5798    }
  5799  
  5800    new_hit_cnt = queued_paths + unique_crashes;
  5801  
  5802    stage_finds[STAGE_INTEREST16]  += new_hit_cnt - orig_hit_cnt;
  5803    stage_cycles[STAGE_INTEREST16] += stage_max;
  5804  
  5805    if (len < 4) goto skip_interest;
  5806  
  5807    /* Setting 32-bit integers, both endians. */
  5808  
  5809    stage_name  = "interest 32/8";
  5810    stage_short = "int32";
  5811    stage_cur   = 0;
  5812    stage_max   = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
  5813  
  5814    orig_hit_cnt = new_hit_cnt;
  5815  
  5816    for (i = 0; i < len - 3; i++) {
  5817  
  5818      u32 orig = *(u32*)(out_buf + i);
  5819  
  5820      /* Let's consult the effector map... */
  5821  
  5822      if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
  5823          !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
  5824        stage_max -= sizeof(interesting_32) >> 1;
  5825        continue;
  5826      }
  5827  
  5828      stage_cur_byte = i;
  5829  
  5830      for (j = 0; j < sizeof(interesting_32) / 4; j++) {
  5831  
  5832        stage_cur_val = interesting_32[j];
  5833  
  5834        /* Skip if this could be a product of a bitflip, arithmetics,
  5835           or word interesting value insertion. */
  5836  
  5837        if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) &&
  5838            !could_be_arith(orig, interesting_32[j], 4) &&
  5839            !could_be_interest(orig, interesting_32[j], 4, 0)) {
  5840  
  5841          stage_val_type = STAGE_VAL_LE;
  5842  
  5843          *(u32*)(out_buf + i) = interesting_32[j];
  5844  
  5845          if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
  5846          stage_cur++;
  5847  
  5848        } else stage_max--;
  5849  
  5850        if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) &&
  5851            !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) &&
  5852            !could_be_arith(orig, SWAP32(interesting_32[j]), 4) &&
  5853            !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) {
  5854  
  5855          stage_val_type = STAGE_VAL_BE;
  5856  
  5857          *(u32*)(out_buf + i) = SWAP32(interesting_32[j]);
  5858          if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
  5859          stage_cur++;
  5860  
  5861        } else stage_max--;
  5862  
  5863      }
  5864  
  5865      *(u32*)(out_buf + i) = orig;
  5866  
  5867    }
  5868  
  5869    new_hit_cnt = queued_paths + unique_crashes;
  5870  
  5871    stage_finds[STAGE_INTEREST32]  += new_hit_cnt - orig_hit_cnt;
  5872    stage_cycles[STAGE_INTEREST32] += stage_max;
  5873  
  5874  skip_interest:
  5875  
  5876    /********************
  5877     * DICTIONARY STUFF *
  5878     ********************/
  5879  
  5880    if (!extras_cnt) goto skip_user_extras;
  5881  
  5882    /* Overwrite with user-supplied extras. */
  5883  
  5884    stage_name  = "user extras (over)";
  5885    stage_short = "ext_UO";
  5886    stage_cur   = 0;
  5887    stage_max   = extras_cnt * len;
  5888  
  5889    stage_val_type = STAGE_VAL_NONE;
  5890  
  5891    orig_hit_cnt = new_hit_cnt;
  5892  
  5893    for (i = 0; i < len; i++) {
  5894  
  5895      u32 last_len = 0;
  5896  
  5897      stage_cur_byte = i;
  5898  
  5899      /* Extras are sorted by size, from smallest to largest. This means
  5900         that we don't have to worry about restoring the buffer in
  5901         between writes at a particular offset determined by the outer
  5902         loop. */
  5903  
  5904      for (j = 0; j < extras_cnt; j++) {
  5905  
  5906        /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also
  5907           skip them if there's no room to insert the payload, if the token
  5908           is redundant, or if its entire span has no bytes set in the effector
  5909           map. */
  5910  
  5911        if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) ||
  5912            extras[j].len > len - i ||
  5913            !memcmp(extras[j].data, out_buf + i, extras[j].len) ||
  5914            !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) {
  5915  
  5916          stage_max--;
  5917          continue;
  5918  
  5919        }
  5920  
  5921        last_len = extras[j].len;
  5922        memcpy(out_buf + i, extras[j].data, last_len);
  5923  
  5924        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
  5925  
  5926        stage_cur++;
  5927  
  5928      }
  5929  
  5930      /* Restore all the clobbered memory. */
  5931      memcpy(out_buf + i, in_buf + i, last_len);
  5932  
  5933    }
  5934  
  5935    new_hit_cnt = queued_paths + unique_crashes;
  5936  
  5937    stage_finds[STAGE_EXTRAS_UO]  += new_hit_cnt - orig_hit_cnt;
  5938    stage_cycles[STAGE_EXTRAS_UO] += stage_max;
  5939  
  5940    /* Insertion of user-supplied extras. */
  5941  
  5942    stage_name  = "user extras (insert)";
  5943    stage_short = "ext_UI";
  5944    stage_cur   = 0;
  5945    stage_max   = extras_cnt * len;
  5946  
  5947    orig_hit_cnt = new_hit_cnt;
  5948  
  5949    ex_tmp = ck_alloc(len + MAX_DICT_FILE);
  5950  
  5951    for (i = 0; i <= len; i++) {
  5952  
  5953      stage_cur_byte = i;
  5954  
  5955      for (j = 0; j < extras_cnt; j++) {
  5956  
  5957        if (len + extras[j].len > MAX_FILE) {
  5958          stage_max--; 
  5959          continue;
  5960        }
  5961  
  5962        /* Insert token */
  5963        memcpy(ex_tmp + i, extras[j].data, extras[j].len);
  5964  
  5965        /* Copy tail */
  5966        memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i);
  5967  
  5968        if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) {
  5969          ck_free(ex_tmp);
  5970          goto abandon_entry;
  5971        }
  5972  
  5973        stage_cur++;
  5974  
  5975      }
  5976  
  5977      /* Copy head */
  5978      ex_tmp[i] = out_buf[i];
  5979  
  5980    }
  5981  
  5982    ck_free(ex_tmp);
  5983  
  5984    new_hit_cnt = queued_paths + unique_crashes;
  5985  
  5986    stage_finds[STAGE_EXTRAS_UI]  += new_hit_cnt - orig_hit_cnt;
  5987    stage_cycles[STAGE_EXTRAS_UI] += stage_max;
  5988  
  5989  skip_user_extras:
  5990  
  5991    if (!a_extras_cnt) goto skip_extras;
  5992  
  5993    stage_name  = "auto extras (over)";
  5994    stage_short = "ext_AO";
  5995    stage_cur   = 0;
  5996    stage_max   = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len;
  5997  
  5998    stage_val_type = STAGE_VAL_NONE;
  5999  
  6000    orig_hit_cnt = new_hit_cnt;
  6001  
  6002    for (i = 0; i < len; i++) {
  6003  
  6004      u32 last_len = 0;
  6005  
  6006      stage_cur_byte = i;
  6007  
  6008      for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); j++) {
  6009  
  6010        /* See the comment in the earlier code; extras are sorted by size. */
  6011  
  6012        if (a_extras[j].len > len - i ||
  6013            !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||
  6014            !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) {
  6015  
  6016          stage_max--;
  6017          continue;
  6018  
  6019        }
  6020  
  6021        last_len = a_extras[j].len;
  6022        memcpy(out_buf + i, a_extras[j].data, last_len);
  6023  
  6024        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
  6025  
  6026        stage_cur++;
  6027  
  6028      }
  6029  
  6030      /* Restore all the clobbered memory. */
  6031      memcpy(out_buf + i, in_buf + i, last_len);
  6032  
  6033    }
  6034  
  6035    new_hit_cnt = queued_paths + unique_crashes;
  6036  
  6037    stage_finds[STAGE_EXTRAS_AO]  += new_hit_cnt - orig_hit_cnt;
  6038    stage_cycles[STAGE_EXTRAS_AO] += stage_max;
  6039  
  6040  skip_extras:
  6041  
  6042    /* If we made this to here without jumping to havoc_stage or abandon_entry,
  6043       we're properly done with deterministic steps and can mark it as such
  6044       in the .state/ directory. */
  6045  
  6046    if (!queue_cur->passed_det) mark_as_det_done(queue_cur);
  6047  
  6048    /****************
  6049     * RANDOM HAVOC *
  6050     ****************/
  6051  
  6052  havoc_stage:
  6053  
  6054    stage_cur_byte = -1;
  6055  
  6056    /* The havoc stage mutation code is also invoked when splicing files; if the
  6057       splice_cycle variable is set, generate different descriptions and such. */
  6058  
  6059    if (!splice_cycle) {
  6060  
  6061      stage_name  = "havoc";
  6062      stage_short = "havoc";
  6063      stage_max   = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
  6064                    perf_score / havoc_div / 100;
  6065  
  6066    } else {
  6067  
  6068      static u8 tmp[32];
  6069  
  6070      perf_score = orig_perf;
  6071  
  6072      sprintf(tmp, "splice %u", splice_cycle);
  6073      stage_name  = tmp;
  6074      stage_short = "splice";
  6075      stage_max   = SPLICE_HAVOC * perf_score / havoc_div / 100;
  6076  
  6077    }
  6078  
  6079    if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
  6080  
  6081    temp_len = len;
  6082  
  6083    orig_hit_cnt = queued_paths + unique_crashes;
  6084  
  6085    havoc_queued = queued_paths;
  6086  
  6087    /* We essentially just do several thousand runs (depending on perf_score)
  6088       where we take the input file and make random stacked tweaks. */
  6089  
  6090    for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
  6091  
  6092      u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
  6093  
  6094      stage_cur_val = use_stacking;
  6095   
  6096      for (i = 0; i < use_stacking; i++) {
  6097  
  6098        switch (UR(15 + ((extras_cnt + a_extras_cnt) ? 2 : 0))) {
  6099  
  6100          case 0:
  6101  
  6102            /* Flip a single bit somewhere. Spooky! */
  6103  
  6104            FLIP_BIT(out_buf, UR(temp_len << 3));
  6105            break;
  6106  
  6107          case 1: 
  6108  
  6109            /* Set byte to interesting value. */
  6110  
  6111            out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))];
  6112            break;
  6113  
  6114          case 2:
  6115  
  6116            /* Set word to interesting value, randomly choosing endian. */
  6117  
  6118            if (temp_len < 2) break;
  6119  
  6120            if (UR(2)) {
  6121  
  6122              *(u16*)(out_buf + UR(temp_len - 1)) =
  6123                interesting_16[UR(sizeof(interesting_16) >> 1)];
  6124  
  6125            } else {
  6126  
  6127              *(u16*)(out_buf + UR(temp_len - 1)) = SWAP16(
  6128                interesting_16[UR(sizeof(interesting_16) >> 1)]);
  6129  
  6130            }
  6131  
  6132            break;
  6133  
  6134          case 3:
  6135  
  6136            /* Set dword to interesting value, randomly choosing endian. */
  6137  
  6138            if (temp_len < 4) break;
  6139  
  6140            if (UR(2)) {
  6141    
  6142              *(u32*)(out_buf + UR(temp_len - 3)) =
  6143                interesting_32[UR(sizeof(interesting_32) >> 2)];
  6144  
  6145            } else {
  6146  
  6147              *(u32*)(out_buf + UR(temp_len - 3)) = SWAP32(
  6148                interesting_32[UR(sizeof(interesting_32) >> 2)]);
  6149  
  6150            }
  6151  
  6152            break;
  6153  
  6154          case 4:
  6155  
  6156            /* Randomly subtract from byte. */
  6157  
  6158            out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX);
  6159            break;
  6160  
  6161          case 5:
  6162  
  6163            /* Randomly add to byte. */
  6164  
  6165            out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX);
  6166            break;
  6167  
  6168          case 6:
  6169  
  6170            /* Randomly subtract from word, random endian. */
  6171  
  6172            if (temp_len < 2) break;
  6173  
  6174            if (UR(2)) {
  6175  
  6176              u32 pos = UR(temp_len - 1);
  6177  
  6178              *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
  6179  
  6180            } else {
  6181  
  6182              u32 pos = UR(temp_len - 1);
  6183              u16 num = 1 + UR(ARITH_MAX);
  6184  
  6185              *(u16*)(out_buf + pos) =
  6186                SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num);
  6187  
  6188            }
  6189  
  6190            break;
  6191  
  6192          case 7:
  6193  
  6194            /* Randomly add to word, random endian. */
  6195  
  6196            if (temp_len < 2) break;
  6197  
  6198            if (UR(2)) {
  6199  
  6200              u32 pos = UR(temp_len - 1);
  6201  
  6202              *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX);
  6203  
  6204            } else {
  6205  
  6206              u32 pos = UR(temp_len - 1);
  6207              u16 num = 1 + UR(ARITH_MAX);
  6208  
  6209              *(u16*)(out_buf + pos) =
  6210                SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num);
  6211  
  6212            }
  6213  
  6214            break;
  6215  
  6216          case 8:
  6217  
  6218            /* Randomly subtract from dword, random endian. */
  6219  
  6220            if (temp_len < 4) break;
  6221  
  6222            if (UR(2)) {
  6223  
  6224              u32 pos = UR(temp_len - 3);
  6225  
  6226              *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
  6227  
  6228            } else {
  6229  
  6230              u32 pos = UR(temp_len - 3);
  6231              u32 num = 1 + UR(ARITH_MAX);
  6232  
  6233              *(u32*)(out_buf + pos) =
  6234                SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num);
  6235  
  6236            }
  6237  
  6238            break;
  6239  
  6240          case 9:
  6241  
  6242            /* Randomly add to dword, random endian. */
  6243  
  6244            if (temp_len < 4) break;
  6245  
  6246            if (UR(2)) {
  6247  
  6248              u32 pos = UR(temp_len - 3);
  6249  
  6250              *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX);
  6251  
  6252            } else {
  6253  
  6254              u32 pos = UR(temp_len - 3);
  6255              u32 num = 1 + UR(ARITH_MAX);
  6256  
  6257              *(u32*)(out_buf + pos) =
  6258                SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num);
  6259  
  6260            }
  6261  
  6262            break;
  6263  
  6264          case 10:
  6265  
  6266            /* Just set a random byte to a random value. Because,
  6267               why not. We use XOR with 1-255 to eliminate the
  6268               possibility of a no-op. */
  6269  
  6270            out_buf[UR(temp_len)] ^= 1 + UR(255);
  6271            break;
  6272  
  6273          case 11 ... 12: {
  6274  
  6275              /* Delete bytes. We're making this a bit more likely
  6276                 than insertion (the next option) in hopes of keeping
  6277                 files reasonably small. */
  6278  
  6279              u32 del_from, del_len;
  6280  
  6281              if (temp_len < 2) break;
  6282  
  6283              /* Don't delete too much. */
  6284  
  6285              del_len = choose_block_len(temp_len - 1);
  6286  
  6287              del_from = UR(temp_len - del_len + 1);
  6288  
  6289              memmove(out_buf + del_from, out_buf + del_from + del_len,
  6290                      temp_len - del_from - del_len);
  6291  
  6292              temp_len -= del_len;
  6293  
  6294              break;
  6295  
  6296            }
  6297  
  6298          case 13:
  6299  
  6300            if (temp_len + HAVOC_BLK_XL < MAX_FILE) {
  6301  
  6302              /* Clone bytes (75%) or insert a block of constant bytes (25%). */
  6303  
  6304              u8  actually_clone = UR(4);
  6305              u32 clone_from, clone_to, clone_len;
  6306              u8* new_buf;
  6307  
  6308              if (actually_clone) {
  6309  
  6310                clone_len  = choose_block_len(temp_len);
  6311                clone_from = UR(temp_len - clone_len + 1);
  6312  
  6313              } else {
  6314  
  6315                clone_len = choose_block_len(HAVOC_BLK_XL);
  6316                clone_from = 0;
  6317  
  6318              }
  6319  
  6320              clone_to   = UR(temp_len);
  6321  
  6322              new_buf = ck_alloc_nozero(temp_len + clone_len);
  6323  
  6324              /* Head */
  6325  
  6326              memcpy(new_buf, out_buf, clone_to);
  6327  
  6328              /* Inserted part */
  6329  
  6330              if (actually_clone)
  6331                memcpy(new_buf + clone_to, out_buf + clone_from, clone_len);
  6332              else
  6333                memset(new_buf + clone_to,
  6334                       UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len);
  6335  
  6336              /* Tail */
  6337              memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,
  6338                     temp_len - clone_to);
  6339  
  6340              ck_free(out_buf);
  6341              out_buf = new_buf;
  6342              temp_len += clone_len;
  6343  
  6344            }
  6345  
  6346            break;
  6347  
  6348          case 14: {
  6349  
  6350              /* Overwrite bytes with a randomly selected chunk (75%) or fixed
  6351                 bytes (25%). */
  6352  
  6353              u32 copy_from, copy_to, copy_len;
  6354  
  6355              if (temp_len < 2) break;
  6356  
  6357              copy_len  = choose_block_len(temp_len - 1);
  6358  
  6359              copy_from = UR(temp_len - copy_len + 1);
  6360              copy_to   = UR(temp_len - copy_len + 1);
  6361  
  6362              if (UR(4)) {
  6363  
  6364                if (copy_from != copy_to)
  6365                  memmove(out_buf + copy_to, out_buf + copy_from, copy_len);
  6366  
  6367              } else memset(out_buf + copy_to,
  6368                            UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len);
  6369  
  6370              break;
  6371  
  6372            }
  6373  
  6374          /* Values 15 and 16 can be selected only if there are any extras
  6375             present in the dictionaries. */
  6376  
  6377          case 15: {
  6378  
  6379              /* Overwrite bytes with an extra. */
  6380  
  6381              if (!extras_cnt || (a_extras_cnt && UR(2))) {
  6382  
  6383                /* No user-specified extras or odds in our favor. Let's use an
  6384                   auto-detected one. */
  6385  
  6386                u32 use_extra = UR(a_extras_cnt);
  6387                u32 extra_len = a_extras[use_extra].len;
  6388                u32 insert_at;
  6389  
  6390                if (extra_len > temp_len) break;
  6391  
  6392                insert_at = UR(temp_len - extra_len + 1);
  6393                memcpy(out_buf + insert_at, a_extras[use_extra].data, extra_len);
  6394  
  6395              } else {
  6396  
  6397                /* No auto extras or odds in our favor. Use the dictionary. */
  6398  
  6399                u32 use_extra = UR(extras_cnt);
  6400                u32 extra_len = extras[use_extra].len;
  6401                u32 insert_at;
  6402  
  6403                if (extra_len > temp_len) break;
  6404  
  6405                insert_at = UR(temp_len - extra_len + 1);
  6406                memcpy(out_buf + insert_at, extras[use_extra].data, extra_len);
  6407  
  6408              }
  6409  
  6410              break;
  6411  
  6412            }
  6413  
  6414          case 16: {
  6415  
  6416              u32 use_extra, extra_len, insert_at = UR(temp_len + 1);
  6417              u8* new_buf;
  6418  
  6419              /* Insert an extra. Do the same dice-rolling stuff as for the
  6420                 previous case. */
  6421  
  6422              if (!extras_cnt || (a_extras_cnt && UR(2))) {
  6423  
  6424                use_extra = UR(a_extras_cnt);
  6425                extra_len = a_extras[use_extra].len;
  6426  
  6427                if (temp_len + extra_len >= MAX_FILE) break;
  6428  
  6429                new_buf = ck_alloc_nozero(temp_len + extra_len);
  6430  
  6431                /* Head */
  6432                memcpy(new_buf, out_buf, insert_at);
  6433  
  6434                /* Inserted part */
  6435                memcpy(new_buf + insert_at, a_extras[use_extra].data, extra_len);
  6436  
  6437              } else {
  6438  
  6439                use_extra = UR(extras_cnt);
  6440                extra_len = extras[use_extra].len;
  6441  
  6442                if (temp_len + extra_len >= MAX_FILE) break;
  6443  
  6444                new_buf = ck_alloc_nozero(temp_len + extra_len);
  6445  
  6446                /* Head */
  6447                memcpy(new_buf, out_buf, insert_at);
  6448  
  6449                /* Inserted part */
  6450                memcpy(new_buf + insert_at, extras[use_extra].data, extra_len);
  6451  
  6452              }
  6453  
  6454              /* Tail */
  6455              memcpy(new_buf + insert_at + extra_len, out_buf + insert_at,
  6456                     temp_len - insert_at);
  6457  
  6458              ck_free(out_buf);
  6459              out_buf   = new_buf;
  6460              temp_len += extra_len;
  6461  
  6462              break;
  6463  
  6464            }
  6465  
  6466        }
  6467  
  6468      }
  6469  
  6470      if (common_fuzz_stuff(argv, out_buf, temp_len))
  6471        goto abandon_entry;
  6472  
  6473      /* out_buf might have been mangled a bit, so let's restore it to its
  6474         original size and shape. */
  6475  
  6476      if (temp_len < len) out_buf = ck_realloc(out_buf, len);
  6477      temp_len = len;
  6478      memcpy(out_buf, in_buf, len);
  6479  
  6480      /* If we're finding new stuff, let's run for a bit longer, limits
  6481         permitting. */
  6482  
  6483      if (queued_paths != havoc_queued) {
  6484  
  6485        if (perf_score <= HAVOC_MAX_MULT * 100) {
  6486          stage_max  *= 2;
  6487          perf_score *= 2;
  6488        }
  6489  
  6490        havoc_queued = queued_paths;
  6491  
  6492      }
  6493  
  6494    }
  6495  
  6496    new_hit_cnt = queued_paths + unique_crashes;
  6497  
  6498    if (!splice_cycle) {
  6499      stage_finds[STAGE_HAVOC]  += new_hit_cnt - orig_hit_cnt;
  6500      stage_cycles[STAGE_HAVOC] += stage_max;
  6501    } else {
  6502      stage_finds[STAGE_SPLICE]  += new_hit_cnt - orig_hit_cnt;
  6503      stage_cycles[STAGE_SPLICE] += stage_max;
  6504    }
  6505  
  6506  #ifndef IGNORE_FINDS
  6507  
  6508    /************
  6509     * SPLICING *
  6510     ************/
  6511  
  6512    /* This is a last-resort strategy triggered by a full round with no findings.
  6513       It takes the current input file, randomly selects another input, and
  6514       splices them together at some offset, then relies on the havoc
  6515       code to mutate that blob. */
  6516  
  6517  retry_splicing:
  6518  
  6519    if (use_splicing && splice_cycle++ < SPLICE_CYCLES &&
  6520        queued_paths > 1 && queue_cur->len > 1) {
  6521  
  6522      struct queue_entry* target;
  6523      u32 tid, split_at;
  6524      u8* new_buf;
  6525      s32 f_diff, l_diff;
  6526  
  6527      /* First of all, if we've modified in_buf for havoc, let's clean that
  6528         up... */
  6529  
  6530      if (in_buf != orig_in) {
  6531        ck_free(in_buf);
  6532        in_buf = orig_in;
  6533        len = queue_cur->len;
  6534      }
  6535  
  6536      /* Pick a random queue entry and seek to it. Don't splice with yourself. */
  6537  
  6538      do { tid = UR(queued_paths); } while (tid == current_entry);
  6539  
  6540      splicing_with = tid;
  6541      target = queue;
  6542  
  6543      while (tid >= 100) { target = target->next_100; tid -= 100; }
  6544      while (tid--) target = target->next;
  6545  
  6546      /* Make sure that the target has a reasonable length. */
  6547  
  6548      while (target && (target->len < 2 || target == queue_cur)) {
  6549        target = target->next;
  6550        splicing_with++;
  6551      }
  6552  
  6553      if (!target) goto retry_splicing;
  6554  
  6555      /* Read the testcase into a new buffer. */
  6556  
  6557      fd = open(target->fname, O_RDONLY);
  6558  
  6559      if (fd < 0) PFATAL("Unable to open '%s'", target->fname);
  6560  
  6561      new_buf = ck_alloc_nozero(target->len);
  6562  
  6563      ck_read(fd, new_buf, target->len, target->fname);
  6564  
  6565      close(fd);
  6566  
  6567      /* Find a suitable splicing location, somewhere between the first and
  6568         the last differing byte. Bail out if the difference is just a single
  6569         byte or so. */
  6570  
  6571      locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);
  6572  
  6573      if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
  6574        ck_free(new_buf);
  6575        goto retry_splicing;
  6576      }
  6577  
  6578      /* Split somewhere between the first and last differing byte. */
  6579  
  6580      split_at = f_diff + UR(l_diff - f_diff);
  6581  
  6582      /* Do the thing. */
  6583  
  6584      len = target->len;
  6585      memcpy(new_buf, in_buf, split_at);
  6586      in_buf = new_buf;
  6587  
  6588      ck_free(out_buf);
  6589      out_buf = ck_alloc_nozero(len);
  6590      memcpy(out_buf, in_buf, len);
  6591  
  6592      goto havoc_stage;
  6593  
  6594    }
  6595  
  6596  #endif /* !IGNORE_FINDS */
  6597  
  6598    ret_val = 0;
  6599  
  6600  abandon_entry:
  6601  
  6602    splicing_with = -1;
  6603  
  6604    /* Update pending_not_fuzzed count if we made it through the calibration
  6605       cycle and have not seen this entry before. */
  6606  
  6607    if (!stop_soon && !queue_cur->cal_failed && !queue_cur->was_fuzzed) {
  6608      queue_cur->was_fuzzed = 1;
  6609      pending_not_fuzzed--;
  6610      if (queue_cur->favored) pending_favored--;
  6611    }
  6612  
  6613    munmap(orig_in, queue_cur->len);
  6614  
  6615    if (in_buf != orig_in) ck_free(in_buf);
  6616    ck_free(out_buf);
  6617    ck_free(eff_map);
  6618  
  6619    return ret_val;
  6620  
  6621  #undef FLIP_BIT
  6622  
  6623  }
  6624  
  6625  
  6626  /* Grab interesting test cases from other fuzzers. */
  6627  
  6628  static void sync_fuzzers(char** argv) {
  6629  
  6630    DIR* sd;
  6631    struct dirent* sd_ent;
  6632    u32 sync_cnt = 0;
  6633  
  6634    sd = opendir(sync_dir);
  6635    if (!sd) PFATAL("Unable to open '%s'", sync_dir);
  6636  
  6637    stage_max = stage_cur = 0;
  6638    cur_depth = 0;
  6639  
  6640    /* Look at the entries created for every other fuzzer in the sync directory. */
  6641  
  6642    while ((sd_ent = readdir(sd))) {
  6643  
  6644      static u8 stage_tmp[128];
  6645  
  6646      DIR* qd;
  6647      struct dirent* qd_ent;
  6648      u8 *qd_path, *qd_synced_path;
  6649      u32 min_accept = 0, next_min_accept;
  6650  
  6651      s32 id_fd;
  6652  
  6653      /* Skip dot files and our own output directory. */
  6654  
  6655      if (sd_ent->d_name[0] == '.' || !strcmp(sync_id, sd_ent->d_name)) continue;
  6656  
  6657      /* Skip anything that doesn't have a queue/ subdirectory. */
  6658  
  6659      qd_path = alloc_printf("%s/%s/queue", sync_dir, sd_ent->d_name);
  6660  
  6661      if (!(qd = opendir(qd_path))) {
  6662        ck_free(qd_path);
  6663        continue;
  6664      }
  6665  
  6666      /* Retrieve the ID of the last seen test case. */
  6667  
  6668      qd_synced_path = alloc_printf("%s/.synced/%s", out_dir, sd_ent->d_name);
  6669  
  6670      id_fd = open(qd_synced_path, O_RDWR | O_CREAT, 0600);
  6671  
  6672      if (id_fd < 0) PFATAL("Unable to create '%s'", qd_synced_path);
  6673  
  6674      if (read(id_fd, &min_accept, sizeof(u32)) > 0) 
  6675        lseek(id_fd, 0, SEEK_SET);
  6676  
  6677      next_min_accept = min_accept;
  6678  
  6679      /* Show stats */    
  6680  
  6681      sprintf(stage_tmp, "sync %u", ++sync_cnt);
  6682      stage_name = stage_tmp;
  6683      stage_cur  = 0;
  6684      stage_max  = 0;
  6685  
  6686      /* For every file queued by this fuzzer, parse ID and see if we have looked at
  6687         it before; exec a test case if not. */
  6688  
  6689      while ((qd_ent = readdir(qd))) {
  6690  
  6691        u8* path;
  6692        s32 fd;
  6693        struct stat st;
  6694  
  6695        if (qd_ent->d_name[0] == '.' ||
  6696            sscanf(qd_ent->d_name, CASE_PREFIX "%06u", &syncing_case) != 1 || 
  6697            syncing_case < min_accept) continue;
  6698  
  6699        /* OK, sounds like a new one. Let's give it a try. */
  6700  
  6701        if (syncing_case >= next_min_accept)
  6702          next_min_accept = syncing_case + 1;
  6703  
  6704        path = alloc_printf("%s/%s", qd_path, qd_ent->d_name);
  6705  
  6706        /* Allow this to fail in case the other fuzzer is resuming or so... */
  6707  
  6708        fd = open(path, O_RDONLY);
  6709  
  6710        if (fd < 0) {
  6711           ck_free(path);
  6712           continue;
  6713        }
  6714  
  6715        if (fstat(fd, &st)) PFATAL("fstat() failed");
  6716  
  6717        /* Ignore zero-sized or oversized files. */
  6718  
  6719        if (st.st_size && st.st_size <= MAX_FILE) {
  6720  
  6721          u8  fault;
  6722          u8* mem = mmap(0, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
  6723  
  6724          if (mem == MAP_FAILED) PFATAL("Unable to mmap '%s'", path);
  6725  
  6726          /* See what happens. We rely on save_if_interesting() to catch major
  6727             errors and save the test case. */
  6728  
  6729          write_to_testcase(mem, st.st_size);
  6730  
  6731          fault = run_target(argv, exec_tmout);
  6732  
  6733          if (stop_soon) return;
  6734  
  6735          syncing_party = sd_ent->d_name;
  6736          queued_imported += save_if_interesting(argv, mem, st.st_size, fault);
  6737          syncing_party = 0;
  6738  
  6739          munmap(mem, st.st_size);
  6740  
  6741          if (!(stage_cur++ % stats_update_freq)) show_stats();
  6742  
  6743        }
  6744  
  6745        ck_free(path);
  6746        close(fd);
  6747  
  6748      }
  6749  
  6750      ck_write(id_fd, &next_min_accept, sizeof(u32), qd_synced_path);
  6751  
  6752      close(id_fd);
  6753      closedir(qd);
  6754      ck_free(qd_path);
  6755      ck_free(qd_synced_path);
  6756      
  6757    }  
  6758  
  6759    closedir(sd);
  6760  
  6761  }
  6762  
  6763  
/* Handle stop signal (Ctrl-C, etc). */

static void handle_stop_sig(int sig) {

  /* Runs in async-signal context: only set a flag and forward SIGKILL.
     The main loop polls stop_soon and performs the orderly shutdown. */

  stop_soon = 1; 

  /* Take down the currently running target and the fork server, if any. */

  if (child_pid > 0) kill(child_pid, SIGKILL);
  if (forksrv_pid > 0) kill(forksrv_pid, SIGKILL);

}
  6774  
  6775  
/* Handle skip request (SIGUSR1). */

static void handle_skipreq(int sig) {

  /* Just record the request; it is acted upon outside signal context. */

  skip_requested = 1;

}
  6783  
/* Handle timeout (SIGALRM). */

static void handle_timeout(int sig) {

  if (child_pid > 0) {

    /* A target process is running: flag the timeout and kill it. */

    child_timed_out = 1; 
    kill(child_pid, SIGKILL);

  } else if (child_pid == -1 && forksrv_pid > 0) {

    /* No active child, but a fork server is up - kill that instead.
       NOTE(review): child_pid == -1 presumably marks "no child running";
       confirm against the fork server bookkeeping elsewhere in the file. */

    child_timed_out = 1; 
    kill(forksrv_pid, SIGKILL);

  }

}
  6801  
  6802  
/* Do a PATH search and find target binary to see that it exists and
   isn't a shell script - a common and painful mistake. We also check for
   a valid ELF header and for evidence of AFL instrumentation.

   Side effects: sets the global target_path, and may set uses_asan,
   persistent_mode, deferred_mode, plus the PERSIST/DEFER env vars. */

EXP_ST void check_binary(u8* fname) {

  u8* env_path = 0;
  struct stat st;

  s32 fd;
  u8* f_data;            /* read-only mapping of the whole binary      */
  u32 f_len = 0;         /* its size; stays 0 until a stat() succeeds  */

  ACTF("Validating target binary...");

  /* A name containing '/' (or no $PATH at all) is taken as a literal
     path; otherwise we walk $PATH below. The >= 4 size floor keeps the
     header probes further down in bounds. */

  if (strchr(fname, '/') || !(env_path = getenv("PATH"))) {

    target_path = ck_strdup(fname);
    if (stat(target_path, &st) || !S_ISREG(st.st_mode) ||
        !(st.st_mode & 0111) || (f_len = st.st_size) < 4)
      FATAL("Program '%s' not found or not executable", fname);

  } else {

    /* Manual $PATH walk: split on ':' and try each element in turn. */

    while (env_path) {

      u8 *cur_elem, *delim = strchr(env_path, ':');

      if (delim) {

        cur_elem = ck_alloc(delim - env_path + 1);
        memcpy(cur_elem, env_path, delim - env_path);
        delim++;

      } else cur_elem = ck_strdup(env_path);

      env_path = delim;

      /* An empty $PATH element conventionally means the current dir. */

      if (cur_elem[0])
        target_path = alloc_printf("%s/%s", cur_elem, fname);
      else
        target_path = ck_strdup(fname);

      ck_free(cur_elem);

      /* Same executability + minimum-size test as in the literal case. */

      if (!stat(target_path, &st) && S_ISREG(st.st_mode) &&
          (st.st_mode & 0111) && (f_len = st.st_size) >= 4) break;

      ck_free(target_path);
      target_path = 0;

    }

    if (!target_path) FATAL("Program '%s' not found or not executable", fname);

  }

  /* Everything below is a sanity sweep that the user can opt out of. */

  if (getenv("AFL_SKIP_BIN_CHECK")) return;

  /* Check for blatant user errors. */

  if ((!strncmp(target_path, "/tmp/", 5) && !strchr(target_path + 5, '/')) ||
      (!strncmp(target_path, "/var/tmp/", 9) && !strchr(target_path + 9, '/')))
     FATAL("Please don't keep binaries in /tmp or /var/tmp");

  /* Map the binary read-only so we can grep it for known signatures. */

  fd = open(target_path, O_RDONLY);

  if (fd < 0) PFATAL("Unable to open '%s'", target_path);

  f_data = mmap(0, f_len, PROT_READ, MAP_PRIVATE, fd, 0);

  if (f_data == MAP_FAILED) PFATAL("Unable to mmap file '%s'", target_path);

  close(fd);

  /* Shebang means a script, which would only exercise the shell. */

  if (f_data[0] == '#' && f_data[1] == '!') {

    SAYF("\n" cLRD "[-] " cRST
         "Oops, the target binary looks like a shell script. Some build systems will\n"
         "    sometimes generate shell stubs for dynamically linked programs; try static\n"
         "    library mode (./configure --disable-shared) if that's the case.\n\n"

         "    Another possible cause is that you are actually trying to use a shell\n" 
         "    wrapper around the fuzzed component. Invoking shell can slow down the\n" 
         "    fuzzing process by a factor of 20x or more; it's best to write the wrapper\n"
         "    in a compiled language instead.\n");

    FATAL("Program '%s' is a shell script", target_path);

  }

  /* Magic-number check: ELF everywhere except Mach-O on macOS. */

#ifndef __APPLE__

  if (f_data[0] != 0x7f || memcmp(f_data + 1, "ELF", 3))
    FATAL("Program '%s' is not an ELF binary", target_path);

#else

  if (f_data[0] != 0xCF || f_data[1] != 0xFA || f_data[2] != 0xED)
    FATAL("Program '%s' is not a 64-bit Mach-O binary", target_path);

#endif /* ^!__APPLE__ */

  /* Instrumented binaries embed SHM_ENV_VAR (incl. its NUL); its absence
     means no compile-time instrumentation, which is fatal unless the user
     asked for QEMU (-Q) or dumb (-n) mode. */

  if (!qemu_mode && !dumb_mode &&
      !memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR) + 1)) {

    SAYF("\n" cLRD "[-] " cRST
         "Looks like the target binary is not instrumented! The fuzzer depends on\n"
         "    compile-time instrumentation to isolate interesting test cases while\n"
         "    mutating the input data. For more information, and for tips on how to\n"
         "    instrument binaries, please see %s/README.\n\n"

         "    When source code is not available, you may be able to leverage QEMU\n"
         "    mode support. Consult the README for tips on how to enable this.\n"

         "    (It is also possible to use afl-fuzz as a traditional, \"dumb\" fuzzer.\n"
         "    For that, you can use the -n option - but expect much worse results.)\n",
         doc_path);

    FATAL("No instrumentation detected");

  }

  /* Conversely, an instrumented binary under -Q is a misconfiguration. */

  if (qemu_mode &&
      memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR) + 1)) {

    SAYF("\n" cLRD "[-] " cRST
         "This program appears to be instrumented with afl-gcc, but is being run in\n"
         "    QEMU mode (-Q). This is probably not what you want - this setup will be\n"
         "    slow and offer no practical benefits.\n");

    FATAL("Instrumentation found in -Q mode");

  }

  /* ASAN/MSAN leave these strings in the binary; remember for later. */

  if (memmem(f_data, f_len, "libasan.so", 10) ||
      memmem(f_data, f_len, "__msan_init", 11)) uses_asan = 1;

  /* Detect persistent & deferred init signatures in the binary. */

  if (memmem(f_data, f_len, PERSIST_SIG, strlen(PERSIST_SIG) + 1)) {

    OKF(cPIN "Persistent mode binary detected.");
    setenv(PERSIST_ENV_VAR, "1", 1);
    persistent_mode = 1;

  } else if (getenv("AFL_PERSISTENT")) {

    WARNF("AFL_PERSISTENT is no longer supported and may misbehave!");

  }

  if (memmem(f_data, f_len, DEFER_SIG, strlen(DEFER_SIG) + 1)) {

    OKF(cPIN "Deferred forkserver binary detected.");
    setenv(DEFER_ENV_VAR, "1", 1);
    deferred_mode = 1;

  } else if (getenv("AFL_DEFER_FORKSRV")) {

    WARNF("AFL_DEFER_FORKSRV is no longer supported and may misbehave!");

  }

  /* NOTE(review): the message says "unmap()" but the call is munmap(). */

  if (munmap(f_data, f_len)) PFATAL("unmap() failed");

}
  6970  
  6971  
  6972  /* Trim and possibly create a banner for the run. */
  6973  
  6974  static void fix_up_banner(u8* name) {
  6975  
  6976    if (!use_banner) {
  6977  
  6978      if (sync_id) {
  6979  
  6980        use_banner = sync_id;
  6981  
  6982      } else {
  6983  
  6984        u8* trim = strrchr(name, '/');
  6985        if (!trim) use_banner = name; else use_banner = trim + 1;
  6986  
  6987      }
  6988  
  6989    }
  6990  
  6991    if (strlen(use_banner) > 40) {
  6992  
  6993      u8* tmp = ck_alloc(44);
  6994      sprintf(tmp, "%.40s...", use_banner);
  6995      use_banner = tmp;
  6996  
  6997    }
  6998  
  6999  }
  7000  
  7001  
  7002  /* Check if we're on TTY. */
  7003  
  7004  static void check_if_tty(void) {
  7005  
  7006    struct winsize ws;
  7007  
  7008    if (getenv("AFL_NO_UI")) {
  7009      OKF("Disabling the UI because AFL_NO_UI is set.");
  7010      not_on_tty = 1;
  7011      return;
  7012    }
  7013  
  7014    if (ioctl(1, TIOCGWINSZ, &ws)) {
  7015  
  7016      if (errno == ENOTTY) {
  7017        OKF("Looks like we're not running on a tty, so I'll be a bit less verbose.");
  7018        not_on_tty = 1;
  7019      }
  7020  
  7021      return;
  7022    }
  7023  
  7024  }
  7025  
  7026  
  7027  /* Check terminal dimensions after resize. */
  7028  
  7029  static void check_term_size(void) {
  7030  
  7031    struct winsize ws;
  7032  
  7033    term_too_small = 0;
  7034  
  7035    if (ioctl(1, TIOCGWINSZ, &ws)) return;
  7036  
  7037    if (ws.ws_row < 25 || ws.ws_col < 80) term_too_small = 1;
  7038  
  7039  }
  7040  
  7041  
  7042  
/* Display usage hints. */

static void usage(u8* argv0) {

  /* One big literal keeps the help text together; EXEC_TIMEOUT and
     MEM_LIMIT are the compiled-in defaults, presumably from config.h -
     confirm there. */

  SAYF("\n%s [ options ] -- /path/to/fuzzed_app [ ... ]\n\n"

       "Required parameters:\n\n"

       "  -i dir        - input directory with test cases\n"
       "  -o dir        - output directory for fuzzer findings\n\n"

       "Execution control settings:\n\n"

       "  -f file       - location read by the fuzzed program (stdin)\n"
       "  -t msec       - timeout for each run (auto-scaled, 50-%u ms)\n"
       "  -m megs       - memory limit for child process (%u MB)\n"
       "  -Q            - use binary-only instrumentation (QEMU mode)\n\n"     
 
       "Fuzzing behavior settings:\n\n"

       "  -d            - quick & dirty mode (skips deterministic steps)\n"
       "  -n            - fuzz without instrumentation (dumb mode)\n"
       "  -x dir        - optional fuzzer dictionary (see README)\n\n"

       "Other stuff:\n\n"

       "  -T text       - text banner to show on the screen\n"
       "  -M / -S id    - distributed mode (see parallel_fuzzing.txt)\n"
       "  -C            - crash exploration mode (the peruvian rabbit thing)\n\n"

       "For additional tips, please consult %s/README.\n\n",

       argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path);

  /* Never returns. */

  exit(1);

}
  7080  
  7081  
  7082  /* Prepare output directories and fds. */
  7083  
  7084  EXP_ST void setup_dirs_fds(void) {
  7085  
  7086    u8* tmp;
  7087    s32 fd;
  7088  
  7089    ACTF("Setting up output directories...");
  7090  
  7091    if (sync_id && mkdir(sync_dir, 0700) && errno != EEXIST)
  7092        PFATAL("Unable to create '%s'", sync_dir);
  7093  
  7094    if (mkdir(out_dir, 0700)) {
  7095  
  7096      if (errno != EEXIST) PFATAL("Unable to create '%s'", out_dir);
  7097  
  7098      maybe_delete_out_dir();
  7099  
  7100    } else {
  7101  
  7102      if (in_place_resume)
  7103        FATAL("Resume attempted but old output directory not found");
  7104  
  7105      out_dir_fd = open(out_dir, O_RDONLY);
  7106  
  7107  #ifndef __sun
  7108  
  7109      if (out_dir_fd < 0 || flock(out_dir_fd, LOCK_EX | LOCK_NB))
  7110        PFATAL("Unable to flock() output directory.");
  7111  
  7112  #endif /* !__sun */
  7113  
  7114    }
  7115  
  7116    /* Queue directory for any starting & discovered paths. */
  7117  
  7118    tmp = alloc_printf("%s/queue", out_dir);
  7119    if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
  7120    ck_free(tmp);
  7121  
  7122    /* Top-level directory for queue metadata used for session
  7123       resume and related tasks. */
  7124  
  7125    tmp = alloc_printf("%s/queue/.state/", out_dir);
  7126    if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
  7127    ck_free(tmp);
  7128  
  7129    /* Directory for flagging queue entries that went through
  7130       deterministic fuzzing in the past. */
  7131  
  7132    tmp = alloc_printf("%s/queue/.state/deterministic_done/", out_dir);
  7133    if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
  7134    ck_free(tmp);
  7135  
  7136    /* Directory with the auto-selected dictionary entries. */
  7137  
  7138    tmp = alloc_printf("%s/queue/.state/auto_extras/", out_dir);
  7139    if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
  7140    ck_free(tmp);
  7141  
  7142    /* The set of paths currently deemed redundant. */
  7143  
  7144    tmp = alloc_printf("%s/queue/.state/redundant_edges/", out_dir);
  7145    if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
  7146    ck_free(tmp);
  7147  
  7148    /* The set of paths showing variable behavior. */
  7149  
  7150    tmp = alloc_printf("%s/queue/.state/variable_behavior/", out_dir);
  7151    if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
  7152    ck_free(tmp);
  7153  
  7154    /* Sync directory for keeping track of cooperating fuzzers. */
  7155  
  7156    if (sync_id) {
  7157  
  7158      tmp = alloc_printf("%s/.synced/", out_dir);
  7159  
  7160      if (mkdir(tmp, 0700) && (!in_place_resume || errno != EEXIST))
  7161        PFATAL("Unable to create '%s'", tmp);
  7162  
  7163      ck_free(tmp);
  7164  
  7165    }
  7166  
  7167    /* All recorded crashes. */
  7168  
  7169    tmp = alloc_printf("%s/crashes", out_dir);
  7170    if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
  7171    ck_free(tmp);
  7172  
  7173    /* All recorded hangs. */
  7174  
  7175    tmp = alloc_printf("%s/hangs", out_dir);
  7176    if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
  7177    ck_free(tmp);
  7178  
  7179    /* Generally useful file descriptors. */
  7180  
  7181    dev_null_fd = open("/dev/null", O_RDWR);
  7182    if (dev_null_fd < 0) PFATAL("Unable to open /dev/null");
  7183  
  7184    dev_urandom_fd = open("/dev/urandom", O_RDONLY);
  7185    if (dev_urandom_fd < 0) PFATAL("Unable to open /dev/urandom");
  7186  
  7187    /* Gnuplot output file. */
  7188  
  7189    tmp = alloc_printf("%s/plot_data", out_dir);
  7190    fd = open(tmp, O_WRONLY | O_CREAT | O_EXCL, 0600);
  7191    if (fd < 0) PFATAL("Unable to create '%s'", tmp);
  7192    ck_free(tmp);
  7193  
  7194    plot_file = fdopen(fd, "w");
  7195    if (!plot_file) PFATAL("fdopen() failed");
  7196  
  7197    fprintf(plot_file, "# unix_time, cycles_done, cur_path, paths_total, "
  7198                       "pending_total, pending_favs, map_size, unique_crashes, "
  7199                       "unique_hangs, max_depth, execs_per_sec\n");
  7200                       /* ignore errors */
  7201  
  7202  }
  7203  
  7204  
  7205  /* Setup the output file for fuzzed data, if not using -f. */
  7206  
  7207  EXP_ST void setup_stdio_file(void) {
  7208  
  7209    u8* fn = alloc_printf("%s/.cur_input", out_dir);
  7210  
  7211    unlink(fn); /* Ignore errors */
  7212  
  7213    out_fd = open(fn, O_RDWR | O_CREAT | O_EXCL, 0600);
  7214  
  7215    if (out_fd < 0) PFATAL("Unable to create '%s'", fn);
  7216  
  7217    ck_free(fn);
  7218  
  7219  }
  7220  
  7221  
/* Make sure that core dumps don't go to a program. */

static void check_crash_handling(void) {

#ifdef __APPLE__

  /* Yuck! There appears to be no simple C API to query for the state of 
     loaded daemons on MacOS X, and I'm a bit hesitant to do something
     more sophisticated, such as disabling crash reporting via Mach ports,
     until I get a box to test the code. So, for now, we check for crash
     reporting the awful way. */
  
  /* system() returns non-zero when grep -q finds no match, i.e. when
     ReportCrash is NOT loaded - in which case we're fine. */

  if (system("launchctl list 2>/dev/null | grep -q '\\.ReportCrash$'")) return;

  SAYF("\n" cLRD "[-] " cRST
       "Whoops, your system is configured to forward crash notifications to an\n"
       "    external crash reporting utility. This will cause issues due to the\n"
       "    extended delay between the fuzzed binary malfunctioning and this fact\n"
       "    being relayed to the fuzzer via the standard waitpid() API.\n\n"
       "    To avoid having crashes misinterpreted as timeouts, please run the\n" 
       "    following commands:\n\n"

       "    SL=/System/Library; PL=com.apple.ReportCrash\n"
       "    launchctl unload -w ${SL}/LaunchAgents/${PL}.plist\n"
       "    sudo launchctl unload -w ${SL}/LaunchDaemons/${PL}.Root.plist\n");

  if (!getenv("AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES"))
    FATAL("Crash reporter detected");

#else

  /* This is Linux specific, but I don't think there's anything equivalent on
     *BSD, so we can just let it slide for now. */

  s32 fd = open("/proc/sys/kernel/core_pattern", O_RDONLY);
  u8  fchar;

  if (fd < 0) return;

  ACTF("Checking core_pattern...");

  /* A leading '|' pipes core dumps to an external helper, which delays
     the crash notification we rely on; only the first byte matters. */

  if (read(fd, &fchar, 1) == 1 && fchar == '|') {

    SAYF("\n" cLRD "[-] " cRST
         "Hmm, your system is configured to send core dump notifications to an\n"
         "    external utility. This will cause issues: there will be an extended delay\n"
         "    between stumbling upon a crash and having this information relayed to the\n"
         "    fuzzer via the standard waitpid() API.\n\n"

         "    To avoid having crashes misinterpreted as timeouts, please log in as root\n" 
         "    and temporarily modify /proc/sys/kernel/core_pattern, like so:\n\n"

         "    echo core >/proc/sys/kernel/core_pattern\n");

    if (!getenv("AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES"))
      FATAL("Pipe at the beginning of 'core_pattern'");

  }
 
  close(fd);

#endif /* ^__APPLE__ */

}
  7286  
  7287  
  7288  /* Check CPU governor. */
  7289  
  7290  static void check_cpu_governor(void) {
  7291  
  7292    FILE* f;
  7293    u8 tmp[128];
  7294    u64 min = 0, max = 0;
  7295  
  7296    if (getenv("AFL_SKIP_CPUFREQ")) return;
  7297  
  7298    f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", "r");
  7299    if (!f) return;
  7300  
  7301    ACTF("Checking CPU scaling governor...");
  7302  
  7303    if (!fgets(tmp, 128, f)) PFATAL("fgets() failed");
  7304  
  7305    fclose(f);
  7306  
  7307    if (!strncmp(tmp, "perf", 4)) return;
  7308  
  7309    f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq", "r");
  7310  
  7311    if (f) {
  7312      if (fscanf(f, "%llu", &min) != 1) min = 0;
  7313      fclose(f);
  7314    }
  7315  
  7316    f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq", "r");
  7317  
  7318    if (f) {
  7319      if (fscanf(f, "%llu", &max) != 1) max = 0;
  7320      fclose(f);
  7321    }
  7322  
  7323    if (min == max) return;
  7324  
  7325    SAYF("\n" cLRD "[-] " cRST
  7326         "Whoops, your system uses on-demand CPU frequency scaling, adjusted\n"
  7327         "    between %llu and %llu MHz. Unfortunately, the scaling algorithm in the\n"
  7328         "    kernel is imperfect and can miss the short-lived processes spawned by\n"
  7329         "    afl-fuzz. To keep things moving, run these commands as root:\n\n"
  7330  
  7331         "    cd /sys/devices/system/cpu\n"
  7332         "    echo performance | tee cpu*/cpufreq/scaling_governor\n\n"
  7333  
  7334         "    You can later go back to the original state by replacing 'performance' with\n"
  7335         "    'ondemand'. If you don't want to change the settings, set AFL_SKIP_CPUFREQ\n"
  7336         "    to make afl-fuzz skip this check - but expect some performance drop.\n",
  7337         min / 1024, max / 1024);
  7338  
  7339    FATAL("Suboptimal CPU scaling governor");
  7340  
  7341  }
  7342  
  7343  
/* Count the number of logical CPU cores. Sets the global cpu_core_count
   (left at 0 if detection fails) and prints a utilization hint. */

static void get_core_count(void) {

  u32 cur_runnable = 0;

#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__)

  size_t s = sizeof(cpu_core_count);

  /* On *BSD systems, we can just use a sysctl to get the number of CPUs. */

#ifdef __APPLE__

  if (sysctlbyname("hw.logicalcpu", &cpu_core_count, &s, NULL, 0) < 0)
    return;

#else

  int s_name[2] = { CTL_HW, HW_NCPU };

  if (sysctl(s_name, 2, &cpu_core_count, &s, NULL, 0) < 0) return;

#endif /* ^__APPLE__ */

#else

#ifdef HAVE_AFFINITY

  cpu_core_count = sysconf(_SC_NPROCESSORS_ONLN);

#else

  /* Last-resort fallback: count the "cpuN" lines in /proc/stat. */

  FILE* f = fopen("/proc/stat", "r");
  u8 tmp[1024];

  if (!f) return;

  while (fgets(tmp, sizeof(tmp), f))
    if (!strncmp(tmp, "cpu", 3) && isdigit(tmp[3])) cpu_core_count++;

  fclose(f);

#endif /* ^HAVE_AFFINITY */

#endif /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */

  if (cpu_core_count > 0) {

    cur_runnable = (u32)get_runnable_processes();

#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__)

    /* Add ourselves, since the 1-minute average doesn't include that yet. */

    cur_runnable++;

#endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */

    OKF("You have %u CPU core%s and %u runnable tasks (utilization: %0.0f%%).",
        cpu_core_count, cpu_core_count > 1 ? "s" : "",
        cur_runnable, cur_runnable * 100.0 / cpu_core_count);

    if (cpu_core_count > 1) {

      /* Rough heuristics: warn on apparent overload, suggest parallel
         jobs when there is at least one idle core. */

      if (cur_runnable > cpu_core_count * 1.5) {

        WARNF("System under apparent load, performance may be spotty.");

      } else if (cur_runnable + 1 <= cpu_core_count) {

        OKF("Try parallel jobs - see %s/parallel_fuzzing.txt.", doc_path);
  
      }

    }

  } else {

    cpu_core_count = 0;
    WARNF("Unable to figure out the number of CPU cores.");

  }

}
  7429  
  7430  
  7431  /* Validate and fix up out_dir and sync_dir when using -S. */
  7432  
  7433  static void fix_up_sync(void) {
  7434  
  7435    u8* x = sync_id;
  7436  
  7437    if (dumb_mode)
  7438      FATAL("-S / -M and -n are mutually exclusive");
  7439  
  7440    if (skip_deterministic) {
  7441  
  7442      if (force_deterministic)
  7443        FATAL("use -S instead of -M -d");
  7444      else
  7445        FATAL("-S already implies -d");
  7446  
  7447    }
  7448  
  7449    while (*x) {
  7450  
  7451      if (!isalnum(*x) && *x != '_' && *x != '-')
  7452        FATAL("Non-alphanumeric fuzzer ID specified via -S or -M");
  7453  
  7454      x++;
  7455  
  7456    }
  7457  
  7458    if (strlen(sync_id) > 32) FATAL("Fuzzer ID too long");
  7459  
  7460    x = alloc_printf("%s/%s", out_dir, sync_id);
  7461  
  7462    sync_dir = out_dir;
  7463    out_dir  = x;
  7464  
  7465    if (!force_deterministic) {
  7466      skip_deterministic = 1;
  7467      use_splicing = 1;
  7468    }
  7469  
  7470  }
  7471  
  7472  
/* Handle screen resize (SIGWINCH). */

static void handle_resize(int sig) {
  /* Just request a repaint; the flag is acted upon outside signal context. */
  clear_screen = 1;
}
  7478  
  7479  
  7480  /* Check ASAN options. */
  7481  
  7482  static void check_asan_opts(void) {
  7483    u8* x = getenv("ASAN_OPTIONS");
  7484  
  7485    if (x) {
  7486  
  7487      if (!strstr(x, "abort_on_error=1"))
  7488        FATAL("Custom ASAN_OPTIONS set without abort_on_error=1 - please fix!");
  7489  
  7490      if (!strstr(x, "symbolize=0"))
  7491        FATAL("Custom ASAN_OPTIONS set without symbolize=0 - please fix!");
  7492  
  7493    }
  7494  
  7495    x = getenv("MSAN_OPTIONS");
  7496  
  7497    if (x) {
  7498  
  7499      if (!strstr(x, "exit_code=" STRINGIFY(MSAN_ERROR)))
  7500        FATAL("Custom MSAN_OPTIONS set without exit_code="
  7501              STRINGIFY(MSAN_ERROR) " - please fix!");
  7502  
  7503      if (!strstr(x, "symbolize=0"))
  7504        FATAL("Custom MSAN_OPTIONS set without symbolize=0 - please fix!");
  7505  
  7506    }
  7507  
  7508  } 
  7509  
  7510  
  7511  /* Detect @@ in args. */
  7512  
  7513  EXP_ST void detect_file_args(char** argv) {
  7514  
  7515    u32 i = 0;
  7516    u8* cwd = getcwd(NULL, 0);
  7517  
  7518    if (!cwd) PFATAL("getcwd() failed");
  7519  
  7520    while (argv[i]) {
  7521  
  7522      u8* aa_loc = strstr(argv[i], "@@");
  7523  
  7524      if (aa_loc) {
  7525  
  7526        u8 *aa_subst, *n_arg;
  7527  
  7528        /* If we don't have a file name chosen yet, use a safe default. */
  7529  
  7530        if (!out_file)
  7531          out_file = alloc_printf("%s/.cur_input", out_dir);
  7532  
  7533        /* Be sure that we're always using fully-qualified paths. */
  7534  
  7535        if (out_file[0] == '/') aa_subst = out_file;
  7536        else aa_subst = alloc_printf("%s/%s", cwd, out_file);
  7537  
  7538        /* Construct a replacement argv value. */
  7539  
  7540        *aa_loc = 0;
  7541        n_arg = alloc_printf("%s%s%s", argv[i], aa_subst, aa_loc + 2);
  7542        argv[i] = n_arg;
  7543        *aa_loc = '@';
  7544  
  7545        if (out_file[0] != '/') ck_free(aa_subst);
  7546  
  7547      }
  7548  
  7549      i++;
  7550  
  7551    }
  7552  
  7553    free(cwd); /* not tracked */
  7554  
  7555  }
  7556  
  7557  
  7558  /* Set up signal handlers. More complicated that needs to be, because libc on
  7559     Solaris doesn't resume interrupted reads(), sets SA_RESETHAND when you call
  7560     siginterrupt(), and does other stupid things. */
  7561  
  7562  EXP_ST void setup_signal_handlers(void) {
  7563  
  7564    struct sigaction sa;
  7565  
  7566    sa.sa_handler   = NULL;
  7567    sa.sa_flags     = SA_RESTART;
  7568    sa.sa_sigaction = NULL;
  7569  
  7570    sigemptyset(&sa.sa_mask);
  7571  
  7572    /* Various ways of saying "stop". */
  7573  
  7574    sa.sa_handler = handle_stop_sig;
  7575    sigaction(SIGHUP, &sa, NULL);
  7576    sigaction(SIGINT, &sa, NULL);
  7577    sigaction(SIGTERM, &sa, NULL);
  7578  
  7579    /* Exec timeout notifications. */
  7580  
  7581    sa.sa_handler = handle_timeout;
  7582    sigaction(SIGALRM, &sa, NULL);
  7583  
  7584    /* Window resize */
  7585  
  7586    sa.sa_handler = handle_resize;
  7587    sigaction(SIGWINCH, &sa, NULL);
  7588  
  7589    /* SIGUSR1: skip entry */
  7590  
  7591    sa.sa_handler = handle_skipreq;
  7592    sigaction(SIGUSR1, &sa, NULL);
  7593  
  7594    /* Things we don't care about. */
  7595  
  7596    sa.sa_handler = SIG_IGN;
  7597    sigaction(SIGTSTP, &sa, NULL);
  7598    sigaction(SIGPIPE, &sa, NULL);
  7599  
  7600  }
  7601  
  7602  
  7603  /* Rewrite argv for QEMU. */
  7604  
  7605  static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
  7606  
  7607    char** new_argv = ck_alloc(sizeof(char*) * (argc + 4));
  7608    u8 *tmp, *cp, *rsl, *own_copy;
  7609  
  7610    /* Workaround for a QEMU stability glitch. */
  7611  
  7612    setenv("QEMU_LOG", "nochain", 1);
  7613  
  7614    memcpy(new_argv + 3, argv + 1, sizeof(char*) * argc);
  7615  
  7616    new_argv[2] = target_path;
  7617    new_argv[1] = "--";
  7618  
  7619    /* Now we need to actually find the QEMU binary to put in argv[0]. */
  7620  
  7621    tmp = getenv("AFL_PATH");
  7622  
  7623    if (tmp) {
  7624  
  7625      cp = alloc_printf("%s/afl-qemu-trace", tmp);
  7626  
  7627      if (access(cp, X_OK))
  7628        FATAL("Unable to find '%s'", tmp);
  7629  
  7630      target_path = new_argv[0] = cp;
  7631      return new_argv;
  7632  
  7633    }
  7634  
  7635    own_copy = ck_strdup(own_loc);
  7636    rsl = strrchr(own_copy, '/');
  7637  
  7638    if (rsl) {
  7639  
  7640      *rsl = 0;
  7641  
  7642      cp = alloc_printf("%s/afl-qemu-trace", own_copy);
  7643      ck_free(own_copy);
  7644  
  7645      if (!access(cp, X_OK)) {
  7646  
  7647        target_path = new_argv[0] = cp;
  7648        return new_argv;
  7649  
  7650      }
  7651  
  7652    } else ck_free(own_copy);
  7653  
  7654    if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) {
  7655  
  7656      target_path = new_argv[0] = ck_strdup(BIN_PATH "/afl-qemu-trace");
  7657      return new_argv;
  7658  
  7659    }
  7660  
  7661    SAYF("\n" cLRD "[-] " cRST
  7662         "Oops, unable to find the 'afl-qemu-trace' binary. The binary must be built\n"
  7663         "    separately by following the instructions in qemu_mode/README.qemu. If you\n"
  7664         "    already have the binary installed, you may need to specify AFL_PATH in the\n"
  7665         "    environment.\n\n"
  7666  
  7667         "    Of course, even without QEMU, afl-fuzz can still work with binaries that are\n"
  7668         "    instrumented at compile time with afl-gcc. It is also possible to use it as a\n"
  7669         "    traditional \"dumb\" fuzzer by specifying '-n' in the command line.\n");
  7670  
  7671    FATAL("Failed to locate 'afl-qemu-trace'.");
  7672  
  7673  }
  7674  
  7675  
  7676  /* Make a copy of the current command line. */
  7677  
  7678  static void save_cmdline(u32 argc, char** argv) {
  7679  
  7680    u32 len = 1, i;
  7681    u8* buf;
  7682  
  7683    for (i = 0; i < argc; i++)
  7684      len += strlen(argv[i]) + 1;
  7685    
  7686    buf = orig_cmdline = ck_alloc(len);
  7687  
  7688    for (i = 0; i < argc; i++) {
  7689  
  7690      u32 l = strlen(argv[i]);
  7691  
  7692      memcpy(buf, argv[i], l);
  7693      buf += l;
  7694  
  7695      if (i != argc - 1) *(buf++) = ' ';
  7696  
  7697    }
  7698  
  7699    *buf = 0;
  7700  
  7701  }
  7702  
  7703  
  7704  #ifndef AFL_LIB
  7705  
  7706  /* Main entry point */
  7707  
int main(int argc, char** argv) {

  s32 opt;
  u64 prev_queued = 0;              /* queued_paths at the start of the last cycle */
  u32 sync_interval_cnt = 0, seek_to;
  u8  *extras_dir = 0;              /* -x: extra dictionary directory              */
  u8  mem_limit_given = 0;          /* -m seen? (so -Q won't override the limit)   */
  u8  exit_1 = !!getenv("AFL_BENCH_JUST_ONE");
  char** use_argv;

  struct timeval tv;
  struct timezone tz;

  SAYF(cCYA "afl-fuzz " cBRI VERSION cRST " by <lcamtuf@google.com>\n");

  doc_path = access(DOC_PATH, F_OK) ? "docs" : DOC_PATH;

  /* Seed libc's PRNG from the clock and our PID. */

  gettimeofday(&tv, &tz);
  srandom(tv.tv_sec ^ tv.tv_usec ^ getpid());

  while ((opt = getopt(argc, argv, "+i:o:f:m:t:T:dnCB:S:M:x:Q")) > 0)

    switch (opt) {

      case 'i': /* input dir */

        if (in_dir) FATAL("Multiple -i options not supported");
        in_dir = optarg;

        /* "-i -" requests an in-place resume from the output directory. */

        if (!strcmp(in_dir, "-")) in_place_resume = 1;

        break;

      case 'o': /* output dir */

        if (out_dir) FATAL("Multiple -o options not supported");
        out_dir = optarg;
        break;

      case 'M': { /* master sync ID */

          u8* c;

          if (sync_id) FATAL("Multiple -S or -M options not supported");
          sync_id = ck_strdup(optarg);

          /* Optional "name:id/total" form; id/total validated here, the
             colon is stripped from sync_id. */

          if ((c = strchr(sync_id, ':'))) {

            *c = 0;

            if (sscanf(c + 1, "%u/%u", &master_id, &master_max) != 2 ||
                !master_id || !master_max || master_id > master_max ||
                master_max > 1000000) FATAL("Bogus master ID passed to -M");

          }

          force_deterministic = 1;

        }

        break;

      case 'S': /* secondary sync ID */

        if (sync_id) FATAL("Multiple -S or -M options not supported");
        sync_id = ck_strdup(optarg);
        break;

      case 'f': /* target file */

        if (out_file) FATAL("Multiple -f options not supported");
        out_file = optarg;
        break;

      case 'x': /* dictionary */

        if (extras_dir) FATAL("Multiple -x options not supported");
        extras_dir = optarg;
        break;

      case 't': { /* timeout */

          u8 suffix = 0;

          if (timeout_given) FATAL("Multiple -t options not supported");

          if (sscanf(optarg, "%u%c", &exec_tmout, &suffix) < 1 ||
              optarg[0] == '-') FATAL("Bad syntax used for -t");

          if (exec_tmout < 5) FATAL("Dangerously low value of -t");

          /* A '+' suffix stores 2 instead of 1; the distinction is
             consumed elsewhere. */

          if (suffix == '+') timeout_given = 2; else timeout_given = 1;

          break;

      }

      case 'm': { /* mem limit */

          u8 suffix = 'M';

          if (mem_limit_given) FATAL("Multiple -m options not supported");
          mem_limit_given = 1;

          if (!strcmp(optarg, "none")) {

            /* 0 means no memory limit at all. */

            mem_limit = 0;
            break;

          }

          if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
              optarg[0] == '-') FATAL("Bad syntax used for -m");

          /* mem_limit is kept in megabytes internally. */

          switch (suffix) {

            case 'T': mem_limit *= 1024 * 1024; break;
            case 'G': mem_limit *= 1024; break;
            case 'k': mem_limit /= 1024; break;
            case 'M': break;

            default:  FATAL("Unsupported suffix or bad syntax for -m");

          }

          if (mem_limit < 5) FATAL("Dangerously low value of -m");

          if (sizeof(rlim_t) == 4 && mem_limit > 2000)
            FATAL("Value of -m out of range on 32-bit systems");

        }

        break;

      case 'd': /* skip deterministic */

        if (skip_deterministic) FATAL("Multiple -d options not supported");
        skip_deterministic = 1;
        use_splicing = 1;
        break;

      case 'B': /* load bitmap */

        /* This is a secret undocumented option! It is useful if you find
           an interesting test case during a normal fuzzing process, and want
           to mutate it without rediscovering any of the test cases already
           found during an earlier run.

           To use this mode, you need to point -B to the fuzz_bitmap produced
           by an earlier run for the exact same binary... and that's it.

           I only used this once or twice to get variants of a particular
           file, so I'm not making this an official setting. */

        if (in_bitmap) FATAL("Multiple -B options not supported");

        in_bitmap = optarg;
        read_bitmap(in_bitmap);
        break;

      case 'C': /* crash mode */

        if (crash_mode) FATAL("Multiple -C options not supported");
        crash_mode = FAULT_CRASH;
        break;

      case 'n': /* dumb mode */

        if (dumb_mode) FATAL("Multiple -n options not supported");

        /* dumb_mode == 2 (with AFL_DUMB_FORKSRV) is distinguished from 1
           elsewhere; see the AFL_NO_FORKSRV check below. */

        if (getenv("AFL_DUMB_FORKSRV")) dumb_mode = 2; else dumb_mode = 1;

        break;

      case 'T': /* banner */

        if (use_banner) FATAL("Multiple -T options not supported");
        use_banner = optarg;
        break;

      case 'Q': /* QEMU mode */

        if (qemu_mode) FATAL("Multiple -Q options not supported");
        qemu_mode = 1;

        /* QEMU gets its own default memory limit unless -m was given. */

        if (!mem_limit_given) mem_limit = MEM_LIMIT_QEMU;

        break;

      default:

        usage(argv[0]);

    }

  /* The target command line (everything after the options) is mandatory,
     as are -i and -o. */

  if (optind == argc || !in_dir || !out_dir) usage(argv[0]);

  setup_signal_handlers();
  check_asan_opts();

  if (sync_id) fix_up_sync();

  /* Cross-option sanity checks. */

  if (!strcmp(in_dir, out_dir))
    FATAL("Input and output directories can't be the same");

  if (dumb_mode) {

    if (crash_mode) FATAL("-C and -n are mutually exclusive");
    if (qemu_mode)  FATAL("-Q and -n are mutually exclusive");

  }

  /* Environment-variable knobs. */

  if (getenv("AFL_NO_FORKSRV"))    no_forkserver    = 1;
  if (getenv("AFL_NO_CPU_RED"))    no_cpu_meter_red = 1;
  if (getenv("AFL_NO_ARITH"))      no_arith         = 1;
  if (getenv("AFL_SHUFFLE_QUEUE")) shuffle_queue    = 1;
  if (getenv("AFL_FAST_CAL"))      fast_cal         = 1;

  if (getenv("AFL_HANG_TMOUT")) {
    hang_tmout = atoi(getenv("AFL_HANG_TMOUT"));
    if (!hang_tmout) FATAL("Invalid value of AFL_HANG_TMOUT");
  }

  if (dumb_mode == 2 && no_forkserver)
    FATAL("AFL_DUMB_FORKSRV and AFL_NO_FORKSRV are mutually exclusive");

  /* Propagate AFL_PRELOAD to the dynamic loader (both Linux and macOS
     spellings). */

  if (getenv("AFL_PRELOAD")) {
    setenv("LD_PRELOAD", getenv("AFL_PRELOAD"), 1);
    setenv("DYLD_INSERT_LIBRARIES", getenv("AFL_PRELOAD"), 1);
  }

  if (getenv("AFL_LD_PRELOAD"))
    FATAL("Use AFL_PRELOAD instead of AFL_LD_PRELOAD");

  save_cmdline(argc, argv);

  fix_up_banner(argv[optind]);

  check_if_tty();

  get_core_count();

#ifdef HAVE_AFFINITY
  bind_to_free_cpu();
#endif /* HAVE_AFFINITY */

  check_crash_handling();
  check_cpu_governor();

  /* One-time setup: shared memory, output dirs, input queue. */

  setup_post();
  setup_shm();
  init_count_class16();

  setup_dirs_fds();
  read_testcases();
  load_auto();

  pivot_inputs();

  if (extras_dir) load_extras(extras_dir);

  if (!timeout_given) find_timeout();

  detect_file_args(argv + optind + 1);

  if (!out_file) setup_stdio_file();

  check_binary(argv[optind]);

  start_time = get_cur_time();

  /* In QEMU mode, the target argv is rewritten to invoke afl-qemu-trace. */

  if (qemu_mode)
    use_argv = get_qemu_argv(argv[0], argv + optind, argc - optind);
  else
    use_argv = argv + optind;

  perform_dry_run(use_argv);

  cull_queue();

  show_init_stats();

  /* Resume position within the queue, if resuming a previous run. */

  seek_to = find_start_position();

  write_stats_file(0, 0, 0);
  save_auto();

  if (stop_soon) goto stop_fuzzing;

  /* Woop woop woop */

  /* Give the user a moment to read the init stats; compensate start_time
     for the pause. */

  if (!not_on_tty) {
    sleep(4);
    start_time += 4000;
    if (stop_soon) goto stop_fuzzing;
  }

  /* Main fuzzing loop: one iteration per queue entry. */

  while (1) {

    u8 skipped_fuzz;

    cull_queue();

    /* End of queue (or first iteration): begin a new queue cycle. */

    if (!queue_cur) {

      queue_cycle++;
      current_entry     = 0;
      cur_skipped_paths = 0;
      queue_cur         = queue;

      /* Skip ahead to the position computed by find_start_position(). */

      while (seek_to) {
        current_entry++;
        seek_to--;
        queue_cur = queue_cur->next;
      }

      show_stats();

      if (not_on_tty) {
        ACTF("Entering queue cycle %llu.", queue_cycle);
        fflush(stdout);
      }

      /* If we had a full queue cycle with no new finds, try
         recombination strategies next. */

      if (queued_paths == prev_queued) {

        if (use_splicing) cycles_wo_finds++; else use_splicing = 1;

      } else cycles_wo_finds = 0;

      prev_queued = queued_paths;

      if (sync_id && queue_cycle == 1 && getenv("AFL_IMPORT_FIRST"))
        sync_fuzzers(use_argv);

    }

    skipped_fuzz = fuzz_one(use_argv);

    /* Periodically pull test cases from other fuzzers in the sync dir. */

    if (!stop_soon && sync_id && !skipped_fuzz) {

      if (!(sync_interval_cnt++ % SYNC_INTERVAL))
        sync_fuzzers(use_argv);

    }

    /* AFL_BENCH_JUST_ONE: stop after a single fuzz_one() pass. */

    if (!stop_soon && exit_1) stop_soon = 2;

    if (stop_soon) break;

    queue_cur = queue_cur->next;
    current_entry++;

  }

  if (queue_cur) show_stats();

  /* Flush final state before shutting down. */

  write_bitmap();
  write_stats_file(0, 0, 0);
  save_auto();

stop_fuzzing:

  SAYF(CURSOR_SHOW cLRD "\n\n+++ Testing aborted %s +++\n" cRST,
       stop_soon == 2 ? "programmatically" : "by user");

  /* Running for more than 30 minutes but still doing first cycle? */

  if (queue_cycle == 1 && get_cur_time() - start_time > 30 * 60 * 1000) {

    SAYF("\n" cYEL "[!] " cRST
           "Stopped during the first cycle, results may be incomplete.\n"
           "    (For info on resuming, see %s/README.)\n", doc_path);

  }

  /* Tear down and report on tracked allocations. */

  fclose(plot_file);
  destroy_queue();
  destroy_extras();
  ck_free(target_path);
  ck_free(sync_id);

  alloc_report();

  OKF("We're done here. Have a nice day!\n");

  exit(0);

}
  8098  
  8099  #endif /* !AFL_LIB */