Home | History | Annotate | Download | only in tutorial
      1 /*
      2  * Copyright (C) 2009 Google Inc.
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  * http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 package tutorial;
     18 
     19 import com.google.caliper.BeforeExperiment;
     20 import com.google.caliper.Benchmark;
     21 import com.google.caliper.Param;
     22 
     23 /**
     24  * Caliper tutorial. To run the example benchmarks in this file:
     25  * {@code CLASSPATH=... [caliper_home]/caliper tutorial.Tutorial.Benchmark1}
     26  */
     27 public class Tutorial {
     28 
     29   /*
     30    * We begin the Caliper tutorial with the simplest benchmark you can write.
     31    * We'd like to know how efficient the method System.nanoTime() is.
     32    *
     33    * Notice:
     34    *
      35    *  - We write a class containing methods annotated with @Benchmark.
      36    *  - Each such method's name begins with 'time' and accepts a single
      37    *    'int reps' parameter.
     38    *  - The body of the method simply executes the code we wish to measure,
     39    *    'reps' times.
     40    *
     41    * Example run:
     42    *
     43    *    $ CLASSPATH=build/classes/test caliper tutorial.Tutorial.Benchmark1
     44    *    [real-time results appear on this line]
     45    *
     46    *    Summary report for tutorial.Tutorial$Benchmark1:
     47    *
     48    *    Benchmark   ns
     49    *    ---------  ---
     50    *    NanoTime   233
     51    */
     52   public static class Benchmark1 {
     53     @Benchmark void timeNanoTime(int reps) {
     54       for (int i = 0; i < reps; i++) {
     55         System.nanoTime();
     56       }
     57     }
     58   }
     59 
     60   /*
     61    * Now let's compare two things: nanoTime() versus currentTimeMillis().
     62    * Notice:
     63    *
     64    *  - We simply add another method, following the same rules as the first.
     65    *
     66    * Example run output:
     67    *
     68    *   Benchmark           ns
     69    *   -----------------  ---
     70    *   NanoTime           248
     71    *   CurrentTimeMillis  118
     72    */
     73   public static class Benchmark2 {
     74     @Benchmark void timeNanoTime(int reps) {
     75       for (int i = 0; i < reps; i++) {
     76         System.nanoTime();
     77       }
     78     }
     79     @Benchmark void timeCurrentTimeMillis(int reps) {
     80       for (int i = 0; i < reps; i++) {
     81         System.currentTimeMillis();
     82       }
     83     }
     84   }
     85 
     86   /*
     87    * Let's try iterating over a large array. This seems simple enough, but
     88    * there is a problem!
     89    */
     90   public static class Benchmark3 {
     91     private final int[] array = new int[1000000];
     92 
     93     @SuppressWarnings("UnusedDeclaration") // IDEA tries to warn us!
     94     @Benchmark void timeArrayIteration_BAD(int reps) {
     95       for (int i = 0; i < reps; i++) {
     96         for (int ignoreMe : array) {}
     97       }
     98     }
     99   }
    100 
    101   /*
    102    * Caliper reported that the benchmark above ran in 4 nanoseconds.
    103    *
    104    * Wait, what?
    105    *
    106    * How can it possibly iterate over a million zeroes in 4 ns!?
    107    *
    108    * It is very important to sanity-check benchmark results with common sense!
    109    * In this case, we're indeed getting a bogus result. The problem is that the
    110    * Java Virtual Machine is too smart: it detected the fact that the loop was
    111    * producing no actual result, so it simply compiled it right out. The method
    112    * never looped at all. To fix this, we need to use a dummy result value.
    113    *
    114    * Notice:
    115    *
    116    *  - We simply change the 'time' method from 'void' to any return type we
    117    *    wish. Then we return a value that can't be known without actually
    118    *    performing the work, and thus we defeat the runtime optimizations.
    119    *  - We're no longer timing *just* the code we want to be testing - our
    120    *    result will now be inflated by the (small) cost of addition. This is an
    121    *    unfortunate fact of life with microbenchmarking. In fact, we were
    122    *    already inflated by the cost of an int comparison, "i < reps" as it was.
    123    *
    124    * With this change, Caliper should report a much more realistic value, more
    125    * on the order of an entire millisecond.
    126    */
    127   public static class Benchmark4 {
    128     private final int[] array = new int[1000000];
    129 
    130     @Benchmark int timeArrayIteration_fixed(int reps) {
    131       int dummy = 0;
    132       for (int i = 0; i < reps; i++) {
    133         for (int doNotIgnoreMe : array) {
    134           dummy += doNotIgnoreMe;
    135         }
    136       }
    137       return dummy; // framework ignores this, but it has served its purpose!
    138     }
    139   }
    140 
    141   /*
    142    * Now we'd like to know how various other *sizes* of arrays perform. We
    143    * don't want to have to cut and paste the whole benchmark just to provide a
    144    * different size. What we need is a parameter!
    145    *
    146    * When you run this benchmark the same way you ran the previous ones, you'll
    147    * now get an error: "No values provided for benchmark parameter 'size'".
    148    * You can provide the value requested at the command line like this:
    149    *
     150    *   [caliper_home]/caliper tutorial.Tutorial.Benchmark5 -Dsize=100
    151    *
    152    * You'll see output like this:
    153    *
    154    *   Benchmark       size   ns
    155    *   --------------  ----  ---
    156    *   ArrayIteration   100   51
    157    *
    158    * Now that we've parameterized our benchmark, things are starting to get fun.
    159    * Try passing '-Dsize=10,100,1000' and see what happens!
    160    *
    161    *   Benchmark       size   ns
    162    *   --------------  ----  -----------------------------------
    163    *   ArrayIteration    10    7 |
    164    *   ArrayIteration   100   49 ||||
    165    *   ArrayIteration  1000  477 ||||||||||||||||||||||||||||||
    166    *
    167    */
    168   public static class Benchmark5 {
    169     @Param int size; // set automatically by framework
    170 
    171     private int[] array; // set by us, in setUp()
    172 
    173     @BeforeExperiment void setUp() {
    174       // @Param values are guaranteed to have been injected by now
    175       array = new int[size];
    176     }
    177 
    178     @Benchmark int timeArrayIteration(int reps) {
    179       int dummy = 0;
    180       for (int i = 0; i < reps; i++) {
    181         for (int doNotIgnoreMe : array) {
    182           dummy += doNotIgnoreMe;
    183         }
    184       }
    185       return dummy;
    186     }
    187   }
    188 }
    189