Rick

Wednesday, December 25, 2013

From File to java.util.Map: Boon faster than Jackson

Benchmark                                   Mode Thr     Count  Sec         Mean   Mean error    Units
i.g.j.f.BoonBenchMark.actionLabel          thrpt   8         5    1   208131.360    64075.346    ops/s
i.g.j.f.JacksonASTBenchmark.actionLabel    thrpt   8         5    1   150706.530     4497.339    ops/s

25% faster Boon!

i.g.j.f.BoonBenchMark.citmCatalog          thrpt   8         5    1      613.403       34.780    ops/s
i.g.j.f.JacksonASTBenchmark.citmCatalog    thrpt   8         5    1      332.803       17.120    ops/s

Nearly 2x faster Boon!

i.g.j.f.BoonBenchMark.medium               thrpt   8         5    1   151640.130    96788.984    ops/s
i.g.j.f.JacksonASTBenchmark.medium         thrpt   8         5    1   125208.850    31949.124    ops/s


20% faster Boon!

i.g.j.f.BoonBenchMark.menu                 thrpt   8         5    1   209612.210     5704.342    ops/s
i.g.j.f.JacksonASTBenchmark.menu           thrpt   8         5    1   186255.893    75485.346    ops/s

10% Faster Boon!

i.g.j.f.BoonBenchMark.sgml                 thrpt   8         5    1   195752.990    17465.283    ops/s
i.g.j.f.JacksonASTBenchmark.sgml           thrpt   8         5    1   169619.163    73962.962    ops/s

15% Faster Boon!

i.g.j.f.BoonBenchMark.webxml               thrpt   8         5    1   126902.333    15238.895    ops/s
i.g.j.f.JacksonASTBenchmark.webxml         thrpt   8         5    1    82469.550    12080.954    ops/s

30% Faster Boon!

i.g.j.f.BoonBenchMark.widget               thrpt   8         5    1   194273.057     3714.143    ops/s
i.g.j.f.JacksonASTBenchmark.widget         thrpt   8         5    1   158183.903    58280.275    ops/s

20% Faster Boon





package io.gatling.jsonbenchmark.file;

import com.fasterxml.jackson.databind.ObjectMapper;   //assuming Jackson 2.x databind
import org.openjdk.jmh.annotations.GenerateMicroBenchmark;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.logic.BlackHole;

import java.io.File;
import java.util.concurrent.TimeUnit;

public class JacksonASTBenchmark {


    private static final ObjectMapper JACKSON_MAPPER = new ObjectMapper();


    public static final String FILE_ACTION_LABEL = ( "data/actionLabel.json" );
    public static final String FILE_CITM_CATALOG = ( "data/citm_catalog.json" );
    public static final String FILE_MEDIUM = ( "data/medium.json" );
    public static final String FILE_MENU = ( "data/menu.json" );
    public static final String FILE_SGML = ( "data/sgml.json" );
    public static final String FILE_WEBXML = ( "data/webxml.json" );
    public static final String FILE_WIDGET = ( "data/widget.json" );


    private Object parse(String fileName) throws Exception {
            return JACKSON_MAPPER.readTree ( new File (fileName) );
    }

    @GenerateMicroBenchmark
    @OutputTimeUnit ( TimeUnit.SECONDS)
    public void actionLabel(BlackHole bh) throws Exception {
        bh.consume( parse( FILE_ACTION_LABEL ) );
    }


    @GenerateMicroBenchmark
    @OutputTimeUnit(TimeUnit.SECONDS)
    public void citmCatalog(BlackHole bh) throws Exception {
        bh.consume( parse( FILE_CITM_CATALOG ) );
    }

    @GenerateMicroBenchmark
    @OutputTimeUnit(TimeUnit.SECONDS)
    public void medium(BlackHole bh) throws Exception {
        bh.consume( parse( FILE_MEDIUM ) );
    }

    @GenerateMicroBenchmark
    @OutputTimeUnit(TimeUnit.SECONDS)
    public void menu(BlackHole bh) throws Exception {
        bh.consume( parse( FILE_MENU ) );
    }

    @GenerateMicroBenchmark
    @OutputTimeUnit(TimeUnit.SECONDS)
    public void sgml(BlackHole bh) throws Exception {
        bh.consume( parse( FILE_SGML ) );
    }

    @GenerateMicroBenchmark
    @OutputTimeUnit(TimeUnit.SECONDS)
    public void webxml(BlackHole bh) throws Exception {
        bh.consume( parse( FILE_WEBXML ) );
    }

    @GenerateMicroBenchmark
    @OutputTimeUnit(TimeUnit.SECONDS)
    public void widget(BlackHole bh) throws Exception {
        bh.consume( parse( FILE_WIDGET ) );
    }


}

package io.gatling.jsonbenchmark.file;

import org.boon.json.JsonParser;
import org.boon.json.JsonParserFactory;
import org.openjdk.jmh.annotations.GenerateMicroBenchmark;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.logic.BlackHole;

import java.util.Map;
import java.util.concurrent.TimeUnit;

@State
public class BoonBenchMark {

    public static final String FILE_ACTION_LABEL = ( "data/actionLabel.json" );
    public static final String FILE_CITM_CATALOG = ( "data/citm_catalog.json" );
    public static final String FILE_MEDIUM = ( "data/medium.json" );
    public static final String FILE_MENU = ( "data/menu.json" );
    public static final String FILE_SGML = ( "data/sgml.json" );
    public static final String FILE_WEBXML = ( "data/webxml.json" );
    public static final String FILE_WIDGET = ( "data/widget.json" );


    private final JsonParser parser = new JsonParserFactory ().create ();

    private Object parse(String fileName) throws Exception {
        return parser.parseFile ( Map.class, fileName);
    }

    @GenerateMicroBenchmark
    @OutputTimeUnit ( TimeUnit.SECONDS)
    public void actionLabel(BlackHole bh) throws Exception {
        bh.consume( parse( FILE_ACTION_LABEL ) );
    }


    @GenerateMicroBenchmark
    @OutputTimeUnit(TimeUnit.SECONDS)
    public void citmCatalog(BlackHole bh) throws Exception {
        bh.consume( parse( FILE_CITM_CATALOG ) );
    }

    @GenerateMicroBenchmark
    @OutputTimeUnit(TimeUnit.SECONDS)
    public void medium(BlackHole bh) throws Exception {
        bh.consume( parse( FILE_MEDIUM ) );
    }

    @GenerateMicroBenchmark
    @OutputTimeUnit(TimeUnit.SECONDS)
    public void menu(BlackHole bh) throws Exception {
        bh.consume( parse( FILE_MENU ) );
    }

    @GenerateMicroBenchmark
    @OutputTimeUnit(TimeUnit.SECONDS)
    public void sgml(BlackHole bh) throws Exception {
        bh.consume( parse( FILE_SGML ) );
    }

    @GenerateMicroBenchmark
    @OutputTimeUnit(TimeUnit.SECONDS)
    public void webxml(BlackHole bh) throws Exception {
        bh.consume( parse( FILE_WEBXML ) );
    }

    @GenerateMicroBenchmark
    @OutputTimeUnit(TimeUnit.SECONDS)
    public void widget(BlackHole bh) throws Exception {
        bh.consume( parse( FILE_WIDGET ) );
    }

}

Boon JSON parser seems to be the fastest

I'll publish object serialization numbers later. Last I checked it was quite a bit faster than the rest.

Here are some benchmark numbers parsing various sample JSON files from json.org and a sample that a user of Boon JSON sent in.






To go from a 2K JSON String to a Map, Boon is 2x or so faster. Yes you say, but how is Boon at larger files? 

[graph: Boon vs GSON vs Jackson throughput parsing a 1.7 MB JSON String]

The above graph shows Boon, GSON and Jackson parsing a 1.7 MB string. Boon is up to twice as fast. 

Yes you say, but how does Boon do at parsing byte[], Reader, InputStream, etc.? Pretty much Boon wins in every category for every file I have tested, and I tested quite a few.

It took a bit of doing. Boon has an I/O lib which I used to speed up the InputStream and Reader support. Boon also has a relaxed-mode JSON parser that allows unquoted keys and the like; it is just as fast as the strict parser.
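For illustration, here is roughly what switching between the strict and relaxed parsers looks like. This is only a sketch: the strict parser is built the same way the BoonBenchMark code above builds it, but the name of the lax factory method (createLaxParser here) is my assumption, so check JsonParserFactory before copying it.

import org.boon.json.JsonParser;
import org.boon.json.JsonParserFactory;

import java.util.Map;

public class LaxVsStrictSketch {

    public static void main ( String[] args ) {
        //Strict parser, constructed the same way as in the benchmark above.
        JsonParser strict = new JsonParserFactory ().create ();

        //Relaxed parser; createLaxParser() is an assumed method name.
        JsonParser lax = new JsonParserFactory ().createLaxParser ();

        Map strictMap = strict.parse ( Map.class, "{\"name\": \"Rick\"}" );
        Map laxMap = lax.parse ( Map.class, "{name: 'Rick'}" );   //no quotes on the key

        System.out.println ( strictMap + " " + laxMap );
    }
}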

The above is not a complete list of tests. 

You don't have to take my word for it. The benchmarks are online. https://github.com/RichardHightower/json-parsers-benchmark.

Here are some numbers to go with the graphs.

12/25/13
1.7 MB JSON String

Benchmark                                      Mode Thr     Count  Sec         Mean   Mean error    Units
i.g.j.s.BoonBenchmark.citmCatalog             thrpt   8         5    1      873.970       94.240    ops/s
i.g.j.s.GSONBenchmark.citmCatalog             thrpt   8         5    1      410.783      217.476    ops/s
i.g.j.s.JacksonASTBenchmark.citmCatalog       thrpt   8         5    1      294.690       47.593    ops/s
i.g.j.s.JacksonObjectBenchmark.citmCatalog    thrpt   8         5    1      305.787       29.107    ops/s
i.g.j.s.JsonSmartBenchmark.citmCatalog        thrpt   8         5    1      311.063       29.646    ops/s

2K JSON String
Benchmark                                 Mode Thr     Count  Sec         Mean   Mean error    Units
i.g.j.s.BoonBenchmark.medium             thrpt   8         5    1   816416.973    13231.453    ops/s
i.g.j.s.GSONBenchmark.medium             thrpt   8         5    1   341148.250    18117.075    ops/s
i.g.j.s.JacksonASTBenchmark.medium       thrpt   8         5    1   263167.610   147495.795    ops/s
i.g.j.s.JacksonObjectBenchmark.medium    thrpt   8         5    1   282024.617     6922.138    ops/s
i.g.j.s.JsonSmartBenchmark.medium        thrpt   8         5    1   296944.993     7852.929    ops/s

1.7 MB JSON byte[]
Benchmark                                      Mode Thr     Count  Sec         Mean   Mean error    Units
i.g.j.b.BoonBenchmark.citmCatalog             thrpt   8         5    1      628.710       91.286    ops/s
i.g.j.b.GSONBenchmark.citmCatalog             thrpt   8         5    1      439.203      120.003    ops/s
i.g.j.b.JacksonASTBenchmark.citmCatalog       thrpt   8         5    1      381.350       97.841    ops/s
i.g.j.b.JacksonObjectBenchmark.citmCatalog    thrpt   8         5    1      402.537        3.634    ops/s
i.g.j.b.JsonSmartBenchmark.citmCatalog        thrpt   8         5    1      341.940       18.847    ops/s

2K JSON byte[]
Benchmark                                 Mode Thr     Count  Sec         Mean   Mean error    Units
i.g.j.b.BoonBenchmark.medium             thrpt   8         5    1   648162.887    18697.319    ops/s
i.g.j.b.GSONBenchmark.medium             thrpt   8         5    1   260145.827     5934.588    ops/s
i.g.j.b.JacksonASTBenchmark.medium       thrpt   8         5    1   289863.140    48969.875    ops/s
i.g.j.b.JacksonObjectBenchmark.medium    thrpt   8         5    1   289010.543    11205.881    ops/s
i.g.j.b.JsonSmartBenchmark.medium        thrpt   8         5    1   262873.957     3901.193    ops/s
1.7 MB JSON InputStream
Benchmark                                                Mode Thr     Count  Sec         Mean   Mean error    Units
i.g.j.inputStream.BoonBenchmark.citmCatalog             thrpt   8         5    1      626.907       31.450    ops/s
i.g.j.inputStream.GSONBenchmark.citmCatalog             thrpt   8         5    1      426.120       13.946    ops/s
i.g.j.inputStream.JacksonASTBenchmark.citmCatalog       thrpt   8         5    1      376.820      115.502    ops/s
i.g.j.inputStream.JacksonObjectBenchmark.citmCatalog    thrpt   8         5    1      360.850       89.648    ops/s


2K JSON InputStream
Benchmark                                           Mode Thr     Count  Sec         Mean   Mean error    Units
i.g.j.inputStream.BoonBenchmark.medium             thrpt   8         5    1   218730.830     5262.596    ops/s
i.g.j.inputStream.GSONBenchmark.medium             thrpt   8         5    1   151255.407     4486.414    ops/s
i.g.j.inputStream.JacksonASTBenchmark.medium       thrpt   8         5    1   156512.527   107512.401    ops/s
i.g.j.inputStream.JacksonObjectBenchmark.medium    thrpt   8         5    1   160793.407     4056.790    ops/s

1.7 MB JSON Reader
Benchmark                                      Mode Thr     Count  Sec         Mean   Mean error    Units
i.g.j.r.BoonBenchmark.citmCatalog             thrpt   8         5    1      615.313       63.716    ops/s
i.g.j.r.GSONBenchmark.citmCatalog             thrpt   8         5    1      411.847       18.978    ops/s
i.g.j.r.JacksonASTBenchmark.citmCatalog       thrpt   8         5    1      264.727      118.541    ops/s
i.g.j.r.JacksonObjectBenchmark.citmCatalog    thrpt   8         5    1      246.783       93.409    ops/s
i.g.j.r.JsonSmartBenchmark.citmCatalog        thrpt   8         5    1      151.097        3.502    ops/s

2k JSON Reader
Benchmark                                 Mode Thr     Count  Sec         Mean   Mean error    Units
i.g.j.r.BoonBenchmark.medium             thrpt   8         5    1   185075.093     6528.567    ops/s
i.g.j.r.GSONBenchmark.medium             thrpt   8         5    1   134025.760     3385.134    ops/s
i.g.j.r.JacksonASTBenchmark.medium       thrpt   8         5    1   107676.323    60674.421    ops/s
i.g.j.r.JacksonObjectBenchmark.medium    thrpt   8         5    1   116903.500     3206.994    ops/s
i.g.j.r.JsonSmartBenchmark.medium        thrpt   8         5    1    77898.710     2434.773    ops/s
Other JSON.org examples:
webxml json.org example
Benchmark                                 Mode Thr     Count  Sec         Mean   Mean error    Units
i.g.j.s.BoonBenchmark.webxml             thrpt   8         5    1   421016.033    13428.790    ops/s
i.g.j.s.GSONBenchmark.webxml             thrpt   8         5    1   143801.263     7870.384    ops/s
i.g.j.s.JacksonASTBenchmark.webxml       thrpt   8         5    1   125981.563    36753.717    ops/s
i.g.j.s.JacksonObjectBenchmark.webxml    thrpt   8         5    1   130069.577    25055.300    ops/s
i.g.j.s.JsonSmartBenchmark.webxml        thrpt   8         5    1   132422.153    10254.167    ops/s
Boon 3X faster
sgml json.org example
Benchmark                               Mode Thr     Count  Sec         Mean   Mean error    Units
i.g.j.s.BoonBenchmark.sgml             thrpt   8         5    1  1846015.410   101291.991    ops/s
i.g.j.s.GSONBenchmark.sgml             thrpt   8         5    1   988186.433    35337.393    ops/s
i.g.j.s.JacksonASTBenchmark.sgml       thrpt   8         5    1   680502.597   289591.197    ops/s
i.g.j.s.JacksonObjectBenchmark.sgml    thrpt   8         5    1   709969.980    29621.959    ops/s
i.g.j.s.JsonSmartBenchmark.sgml        thrpt   8         5    1   796387.753    22697.397    ops/s
Boon 2x faster
actionLabel json.org example
Benchmark                                      Mode Thr     Count  Sec         Mean   Mean error    Units
i.g.j.s.BoonBenchmark.actionLabel             thrpt   8         5    1  1109285.703    78440.576    ops/s
i.g.j.s.GSONBenchmark.actionLabel             thrpt   8         5    1   429742.283    10097.416    ops/s
i.g.j.s.JacksonASTBenchmark.actionLabel       thrpt   8         5    1   421132.630    10514.598    ops/s
i.g.j.s.JacksonObjectBenchmark.actionLabel    thrpt   8         5    1   403535.453    16382.734    ops/s
i.g.j.s.JsonSmartBenchmark.actionLabel        thrpt   8         5    1   453847.673    25607.331    ops/s
Boon over 2x faster
menu json.org example
Benchmark                               Mode Thr     Count  Sec         Mean   Mean error    Units
i.g.j.s.BoonBenchmark.menu             thrpt   8         5    1  2582429.350   700873.986    ops/s
i.g.j.s.GSONBenchmark.menu             thrpt   8         5    1  1240234.083    22312.822    ops/s
i.g.j.s.JacksonASTBenchmark.menu       thrpt   8         5    1  1242132.793    19273.775    ops/s
i.g.j.s.JacksonObjectBenchmark.menu    thrpt   8         5    1  1141071.207    36489.605    ops/s
i.g.j.s.JsonSmartBenchmark.menu        thrpt   8         5    1  1463778.480    57490.408    ops/s
Boon 2x faster.
widget json.org example
Benchmark                                 Mode Thr     Count  Sec         Mean   Mean error    Units
i.g.j.s.BoonBenchmark.widget             thrpt   8         5    1  1485476.970    79222.003    ops/s
i.g.j.s.GSONBenchmark.widget             thrpt   8         5    1   810153.490    20079.953    ops/s
i.g.j.s.JacksonASTBenchmark.widget       thrpt   8         5    1   724349.650   284735.196    ops/s
i.g.j.s.JacksonObjectBenchmark.widget    thrpt   8         5    1   705271.907    42304.730    ops/s
i.g.j.s.JsonSmartBenchmark.widget        thrpt   8         5    1   728506.560    29680.028    ops/s
Boon is damn fast.

It has many modes to fit various input sources and goals (small footprint, direct byte parsing, etc.). Don't worry, Boon is not hard to use. It just works.
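As a rough sketch of what those modes look like from the caller's side, the same parser can be pointed at a String, a byte[], or an InputStream. The overloads shown here are assumptions based on the benchmark packages above (i.g.j.s, i.g.j.b, i.g.j.inputStream), so verify them against the Boon API you are using.

import org.boon.json.JsonParser;
import org.boon.json.JsonParserFactory;

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import java.util.Map;

public class InputSourceSketch {

    public static void main ( String[] args ) {
        JsonParser parser = new JsonParserFactory ().create ();
        String json = "{\"menu\": {\"id\": \"file\", \"value\": \"File\"}}";
        byte[] bytes = json.getBytes ( StandardCharsets.UTF_8 );

        //Same parser, three different input sources (overload names assumed).
        Map fromString = parser.parse ( Map.class, json );
        Map fromBytes = parser.parse ( Map.class, bytes );
        Map fromStream = parser.parse ( Map.class, new ByteArrayInputStream ( bytes ) );

        System.out.println ( fromString + " " + fromBytes + " " + fromStream );
    }
}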

Benchmark                                      Mode Thr     Count  Sec         Mean   Mean error    Units
i.g.j.b.BoonAsciiBytes.actionLabel            thrpt   8         5    1   302902.677    21981.467    ops/s
i.g.j.b.BoonAsciiBytes.citmCatalog            thrpt   8         5    1      628.150       26.607    ops/s
i.g.j.b.BoonAsciiBytes.medium                 thrpt   8         5    1   320658.760    38751.800    ops/s
i.g.j.b.BoonAsciiBytes.menu                   thrpt   8         5    1  2081501.213   113660.611    ops/s
i.g.j.b.BoonAsciiBytes.sgml                   thrpt   8         5    1   998463.200    31916.216    ops/s
i.g.j.b.BoonAsciiBytes.small                  thrpt   8         5    1 11095898.987   534428.831    ops/s
i.g.j.b.BoonAsciiBytes.webxml                 thrpt   8         5    1   148348.463     5512.808    ops/s
i.g.j.b.BoonAsciiBytes.widget                 thrpt   8         5    1   879580.747    14598.011    ops/s
i.g.j.b.BoonBenchMarkLax.actionLabel          thrpt   8         5    1   806689.270    28745.917    ops/s
i.g.j.b.BoonBenchMarkLax.citmCatalog          thrpt   8         5    1      633.087       77.455    ops/s
i.g.j.b.BoonBenchMarkLax.medium               thrpt   8         5    1   569042.093    61404.916    ops/s
i.g.j.b.BoonBenchMarkLax.menu                 thrpt   8         5    1  2600248.763   105320.234    ops/s
i.g.j.b.BoonBenchMarkLax.sgml                 thrpt   8         5    1  1476412.973   284184.058    ops/s
i.g.j.b.BoonBenchMarkLax.small                thrpt   8         5    1 13336195.790  1442531.930    ops/s
i.g.j.b.BoonBenchMarkLax.webxml               thrpt   8         5    1   270060.157     6539.573    ops/s
i.g.j.b.BoonBenchMarkLax.widget               thrpt   8         5    1  1262768.937    51676.215    ops/s
i.g.j.b.BoonBenchMarkUTF8Bytes.actionLabel    thrpt   8         5    1   185209.077   670100.163    ops/s
i.g.j.b.BoonBenchMarkUTF8Bytes.citmCatalog    thrpt   8         5    1      379.917       30.037    ops/s
i.g.j.b.BoonBenchMarkUTF8Bytes.medium         thrpt   8         5    1   217107.220     5247.417    ops/s
i.g.j.b.BoonBenchMarkUTF8Bytes.menu           thrpt   8         5    1  1319969.417    79745.189    ops/s
i.g.j.b.BoonBenchMarkUTF8Bytes.sgml           thrpt   8         5    1   688184.650    34033.100    ops/s
i.g.j.b.BoonBenchMarkUTF8Bytes.small          thrpt   8         5    1  7486431.520  1228519.698    ops/s
i.g.j.b.BoonBenchMarkUTF8Bytes.webxml         thrpt   8         5    1   104078.393    15332.908    ops/s
i.g.j.b.BoonBenchMarkUTF8Bytes.widget         thrpt   8         5    1   526663.853   214399.644    ops/s
i.g.j.b.BoonCharArray.actionLabel             thrpt   8         5    1   407056.423   149970.346    ops/s
i.g.j.b.BoonCharArray.citmCatalog             thrpt   8         5    1      391.130       55.374    ops/s
i.g.j.b.BoonCharArray.medium                  thrpt   8         5    1   320601.040    83669.815    ops/s
i.g.j.b.BoonCharArray.menu                    thrpt   8         5    1  1686792.320   112046.346    ops/s
i.g.j.b.BoonCharArray.sgml                    thrpt   8         5    1  1052574.220    44541.919    ops/s
i.g.j.b.BoonCharArray.small                   thrpt   8         5    1  8071292.173   663678.327    ops/s
i.g.j.b.BoonCharArray.webxml                  thrpt   8         5    1   181207.910    32126.919    ops/s
i.g.j.b.BoonCharArray.widget                  thrpt   8         5    1   878541.030   137067.187    ops/s
i.g.j.b.BoonFastParser.actionLabel            thrpt   8         5    1   601141.330    77361.337    ops/s
i.g.j.b.BoonFastParser.citmCatalog            thrpt   8         5    1      429.987      198.559    ops/s
i.g.j.b.BoonFastParser.medium                 thrpt   8         5    1   462712.293   118751.410    ops/s
i.g.j.b.BoonFastParser.menu                   thrpt   8         5    1  1981728.817   239514.140    ops/s
i.g.j.b.BoonFastParser.sgml                   thrpt   8         5    1  1117030.450   209863.168    ops/s
i.g.j.b.BoonFastParser.small                  thrpt   8         5    1 10197156.600   169372.770    ops/s
i.g.j.b.BoonFastParser.webxml                 thrpt   8         5    1   230100.983    62048.894    ops/s
i.g.j.b.BoonFastParser.widget                 thrpt   8         5    1  1242538.033   169654.975    ops/s
i.g.j.b.BoonStringDirect.actionLabel          thrpt   8         5    1   461358.763    45184.611    ops/s
i.g.j.b.BoonStringDirect.citmCatalog          thrpt   8         5    1      332.883       25.544    ops/s
i.g.j.b.BoonStringDirect.medium               thrpt   8         5    1   323354.063    18819.168    ops/s
i.g.j.b.BoonStringDirect.menu                 thrpt   8         5    1  1668149.967    52797.831    ops/s
i.g.j.b.BoonStringDirect.sgml                 thrpt   8         5    1   933777.700    77093.442    ops/s
i.g.j.b.BoonStringDirect.small                thrpt   8         5    1  7111685.283   205942.968    ops/s
i.g.j.b.BoonStringDirect.webxml               thrpt   8         5    1   154376.677    50416.916    ops/s
i.g.j.b.BoonStringDirect.widget               thrpt   8         5    1   575450.757    45103.058    ops/s

Thursday, December 19, 2013

JsonPath decides Boon is the fastest way to do JsonPath

Independently verified: Boon is faster than Jackson for JSON parsing.
"Update with latest new Boon parsers, rocking!"
"Boon now seems to be doing an incredible job for this use case"
Benchmark                                                                     Mode Thr     Count  Sec         Mean   Mean error    Units
GatlingBoonJsonParserForJsonPathBenchmark.parseBytesPrecompiledRoundRobin    thrpt   8        20    1   150867.743     8082.173    ops/s
GatlingBoonJsonParserForJsonPathBenchmark.parseCharsPrecompiledRoundRobin    thrpt   8        20    1   146406.116     8084.484    ops/s
GatlingBoonFastBenchmark.parseBytesPrecompiledRoundRobin                     thrpt   8        20    1   132918.858     6284.514    ops/s
GatlingBoonFastBenchmark.parseCharsPrecompiledRoundRobin                     thrpt   8        20    1   126100.990     4843.557    ops/s
GatlingJacksonBenchmark.parseBytesPrecompiledRoundRobin                      thrpt   8        20    1    98950.368     4735.560    ops/s
GatlingJacksonBenchmark.parseStringPrecompiledRoundRobin                     thrpt   8        20    1    76469.223     4301.819    ops/s
GatlingJsonSmartBenchmark.parseStringPrecompiledRoundRobin                   thrpt   8        20    1    74431.640     3237.084    ops/s
JaywayJacksonBenchmark.parseBytesPrecompiledRoundRobin                       thrpt   8        20    1    54236.658    16471.075    ops/s
JaywayJacksonBenchmark.parseStringPrecompiledRoundRobin                      thrpt   8        20    1    43793.581    13813.314    ops/s



The above benchmark was from direct bytes. You should see what Boon can do with char[] and String. It is even faster with those. 

Wednesday, December 18, 2013

LRUCache in Java using just Java APIs

This is round two.
The first round was what I came up with before I reread the comments and got the domain a bit more ingrained in my head.
So here is the simplest version, with a unit test that shows it works, based on some of the other versions.
First the non-concurrent version:
import java.util.LinkedHashMap;
import java.util.Map;

public class LruSimpleCache<K, V> implements LruCache <K, V>{

    Map<K, V> map;


    public LruSimpleCache (final int limit) {
           map = new LinkedHashMap <K, V> (16, 0.75f, true) {
               @Override
               protected boolean removeEldestEntry(final Map.Entry<K, V> eldest) {
                   return super.size() > limit;
               }
           };
    }
    @Override
    public void put ( K key, V value ) {
        map.put ( key, value );
    }

    @Override
    public V get ( K key ) {
        return map.get(key);
    }

    //For testing only
    @Override
    public V getSilent ( K key ) {
        V value =  map.get ( key );
        if (value!=null) {
            map.remove ( key );
            map.put(key, value);
        }
        return value;
    }

    @Override
    public void remove ( K key ) {
        map.remove ( key );
    }

    @Override
    public int size () {
        return map.size ();
    }

    public String toString() {
        return map.toString ();
    }


}
The true flag tracks access order for gets and puts (see the JavaDocs for LinkedHashMap). Without the true flag in the constructor, removeEldestEntry would give you a FIFO cache instead of an LRU (see the notes below on FIFO and removeEldestEntry).
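A quick JDK-only illustration of what that access-order flag does (this is not part of the cache, just a demo): with accessOrder set to true, a get() moves the entry to the end of the iteration order, which is exactly what removeEldestEntry keys off.

import java.util.LinkedHashMap;
import java.util.Map;

public class AccessOrderDemo {

    public static void main ( String[] args ) {
        //accessOrder = true: iteration order goes from least to most recently accessed.
        Map<String, Integer> map = new LinkedHashMap<> ( 16, 0.75f, true );
        map.put ( "a", 1 );
        map.put ( "b", 2 );
        map.put ( "c", 3 );

        map.get ( "a" );   //touching "a" moves it to the end of the iteration order

        //Prints {b=2, c=3, a=1}; with the default insertion order it would stay {a=1, b=2, c=3}.
        System.out.println ( map );
    }
}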
Here is the test that proves it works as an LRU cache:
public class LruSimpleTest {

    @Test
    public void test () {
        LruCache <Integer, Integer> cache = new LruSimpleCache<> ( 4 );


        cache.put ( 0, 0 );
        cache.put ( 1, 1 );

        cache.put ( 2, 2 );
        cache.put ( 3, 3 );


        boolean ok = cache.size () == 4 || die ( "size" + cache.size () );


        cache.put ( 4, 4 );
        cache.put ( 5, 5 );
        ok |= cache.size () == 4 || die ( "size" + cache.size () );
        ok |= cache.getSilent ( 2 ) == 2 || die ();
        ok |= cache.getSilent ( 3 ) == 3 || die ();
        ok |= cache.getSilent ( 4 ) == 4 || die ();
        ok |= cache.getSilent ( 5 ) == 5 || die ();


        cache.get ( 2 );
        cache.get ( 3 );
        cache.put ( 6, 6 );
        cache.put ( 7, 7 );
        ok |= cache.size () == 4 || die ( "size" + cache.size () );
        ok |= cache.getSilent ( 2 ) == 2 || die ();
        ok |= cache.getSilent ( 3 ) == 3 || die ();
        ok |= cache.getSilent ( 4 ) == null || die ();
        ok |= cache.getSilent ( 5 ) == null || die ();


        if ( !ok ) die ();

    }
}
Now for the concurrent version...
package org.boon.cache;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class LruSimpleConcurrentCache<K, V> implements LruCache<K, V> {

    final CacheMap<K, V>[] cacheRegions;


    private static class CacheMap<K, V> extends LinkedHashMap<K, V> {
        private final ReadWriteLock readWriteLock;
        private final int limit;

        CacheMap ( final int limit, boolean fair ) {
            super ( 16, 0.75f, true );
            this.limit = limit;
            readWriteLock = new ReentrantReadWriteLock ( fair );

        }

        protected boolean removeEldestEntry ( final Map.Entry<K, V> eldest ) {
            return super.size () > limit;
        }


        @Override
        public V put ( K key, V value ) {
            readWriteLock.writeLock ().lock ();

            V old;
            try {

                old = super.put ( key, value );
            } finally {
                readWriteLock.writeLock ().unlock ();
            }
            return old;

        }


        @Override
        public V get ( Object key ) {
            readWriteLock.writeLock ().lock ();
            V value;

            try {

                value = super.get ( key );
            } finally {
                readWriteLock.writeLock ().unlock ();
            }
            return value;
        }

        @Override
        public V remove ( Object key ) {

            readWriteLock.writeLock ().lock ();
            V value;

            try {

                value = super.remove ( key );
            } finally {
                readWriteLock.writeLock ().unlock ();
            }
            return value;

        }

        public V getSilent ( K key ) {
            readWriteLock.writeLock ().lock ();

            V value;

            try {

                value = this.get ( key );
                if ( value != null ) {
                    this.remove ( key );
                    this.put ( key, value );
                }
            } finally {
                readWriteLock.writeLock ().unlock ();
            }
            return value;

        }

        public int size () {
            readWriteLock.readLock ().lock ();
            int size = -1;
            try {
                size = super.size ();
            } finally {
                readWriteLock.readLock ().unlock ();
            }
            return size;
        }

        public String toString () {
            readWriteLock.readLock ().lock ();
            String str;
            try {
                str = super.toString ();
            } finally {
                readWriteLock.readLock ().unlock ();
            }
            return str;
        }


    }

    public LruSimpleConcurrentCache ( final int limit, boolean fair ) {
        int cores = Runtime.getRuntime ().availableProcessors ();
        int stripeSize = cores < 2 ? 4 : cores * 2;
        cacheRegions = new CacheMap[ stripeSize ];
        for ( int index = 0; index < cacheRegions.length; index++ ) {
            cacheRegions[ index ] = new CacheMap<> ( limit / cacheRegions.length, fair );
        }
    }

    public LruSimpleConcurrentCache ( final int concurrency, final int limit, boolean fair ) {

        cacheRegions = new CacheMap[ concurrency ];
        for ( int index = 0; index < cacheRegions.length; index++ ) {
            cacheRegions[ index ] = new CacheMap<> ( limit / cacheRegions.length, fair );
        }
    }

    private int stripeIndex ( K key ) {
        //Mask off the sign bit so a negative hashCode cannot produce a negative index.
        int hashCode = key.hashCode () * 31;
        return ( hashCode & 0x7FFFFFFF ) % ( cacheRegions.length );
    }

    private CacheMap<K, V> map ( K key ) {
        return cacheRegions[ stripeIndex ( key ) ];
    }

    @Override
    public void put ( K key, V value ) {

        map ( key ).put ( key, value );
    }

    @Override
    public V get ( K key ) {
        return map ( key ).get ( key );
    }

    //For testing only
    @Override
    public V getSilent ( K key ) {
        return map ( key ).getSilent ( key );

    }

    @Override
    public void remove ( K key ) {
        map ( key ).remove ( key );
    }

    @Override
    public int size () {
        int size = 0;
        for ( CacheMap<K, V> cache : cacheRegions ) {
            size += cache.size ();
        }
        return size;
    }

    public String toString () {

        StringBuilder builder = new StringBuilder ();
        for ( CacheMap<K, V> cache : cacheRegions ) {
            builder.append ( cache.toString () ).append ( '\n' );
        }

        return builder.toString ();
    }


}
You can see why I covered the non-concurrent version first. The above attempts to create some stripes to reduce lock contention. It hashes the key and then uses that hash to look up the actual cache region. This makes the limit more of a suggestion or rough guess, within a fair amount of error, depending on how well spread your keys' hash algorithm is.
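As a quick, hypothetical sizing example using the classes above (the key, value, and numbers are only for illustration): the three-argument constructor takes the number of stripes explicitly and splits the limit evenly across them, so each region evicts independently.

//8 stripes, each capped at 1000 / 8 = 125 entries, so roughly 1,000 entries overall
LruCache<String, String> cache = new LruSimpleConcurrentCache<> ( 8, 1000, false );

cache.put ( "session-42", "some value" );
String value = cache.get ( "session-42" );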
Here is the test to show that the concurrent version probably works. :) (Testing it under fire would be the real way.)
public class SimpleConcurrentLRUCache {


    @Test
    public void test () {
        LruCache <Integer, Integer> cache = new LruSimpleConcurrentCache<> ( 1, 4, false );


        cache.put ( 0, 0 );
        cache.put ( 1, 1 );

        cache.put ( 2, 2 );
        cache.put ( 3, 3 );


        boolean ok = cache.size () == 4 || die ( "size" + cache.size () );


        cache.put ( 4, 4 );
        cache.put ( 5, 5 );

        puts (cache);
        ok |= cache.size () == 4 || die ( "size" + cache.size () );
        ok |= cache.getSilent ( 2 ) == 2 || die ();
        ok |= cache.getSilent ( 3 ) == 3 || die ();
        ok |= cache.getSilent ( 4 ) == 4 || die ();
        ok |= cache.getSilent ( 5 ) == 5 || die ();


        cache.get ( 2 );
        cache.get ( 3 );
        cache.put ( 6, 6 );
        cache.put ( 7, 7 );
        ok |= cache.size () == 4 || die ( "size" + cache.size () );
        ok |= cache.getSilent ( 2 ) == 2 || die ();
        ok |= cache.getSilent ( 3 ) == 3 || die ();

        cache.put ( 8, 8 );
        cache.put ( 9, 9 );

        ok |= cache.getSilent ( 4 ) == null || die ();
        ok |= cache.getSilent ( 5 ) == null || die ();


        puts (cache);


        if ( !ok ) die ();

    }


    @Test
    public void test2 () {
        LruCache <Integer, Integer> cache = new LruSimpleConcurrentCache<> ( 400, false );


        cache.put ( 0, 0 );
        cache.put ( 1, 1 );

        cache.put ( 2, 2 );
        cache.put ( 3, 3 );


        for (int index =0 ; index < 5_000; index++) {
            cache.get(0);
            cache.get ( 1 );
            cache.put ( 2, index  );
            cache.put ( 3, index );
            cache.put(index, index);
        }

        boolean ok = cache.getSilent ( 0 ) == 0 || die ();
        ok |= cache.getSilent ( 1 ) == 1 || die ();
        ok |= cache.getSilent ( 2 ) != null || die ();
        ok |= cache.getSilent ( 3 ) != null || die ();

        ok |= cache.size () < 600 || die();
        if ( !ok ) die ();



    }

}
This is the last post. The first post I deleted as it was an LFU, not an LRU, cache.
I thought I would give this another go. I was trying to come up with the simplest version of an LRU cache using the standard JDK without too much implementation.
Here is what I came up with. My first attempt was a bit of a disaster as I implemented an LFU instead of an LRU, and then I added FIFO and LRU support to it... and then I realized it was becoming a monster. Then I started talking to my buddy John, who was barely interested, and I described at length how I implemented an LFU, LRU and FIFO and how you could switch between them with a simple enum arg, and then I realized that all I really wanted was a simple LRU. So ignore the earlier post from me, and let me know if you want to see an LRU/LFU/FIFO cache that is switchable via an enum... no? Ok... here we go.
The simplest possible LRU using just the JDK. I implemented both a concurrent version and a non-concurrent version.
I created a common interface (it is minimalist, so it is likely missing a few features that you would like, but it works for my use cases; if you would like to see feature XYZ, let me know... I live to write code).
public interface LruCache<KEY, VALUE> {
    void put ( KEY key, VALUE value );

    VALUE get ( KEY key );

    VALUE getSilent ( KEY key );

    void remove ( KEY key );

    int size ();
}
You may wonder what getSilent is. I use it for testing. getSilent does not change the LRU score of an item.
First the non-concurrent one....
import java.util.Deque;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;

public class LruCacheNormal<KEY, VALUE> implements LruCache<KEY,VALUE> {

    Map<KEY, VALUE> map = new HashMap<> ();
    Deque<KEY> queue = new LinkedList<> ();
    final int limit;


    public LruCacheNormal ( int limit ) {
        this.limit = limit;
    }

    public void put ( KEY key, VALUE value ) {
        VALUE oldValue = map.put ( key, value );

        /*If there was already an object under this key,
         then remove it before adding to queue
         Frequently used keys will be at the top so the search could be fast.
         */
        if ( oldValue != null ) {
            queue.removeFirstOccurrence ( key );
        }
        queue.addFirst ( key );

        if ( map.size () > limit ) {
            final KEY removedKey = queue.removeLast ();
            map.remove ( removedKey );
        }

    }


    public VALUE get ( KEY key ) {

        /* Frequently used keys will be at the top so the search could be fast.*/
        queue.removeFirstOccurrence ( key );
        queue.addFirst ( key );
        return map.get ( key );
    }


    public VALUE getSilent ( KEY key ) {

        return map.get ( key );
    }

    public void remove ( KEY key ) {

        /* Frequently used keys will be at the top so the search could be fast.*/
        queue.removeFirstOccurrence ( key );
        map.remove ( key );
    }

    public int size () {
        return map.size ();
    }

    public String toString() {
        return map.toString ();
    }
}
The queue.removeFirstOccurrence call is a potentially expensive operation if you have a large cache. One could take LinkedList as an example and add a reverse-lookup hash map from element to node to make remove operations a lot faster and more consistent. I started to, but then realized I don't need it. But... maybe...
When put is called, the key gets added to the queue. When get is called, the key gets removed and re-added to the top of the queue.
If your cache is small and building an item is expensive, then this should be a good cache. If your cache is really large, then the linear search could be a bottleneck, especially if you don't have hot areas of the cache. The more intense the hot spots, the faster the linear search, as hot items are always at the top. Anyway... what is needed for this to go faster is to write another linked list that has a remove operation with a reverse element-to-node lookup; then removing would be about as fast as removing a key from a hash map.
If you have a cache under 1,000 items, this should work out fine.
Here is a simple test to show its operations in action.
public class LruCacheTest {

    @Test
    public void test () {
        LruCache<Integer, Integer> cache = new LruCacheNormal<> ( 4 );


        cache.put ( 0, 0 );
        cache.put ( 1, 1 );

        cache.put ( 2, 2 );
        cache.put ( 3, 3 );


        boolean ok = cache.size () == 4 || die ( "size" + cache.size () );
        ok |= cache.getSilent ( 0 ) == 0 || die ();
        ok |= cache.getSilent ( 3 ) == 3 || die ();


        cache.put ( 4, 4 );
        cache.put ( 5, 5 );
        ok |= cache.size () == 4 || die ( "size" + cache.size () );
        ok |= cache.getSilent ( 0 ) == null || die ();
        ok |= cache.getSilent ( 1 ) == null || die ();
        ok |= cache.getSilent ( 2 ) == 2 || die ();
        ok |= cache.getSilent ( 3 ) == 3 || die ();
        ok |= cache.getSilent ( 4 ) == 4 || die ();
        ok |= cache.getSilent ( 5 ) == 5 || die ();

        if ( !ok ) die ();

    }
}
The last LRU cache was single threaded, and please don't wrap it in a synchronized anything....
Here is a stab at a concurrent version.
import java.util.Deque;
import java.util.LinkedList;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantLock;

public class ConcurrentLruCache<KEY, VALUE> implements LruCache<KEY,VALUE> {

    private final ReentrantLock lock = new ReentrantLock ();


    private final Map<KEY, VALUE> map = new ConcurrentHashMap<> ();
    private final Deque<KEY> queue = new LinkedList<> ();
    private final int limit;


    public ConcurrentLruCache ( int limit ) {
        this.limit = limit;
    }

    @Override
    public void put ( KEY key, VALUE value ) {
        VALUE oldValue = map.put ( key, value );
        if ( oldValue != null ) {
            removeThenAddKey ( key );
        } else {
            addKey ( key );
        }
        if (map.size () > limit) {
            map.remove ( removeLast() );
        }
    }


    @Override
    public VALUE get ( KEY key ) {
        removeThenAddKey ( key );
        return map.get ( key );
    }


    private void addKey(KEY key) {
        lock.lock ();
        try {
            queue.addFirst ( key );
        } finally {
            lock.unlock ();
        }


    }

    private KEY removeLast( ) {
        lock.lock ();
        try {
            final KEY removedKey = queue.removeLast ();
            return removedKey;
        } finally {
            lock.unlock ();
        }
    }

    private void removeThenAddKey(KEY key) {
        lock.lock ();
        try {
            queue.removeFirstOccurrence ( key );
            queue.addFirst ( key );
        } finally {
            lock.unlock ();
        }

    }

    private void removeFirstOccurrence(KEY key) {
        lock.lock ();
        try {
            queue.removeFirstOccurrence ( key );
        } finally {
            lock.unlock ();
        }

    }


    @Override
    public VALUE getSilent ( KEY key ) {
        return map.get ( key );
    }

    @Override
    public void remove ( KEY key ) {
        removeFirstOccurrence ( key );
        map.remove ( key );
    }

    @Override
    public int size () {
        return map.size ();
    }

    public String toString () {
        return map.toString ();
    }
}
The main differences are the use of the ConcurrentHashMap instead of HashMap, and the use of the Lock (I could have gotten away with synchronized, but...).
I have not tested it under fire, but it seems like a simple LRU cache that might work out in 80% of use cases where you need a simple LRU map.
I welcome feedback, except the "why don't you use library A, B, or C" kind. The reason I don't always use a library is that I don't always want every WAR file to be 80 MB, and since I write libraries I tend to make them pluggable, with a good-enough solution in place, so someone can plug in another cache provider if they like. :) I never know when someone might need Guava or Ehcache or something else, so I don't want to include them, but if I make caching pluggable, I will not exclude them either.
Reduction of dependencies has its own reward. I would love to get some feedback on how to make this even simpler, or faster, or both.
Also if anyone knows of a ready to go....
Ok... I know what you are thinking: why doesn't he just use removeEldestEntry from LinkedHashMap? Well, I should, but... but... but... without the access-order flag that would be a FIFO, not an LRU, and we were trying to implement an LRU.
    Map<KEY, VALUE> map = new LinkedHashMap<KEY, VALUE> () {

        @Override
        protected boolean removeEldestEntry ( Map.Entry<KEY, VALUE> eldest ) {
            return this.size () > limit;
        }
    };
This test fails for the above code...
        cache.get ( 2 );
        cache.get ( 3 );
        cache.put ( 6, 6 );
        cache.put ( 7, 7 );
        ok |= cache.size () == 4 || die ( "size" + cache.size () );
        ok |= cache.getSilent ( 2 ) == 2 || die ();
        ok |= cache.getSilent ( 3 ) == 3 || die ();
        ok |= cache.getSilent ( 4 ) == null || die ();
        ok |= cache.getSilent ( 5 ) == null || die ();
So here is a quick and dirty FIFO cache using removeEldestEntry.
import java.util.*;

public class FifoCache<KEY, VALUE> implements LruCache<KEY,VALUE> {

    final int limit;

    Map<KEY, VALUE> map = new LinkedHashMap<KEY, VALUE> () {

        @Override
        protected boolean removeEldestEntry ( Map.Entry<KEY, VALUE> eldest ) {
            return this.size () > limit;
        }
    };


    public FifoCache ( int limit ) {
        this.limit = limit;
    }

    public void put ( KEY key, VALUE value ) {
         map.put ( key, value );


    }


    public VALUE get ( KEY key ) {

        return map.get ( key );
    }


    public VALUE getSilent ( KEY key ) {

        return map.get ( key );
    }

    public void remove ( KEY key ) {
        map.remove ( key );
    }

    public int size () {
        return map.size ();
    }

    public String toString() {
        return map.toString ();
    }
}
FIFOs are fast. No searching around. You could put a FIFO in front of an LRU, and that would handle most hot entries quite nicely. A better LRU is going to need that reverse element-to-node feature, as in the sketch below.
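For the curious, here is a minimal sketch of that reverse element-to-node idea using only the JDK: a HashMap from key to a node in a hand-rolled doubly linked list, so get, put, and eviction are all O(1) with no linear scan of a queue. It implements the same LruCache interface as above, but it is illustrative only and has not been tested under fire.

import java.util.HashMap;
import java.util.Map;

public class LruNodeCache<KEY, VALUE> implements LruCache<KEY, VALUE> {

    private static final class Node<K, V> {
        K key;
        V value;
        Node<K, V> prev;
        Node<K, V> next;

        Node ( K key, V value ) {
            this.key = key;
            this.value = value;
        }
    }

    private final Map<KEY, Node<KEY, VALUE>> map = new HashMap<> ();
    private Node<KEY, VALUE> head;   //most recently used
    private Node<KEY, VALUE> tail;   //least recently used
    private final int limit;

    public LruNodeCache ( int limit ) {
        this.limit = limit;
    }

    @Override
    public void put ( KEY key, VALUE value ) {
        Node<KEY, VALUE> node = map.get ( key );
        if ( node != null ) {
            node.value = value;
            moveToFront ( node );
            return;
        }
        node = new Node<> ( key, value );
        map.put ( key, node );
        linkFirst ( node );
        if ( map.size () > limit ) {
            Node<KEY, VALUE> eldest = tail;   //least recently used entry
            map.remove ( eldest.key );
            unlink ( eldest );
        }
    }

    @Override
    public VALUE get ( KEY key ) {
        Node<KEY, VALUE> node = map.get ( key );
        if ( node == null ) return null;
        moveToFront ( node );   //touching an entry makes it the most recently used
        return node.value;
    }

    @Override
    public VALUE getSilent ( KEY key ) {   //does not change the LRU order, for testing
        Node<KEY, VALUE> node = map.get ( key );
        return node == null ? null : node.value;
    }

    @Override
    public void remove ( KEY key ) {
        Node<KEY, VALUE> node = map.remove ( key );
        if ( node != null ) unlink ( node );
    }

    @Override
    public int size () {
        return map.size ();
    }

    private void moveToFront ( Node<KEY, VALUE> node ) {
        if ( node != head ) {
            unlink ( node );
            linkFirst ( node );
        }
    }

    private void linkFirst ( Node<KEY, VALUE> node ) {
        node.prev = null;
        node.next = head;
        if ( head != null ) head.prev = node;
        head = node;
        if ( tail == null ) tail = node;
    }

    private void unlink ( Node<KEY, VALUE> node ) {
        if ( node.prev != null ) node.prev.next = node.next; else head = node.next;
        if ( node.next != null ) node.next.prev = node.prev; else tail = node.prev;
        node.prev = null;
        node.next = null;
    }
}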
Anyway... now that I wrote some code, let me go through the other answers and see what I missed the first time I scanned them.