[2016-08-11 12:58:37,209][DEBUG][bootstrap ] Linux seccomp filter installation successful, threads: [all]
[2016-08-11 12:58:37,214][DEBUG][bootstrap ] java.class.path: /usr/share/elasticsearch/lib/elasticsearch-2.3.4.jar:/usr/share/elasticsearch/lib/jackson-dataformat-smile-2.6.6.jar:/usr/share/elasticsearch/lib/lucene-highlighter-5.5.0.jar:/usr/share/elasticsearch/lib/lucene-core-5.5.0.jar:/usr/share/elasticsearch/lib/lucene-queries-5.5.0.jar:/usr/share/elasticsearch/lib/commons-cli-1.3.1.jar:/usr/share/elasticsearch/lib/jackson-core-2.6.6.jar:/usr/share/elasticsearch/lib/lucene-spatial3d-5.5.0.jar:/usr/share/elasticsearch/lib/joda-convert-1.2.jar:/usr/share/elasticsearch/lib/jna-4.1.0.jar:/usr/share/elasticsearch/lib/lucene-misc-5.5.0.jar:/usr/share/elasticsearch/lib/guava-18.0.jar:/usr/share/elasticsearch/lib/compiler-0.8.13.jar:/usr/share/elasticsearch/lib/lucene-sandbox-5.5.0.jar:/usr/share/elasticsearch/lib/jts-1.13.jar:/usr/share/elasticsearch/lib/lucene-analyzers-common-5.5.0.jar:/usr/share/elasticsearch/lib/compress-lzf-1.0.2.jar:/usr/share/elasticsearch/lib/securesm-1.0.jar:/usr/share/elasticsearch/lib/t-digest-3.0.jar:/usr/share/elasticsearch/lib/lucene-suggest-5.5.0.jar:/usr/share/elasticsearch/lib/lucene-grouping-5.5.0.jar:/usr/share/elasticsearch/lib/jackson-dataformat-cbor-2.6.6.jar:/usr/share/elasticsearch/lib/jsr166e-1.1.0.jar:/usr/share/elasticsearch/lib/snakeyaml-1.15.jar:/usr/share/elasticsearch/lib/hppc-0.7.1.jar:/usr/share/elasticsearch/lib/spatial4j-0.5.jar:/usr/share/elasticsearch/lib/netty-3.10.5.Final.jar:/usr/share/elasticsearch/lib/lucene-memory-5.5.0.jar:/usr/share/elasticsearch/lib/apache-log4j-extras-1.2.17.jar:/usr/share/elasticsearch/lib/jackson-dataformat-yaml-2.6.6.jar:/usr/share/elasticsearch/lib/elasticsearch-2.3.4.jar:/usr/share/elasticsearch/lib/lucene-join-5.5.0.jar:/usr/share/elasticsearch/lib/joda-time-2.9.4.jar:/usr/share/elasticsearch/lib/log4j-1.2.17.jar:/usr/share/elasticsearch/lib/lucene-spatial-5.5.0.jar:/usr/share/elasticsearch/lib/HdrHistogram-2.1.6.jar:/usr/share/elasticsearch/lib/lucene-queryparser-5.5.0.jar:/usr/share/elasticsearch/lib/lucene-backward-codecs-5.5.0.jar
[2016-08-11 12:58:37,218][DEBUG][bootstrap ] sun.boot.class.path: /usr/lib/jvm/java-8-openjdk-amd64/jre/lib/resources.jar:/usr/lib/jvm/java-8-openjdk-amd64/jre/lib/rt.jar:/usr/lib/jvm/java-8-openjdk-amd64/jre/lib/sunrsasign.jar:/usr/lib/jvm/java-8-openjdk-amd64/jre/lib/jsse.jar:/usr/lib/jvm/java-8-openjdk-amd64/jre/lib/jce.jar:/usr/lib/jvm/java-8-openjdk-amd64/jre/lib/charsets.jar:/usr/lib/jvm/java-8-openjdk-amd64/jre/lib/jfr.jar:/usr/lib/jvm/java-8-openjdk-amd64/jre/classes
[2016-08-11 12:58:37,218][DEBUG][bootstrap ] classloader urls: [file:/usr/share/elasticsearch/lib/elasticsearch-2.3.4.jar, file:/usr/share/elasticsearch/lib/jackson-dataformat-smile-2.6.6.jar, file:/usr/share/elasticsearch/lib/lucene-highlighter-5.5.0.jar, file:/usr/share/elasticsearch/lib/lucene-core-5.5.0.jar, file:/usr/share/elasticsearch/lib/lucene-queries-5.5.0.jar, file:/usr/share/elasticsearch/lib/commons-cli-1.3.1.jar, file:/usr/share/elasticsearch/lib/jackson-core-2.6.6.jar, file:/usr/share/elasticsearch/lib/lucene-spatial3d-5.5.0.jar, file:/usr/share/elasticsearch/lib/joda-convert-1.2.jar, file:/usr/share/elasticsearch/lib/jna-4.1.0.jar, file:/usr/share/elasticsearch/lib/lucene-misc-5.5.0.jar, file:/usr/share/elasticsearch/lib/guava-18.0.jar, file:/usr/share/elasticsearch/lib/compiler-0.8.13.jar, file:/usr/share/elasticsearch/lib/lucene-sandbox-5.5.0.jar, file:/usr/share/elasticsearch/lib/jts-1.13.jar, file:/usr/share/elasticsearch/lib/lucene-analyzers-common-5.5.0.jar, file:/usr/share/elasticsearch/lib/compress-lzf-1.0.2.jar, file:/usr/share/elasticsearch/lib/securesm-1.0.jar, file:/usr/share/elasticsearch/lib/t-digest-3.0.jar, file:/usr/share/elasticsearch/lib/lucene-suggest-5.5.0.jar, file:/usr/share/elasticsearch/lib/lucene-grouping-5.5.0.jar, file:/usr/share/elasticsearch/lib/jackson-dataformat-cbor-2.6.6.jar, file:/usr/share/elasticsearch/lib/jsr166e-1.1.0.jar, file:/usr/share/elasticsearch/lib/snakeyaml-1.15.jar, file:/usr/share/elasticsearch/lib/hppc-0.7.1.jar, file:/usr/share/elasticsearch/lib/spatial4j-0.5.jar, file:/usr/share/elasticsearch/lib/netty-3.10.5.Final.jar, file:/usr/share/elasticsearch/lib/lucene-memory-5.5.0.jar, file:/usr/share/elasticsearch/lib/apache-log4j-extras-1.2.17.jar, file:/usr/share/elasticsearch/lib/jackson-dataformat-yaml-2.6.6.jar, file:/usr/share/elasticsearch/lib/elasticsearch-2.3.4.jar, file:/usr/share/elasticsearch/lib/lucene-join-5.5.0.jar, file:/usr/share/elasticsearch/lib/joda-time-2.9.4.jar, file:/usr/share/elasticsearch/lib/log4j-1.2.17.jar, file:/usr/share/elasticsearch/lib/lucene-spatial-5.5.0.jar, file:/usr/share/elasticsearch/lib/HdrHistogram-2.1.6.jar, file:/usr/share/elasticsearch/lib/lucene-queryparser-5.5.0.jar, file:/usr/share/elasticsearch/lib/lucene-backward-codecs-5.5.0.jar]
[2016-08-11 12:58:37,227][DEBUG][bootstrap ] java.home: /usr/lib/jvm/java-8-openjdk-amd64/jre
[2016-08-11 12:58:37,227][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/elasticsearch-2.3.4.jar
[2016-08-11 12:58:37,270][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jackson-dataformat-smile-2.6.6.jar
[2016-08-11 12:58:37,271][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-highlighter-5.5.0.jar
[2016-08-11 12:58:37,272][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-core-5.5.0.jar
[2016-08-11 12:58:37,279][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-queries-5.5.0.jar
[2016-08-11 12:58:37,281][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/commons-cli-1.3.1.jar
[2016-08-11 12:58:37,282][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jackson-core-2.6.6.jar
[2016-08-11 12:58:37,288][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-spatial3d-5.5.0.jar
[2016-08-11 12:58:37,289][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/joda-convert-1.2.jar
[2016-08-11 12:58:37,290][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jna-4.1.0.jar
[2016-08-11 12:58:37,291][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-misc-5.5.0.jar
[2016-08-11 12:58:37,292][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/guava-18.0.jar
[2016-08-11 12:58:37,296][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/compiler-0.8.13.jar
[2016-08-11 12:58:37,299][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-sandbox-5.5.0.jar
[2016-08-11 12:58:37,303][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jts-1.13.jar
[2016-08-11 12:58:37,305][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-analyzers-common-5.5.0.jar
[2016-08-11 12:58:37,307][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/compress-lzf-1.0.2.jar
[2016-08-11 12:58:37,311][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/securesm-1.0.jar
[2016-08-11 12:58:37,311][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/t-digest-3.0.jar
[2016-08-11 12:58:37,312][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-suggest-5.5.0.jar
[2016-08-11 12:58:37,314][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-grouping-5.5.0.jar
[2016-08-11 12:58:37,316][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jackson-dataformat-cbor-2.6.6.jar
[2016-08-11 12:58:37,317][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jsr166e-1.1.0.jar
[2016-08-11 12:58:37,317][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/snakeyaml-1.15.jar
[2016-08-11 12:58:37,321][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/hppc-0.7.1.jar
[2016-08-11 12:58:37,327][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/spatial4j-0.5.jar
[2016-08-11 12:58:37,327][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/netty-3.10.5.Final.jar
[2016-08-11 12:58:37,331][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-memory-5.5.0.jar
[2016-08-11 12:58:37,332][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/apache-log4j-extras-1.2.17.jar
[2016-08-11 12:58:37,333][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jackson-dataformat-yaml-2.6.6.jar
[2016-08-11 12:58:37,335][DEBUG][bootstrap ] excluding duplicate classpath element: /usr/share/elasticsearch/lib/elasticsearch-2.3.4.jar
[2016-08-11 12:58:37,336][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-join-5.5.0.jar
[2016-08-11 12:58:37,337][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/joda-time-2.9.4.jar
[2016-08-11 12:58:37,343][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/log4j-1.2.17.jar
[2016-08-11 12:58:37,344][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-spatial-5.5.0.jar
[2016-08-11 12:58:37,345][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/HdrHistogram-2.1.6.jar
[2016-08-11 12:58:37,346][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-queryparser-5.5.0.jar
[2016-08-11 12:58:37,348][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-backward-codecs-5.5.0.jar
[2016-08-11 12:58:37,530][INFO ][node ] [Vader] version[2.3.4], pid[17531], build[e455fd0/2016-06-30T11:24:31Z]
[2016-08-11 12:58:37,535][INFO ][node ] [Vader] initializing ...
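The block above is the jar-hell check Elasticsearch 2.x runs at bootstrap: it walks every classpath element looking for conflicting classes. The recurring "excluding duplicate classpath element" line is benign here: /usr/share/elasticsearch/lib/elasticsearch-2.3.4.jar genuinely appears twice in java.class.path above (most likely because the launch script puts both the elasticsearch jar and lib/* on the classpath), and the scanner simply collapses the repeat rather than treating it as a conflict. A minimal sketch of that de-duplication step, in plain JDK code and not Elasticsearch's actual JarHell implementation:

    import java.io.File;
    import java.util.LinkedHashSet;
    import java.util.Set;

    // Minimal sketch, not Elasticsearch's JarHell code: walk java.class.path in
    // order, keep the first occurrence of each element, and report repeats the
    // way the log above does.
    public class ClasspathDedup {
        public static void main(String[] args) {
            Set<String> unique = new LinkedHashSet<>();
            String classPath = System.getProperty("java.class.path");
            for (String element : classPath.split(File.pathSeparator)) {
                if (!unique.add(element)) {
                    System.out.println("excluding duplicate classpath element: " + element);
                }
            }
            for (String element : unique) {
                System.out.println("examining jar: " + element);
            }
        }
    }

The same scan repeats below once per module and plugin bundle (lang-groovy, reindex, lang-expression, search-guard-ssl, search-guard-2), which is why the lib/ jars are examined again each time.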
[2016-08-11 12:58:37,535][DEBUG][node ] [Vader] using config [/etc/elasticsearch], data [[/var/lib/elasticsearch]], logs [/var/log/elasticsearch], plugins [/usr/share/elasticsearch/plugins]
[2016-08-11 12:58:37,555][DEBUG][bootstrap ] java.home: /usr/lib/jvm/java-8-openjdk-amd64/jre
[2016-08-11 12:58:37,555][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/elasticsearch-2.3.4.jar
[2016-08-11 12:58:37,571][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jackson-dataformat-smile-2.6.6.jar
[2016-08-11 12:58:37,571][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-highlighter-5.5.0.jar
[2016-08-11 12:58:37,572][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-core-5.5.0.jar
[2016-08-11 12:58:37,575][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-queries-5.5.0.jar
[2016-08-11 12:58:37,576][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/commons-cli-1.3.1.jar
[2016-08-11 12:58:37,577][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jackson-core-2.6.6.jar
[2016-08-11 12:58:37,583][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-spatial3d-5.5.0.jar
[2016-08-11 12:58:37,583][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/joda-convert-1.2.jar
[2016-08-11 12:58:37,587][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jna-4.1.0.jar
[2016-08-11 12:58:37,588][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-misc-5.5.0.jar
[2016-08-11 12:58:37,591][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/guava-18.0.jar
[2016-08-11 12:58:37,594][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/compiler-0.8.13.jar
[2016-08-11 12:58:37,598][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-sandbox-5.5.0.jar
[2016-08-11 12:58:37,599][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jts-1.13.jar
[2016-08-11 12:58:37,600][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-analyzers-common-5.5.0.jar
[2016-08-11 12:58:37,604][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/compress-lzf-1.0.2.jar
[2016-08-11 12:58:37,607][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/securesm-1.0.jar
[2016-08-11 12:58:37,609][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/t-digest-3.0.jar
[2016-08-11 12:58:37,610][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-suggest-5.5.0.jar
[2016-08-11 12:58:37,614][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-grouping-5.5.0.jar
[2016-08-11 12:58:37,614][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jackson-dataformat-cbor-2.6.6.jar
[2016-08-11 12:58:37,614][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jsr166e-1.1.0.jar
[2016-08-11 12:58:37,615][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/snakeyaml-1.15.jar
[2016-08-11 12:58:37,615][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/hppc-0.7.1.jar
[2016-08-11 12:58:37,623][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/spatial4j-0.5.jar
[2016-08-11 12:58:37,624][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/netty-3.10.5.Final.jar
[2016-08-11 12:58:37,626][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-memory-5.5.0.jar
[2016-08-11 12:58:37,627][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/apache-log4j-extras-1.2.17.jar
[2016-08-11 12:58:37,629][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jackson-dataformat-yaml-2.6.6.jar
[2016-08-11 12:58:37,632][DEBUG][bootstrap ] excluding duplicate classpath element: /usr/share/elasticsearch/lib/elasticsearch-2.3.4.jar
[2016-08-11 12:58:37,633][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-join-5.5.0.jar
[2016-08-11 12:58:37,633][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/joda-time-2.9.4.jar
[2016-08-11 12:58:37,637][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/log4j-1.2.17.jar
[2016-08-11 12:58:37,639][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-spatial-5.5.0.jar
[2016-08-11 12:58:37,641][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/HdrHistogram-2.1.6.jar
[2016-08-11 12:58:37,643][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-queryparser-5.5.0.jar
[2016-08-11 12:58:37,644][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-backward-codecs-5.5.0.jar
[2016-08-11 12:58:37,647][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/modules/lang-groovy/groovy-2.4.6-indy.jar
[2016-08-11 12:58:37,652][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/modules/lang-groovy/lang-groovy-2.3.4.jar
[2016-08-11 12:58:37,909][DEBUG][bootstrap ] java.home: /usr/lib/jvm/java-8-openjdk-amd64/jre
[2016-08-11 12:58:37,910][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/elasticsearch-2.3.4.jar
[2016-08-11 12:58:37,922][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jackson-dataformat-smile-2.6.6.jar
[2016-08-11 12:58:37,927][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-highlighter-5.5.0.jar
[2016-08-11 12:58:37,928][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-core-5.5.0.jar
[2016-08-11 12:58:37,930][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-queries-5.5.0.jar
[2016-08-11 12:58:37,937][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/commons-cli-1.3.1.jar
[2016-08-11 12:58:37,938][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jackson-core-2.6.6.jar
[2016-08-11 12:58:37,939][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-spatial3d-5.5.0.jar
[2016-08-11 12:58:37,940][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/joda-convert-1.2.jar
[2016-08-11 12:58:37,941][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jna-4.1.0.jar
[2016-08-11 12:58:37,942][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-misc-5.5.0.jar
[2016-08-11 12:58:37,942][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/guava-18.0.jar
[2016-08-11 12:58:37,945][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/compiler-0.8.13.jar
[2016-08-11 12:58:37,946][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-sandbox-5.5.0.jar
[2016-08-11 12:58:37,947][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jts-1.13.jar
[2016-08-11 12:58:37,948][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-analyzers-common-5.5.0.jar
[2016-08-11 12:58:37,953][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/compress-lzf-1.0.2.jar
[2016-08-11 12:58:37,954][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/securesm-1.0.jar
[2016-08-11 12:58:37,955][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/t-digest-3.0.jar
[2016-08-11 12:58:37,956][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-suggest-5.5.0.jar
[2016-08-11 12:58:37,957][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-grouping-5.5.0.jar
[2016-08-11 12:58:37,959][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jackson-dataformat-cbor-2.6.6.jar
[2016-08-11 12:58:37,960][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jsr166e-1.1.0.jar
[2016-08-11 12:58:37,961][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/snakeyaml-1.15.jar
[2016-08-11 12:58:37,963][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/hppc-0.7.1.jar
[2016-08-11 12:58:37,964][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/spatial4j-0.5.jar
[2016-08-11 12:58:37,966][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/netty-3.10.5.Final.jar
[2016-08-11 12:58:37,968][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-memory-5.5.0.jar
[2016-08-11 12:58:37,968][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/apache-log4j-extras-1.2.17.jar
[2016-08-11 12:58:37,969][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jackson-dataformat-yaml-2.6.6.jar
[2016-08-11 12:58:37,970][DEBUG][bootstrap ] excluding duplicate classpath element: /usr/share/elasticsearch/lib/elasticsearch-2.3.4.jar
[2016-08-11 12:58:37,970][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-join-5.5.0.jar
[2016-08-11 12:58:37,972][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/joda-time-2.9.4.jar
[2016-08-11 12:58:37,974][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/log4j-1.2.17.jar
[2016-08-11 12:58:37,975][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-spatial-5.5.0.jar
[2016-08-11 12:58:37,976][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/HdrHistogram-2.1.6.jar
[2016-08-11 12:58:37,976][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-queryparser-5.5.0.jar
[2016-08-11 12:58:37,977][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-backward-codecs-5.5.0.jar
[2016-08-11 12:58:37,979][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/modules/reindex/reindex-2.3.4.jar
[2016-08-11 12:58:38,015][DEBUG][bootstrap ] java.home: /usr/lib/jvm/java-8-openjdk-amd64/jre
[2016-08-11 12:58:38,016][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/elasticsearch-2.3.4.jar
[2016-08-11 12:58:38,028][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jackson-dataformat-smile-2.6.6.jar
[2016-08-11 12:58:38,031][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-highlighter-5.5.0.jar
[2016-08-11 12:58:38,032][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-core-5.5.0.jar
[2016-08-11 12:58:38,033][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-queries-5.5.0.jar
[2016-08-11 12:58:38,035][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/commons-cli-1.3.1.jar
[2016-08-11 12:58:38,036][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jackson-core-2.6.6.jar
[2016-08-11 12:58:38,037][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-spatial3d-5.5.0.jar
[2016-08-11 12:58:38,037][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/joda-convert-1.2.jar
[2016-08-11 12:58:38,039][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jna-4.1.0.jar
[2016-08-11 12:58:38,039][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-misc-5.5.0.jar
[2016-08-11 12:58:38,040][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/guava-18.0.jar
[2016-08-11 12:58:38,042][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/compiler-0.8.13.jar
[2016-08-11 12:58:38,043][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-sandbox-5.5.0.jar
[2016-08-11 12:58:38,044][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jts-1.13.jar
[2016-08-11 12:58:38,045][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-analyzers-common-5.5.0.jar
[2016-08-11 12:58:38,046][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/compress-lzf-1.0.2.jar
[2016-08-11 12:58:38,046][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/securesm-1.0.jar
[2016-08-11 12:58:38,047][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/t-digest-3.0.jar
[2016-08-11 12:58:38,047][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-suggest-5.5.0.jar
[2016-08-11 12:58:38,048][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-grouping-5.5.0.jar
[2016-08-11 12:58:38,049][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jackson-dataformat-cbor-2.6.6.jar
[2016-08-11 12:58:38,050][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jsr166e-1.1.0.jar
[2016-08-11 12:58:38,050][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/snakeyaml-1.15.jar
[2016-08-11 12:58:38,052][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/hppc-0.7.1.jar
[2016-08-11 12:58:38,056][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/spatial4j-0.5.jar
[2016-08-11 12:58:38,057][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/netty-3.10.5.Final.jar
[2016-08-11 12:58:38,059][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-memory-5.5.0.jar
[2016-08-11 12:58:38,059][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/apache-log4j-extras-1.2.17.jar
[2016-08-11 12:58:38,060][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jackson-dataformat-yaml-2.6.6.jar
[2016-08-11 12:58:38,061][DEBUG][bootstrap ] excluding duplicate classpath element: /usr/share/elasticsearch/lib/elasticsearch-2.3.4.jar
[2016-08-11 12:58:38,062][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-join-5.5.0.jar
[2016-08-11 12:58:38,063][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/joda-time-2.9.4.jar
[2016-08-11 12:58:38,067][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/log4j-1.2.17.jar
[2016-08-11 12:58:38,067][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-spatial-5.5.0.jar
[2016-08-11 12:58:38,071][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/HdrHistogram-2.1.6.jar
[2016-08-11 12:58:38,071][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-queryparser-5.5.0.jar
[2016-08-11 12:58:38,072][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-backward-codecs-5.5.0.jar
[2016-08-11 12:58:38,078][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/modules/lang-expression/antlr4-runtime-4.5.1-1.jar
[2016-08-11 12:58:38,079][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/modules/lang-expression/lang-expression-2.3.4.jar
[2016-08-11 12:58:38,079][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/modules/lang-expression/asm-5.0.4.jar
[2016-08-11 12:58:38,080][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/modules/lang-expression/asm-commons-5.0.4.jar
[2016-08-11 12:58:38,081][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/modules/lang-expression/lucene-expressions-5.5.0.jar
[2016-08-11 12:58:38,323][DEBUG][bootstrap ] java.home: /usr/lib/jvm/java-8-openjdk-amd64/jre
[2016-08-11 12:58:38,323][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/elasticsearch-2.3.4.jar
[2016-08-11 12:58:38,352][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jackson-dataformat-smile-2.6.6.jar
[2016-08-11 12:58:38,353][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-highlighter-5.5.0.jar
[2016-08-11 12:58:38,354][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-core-5.5.0.jar
[2016-08-11 12:58:38,357][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-queries-5.5.0.jar
[2016-08-11 12:58:38,358][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/commons-cli-1.3.1.jar
[2016-08-11 12:58:38,358][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jackson-core-2.6.6.jar
[2016-08-11 12:58:38,359][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-spatial3d-5.5.0.jar
[2016-08-11 12:58:38,359][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/joda-convert-1.2.jar
[2016-08-11 12:58:38,360][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jna-4.1.0.jar
[2016-08-11 12:58:38,361][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-misc-5.5.0.jar
[2016-08-11 12:58:38,363][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/guava-18.0.jar
[2016-08-11 12:58:38,365][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/compiler-0.8.13.jar
[2016-08-11 12:58:38,365][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-sandbox-5.5.0.jar
[2016-08-11 12:58:38,366][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jts-1.13.jar
[2016-08-11 12:58:38,367][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-analyzers-common-5.5.0.jar
[2016-08-11 12:58:38,371][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/compress-lzf-1.0.2.jar
[2016-08-11 12:58:38,371][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/securesm-1.0.jar
[2016-08-11 12:58:38,372][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/t-digest-3.0.jar
[2016-08-11 12:58:38,372][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-suggest-5.5.0.jar
[2016-08-11 12:58:38,379][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-grouping-5.5.0.jar
[2016-08-11 12:58:38,380][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jackson-dataformat-cbor-2.6.6.jar
[2016-08-11 12:58:38,383][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jsr166e-1.1.0.jar
[2016-08-11 12:58:38,383][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/snakeyaml-1.15.jar
[2016-08-11 12:58:38,387][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/hppc-0.7.1.jar
[2016-08-11 12:58:38,388][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/spatial4j-0.5.jar
[2016-08-11 12:58:38,388][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/netty-3.10.5.Final.jar
[2016-08-11 12:58:38,390][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-memory-5.5.0.jar
[2016-08-11 12:58:38,391][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/apache-log4j-extras-1.2.17.jar
[2016-08-11 12:58:38,392][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jackson-dataformat-yaml-2.6.6.jar
[2016-08-11 12:58:38,393][DEBUG][bootstrap ] excluding duplicate classpath element: /usr/share/elasticsearch/lib/elasticsearch-2.3.4.jar
[2016-08-11 12:58:38,395][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-join-5.5.0.jar
[2016-08-11 12:58:38,395][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/joda-time-2.9.4.jar
[2016-08-11 12:58:38,397][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/log4j-1.2.17.jar
[2016-08-11 12:58:38,399][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-spatial-5.5.0.jar
[2016-08-11 12:58:38,400][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/HdrHistogram-2.1.6.jar
[2016-08-11 12:58:38,401][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-queryparser-5.5.0.jar
[2016-08-11 12:58:38,403][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-backward-codecs-5.5.0.jar
[2016-08-11 12:58:38,403][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/plugins/search-guard-ssl/netty-buffer-4.0.37.Final.jar
[2016-08-11 12:58:38,404][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/plugins/search-guard-ssl/netty-handler-4.0.37.Final.jar
[2016-08-11 12:58:38,405][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/plugins/search-guard-ssl/netty-common-4.0.37.Final.jar
[2016-08-11 12:58:38,405][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/plugins/search-guard-ssl/netty-transport-4.0.37.Final.jar
[2016-08-11 12:58:38,406][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/plugins/search-guard-ssl/netty-tcnative-openss-static.jar
[2016-08-11 12:58:38,407][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/plugins/search-guard-ssl/netty-codec-4.0.37.Final.jar
[2016-08-11 12:58:38,408][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/plugins/search-guard-ssl/search-guard-ssl-2.3.4.14.jar
[2016-08-11 12:58:38,409][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/plugins/search-guard-2/search-guard-2-2.3.4.5.jar
[2016-08-11 12:58:38,441][DEBUG][io.netty.util.internal.logging.InternalLoggerFactory] Using Log4J as the default logging framework
[2016-08-11 12:58:38,452][DEBUG][io.netty.util.internal.PlatformDependent0] java.nio.Buffer.address: available
[2016-08-11 12:58:38,453][DEBUG][io.netty.util.internal.PlatformDependent0] sun.misc.Unsafe.theUnsafe: available
[2016-08-11 12:58:38,453][DEBUG][io.netty.util.internal.PlatformDependent0] sun.misc.Unsafe.copyMemory: available
[2016-08-11 12:58:38,454][DEBUG][io.netty.util.internal.PlatformDependent0] java.nio.Bits.unaligned: true
[2016-08-11 12:58:38,455][DEBUG][io.netty.util.internal.PlatformDependent0] java.nio.DirectByteBuffer.<init>(long, int): available
[2016-08-11 12:58:38,458][DEBUG][io.netty.util.internal.PlatformDependent] Java version: 8
[2016-08-11 12:58:38,463][DEBUG][io.netty.util.internal.PlatformDependent] -Dio.netty.noUnsafe: false
[2016-08-11 12:58:38,463][DEBUG][io.netty.util.internal.PlatformDependent] sun.misc.Unsafe: available
[2016-08-11 12:58:38,464][DEBUG][io.netty.util.internal.PlatformDependent] -Dio.netty.noJavassist: false
[2016-08-11 12:58:38,465][DEBUG][io.netty.util.internal.PlatformDependent] Javassist: unavailable
[2016-08-11 12:58:38,465][DEBUG][io.netty.util.internal.PlatformDependent] You don't have Javassist in your class path or you don't have enough permission to load dynamically generated classes. Please check the configuration for better performance.
[2016-08-11 12:58:38,466][DEBUG][io.netty.util.internal.PlatformDependent] -Dio.netty.tmpdir: /tmp (java.io.tmpdir)
[2016-08-11 12:58:38,466][DEBUG][io.netty.util.internal.PlatformDependent] -Dio.netty.bitMode: 64 (sun.arch.data.model)
[2016-08-11 12:58:38,466][DEBUG][io.netty.util.internal.PlatformDependent] -Dio.netty.noPreferDirect: false
[2016-08-11 12:58:38,467][DEBUG][io.netty.util.internal.PlatformDependent] io.netty.maxDirectMemory: 1065025536 bytes
[2016-08-11 12:58:38,480][DEBUG][io.netty.util.internal.NativeLibraryLoader] -Dio.netty.tmpdir: /tmp (java.io.tmpdir)
[2016-08-11 12:58:38,482][DEBUG][io.netty.util.internal.NativeLibraryLoader] -Dio.netty.native.workdir: /tmp (io.netty.tmpdir)
[2016-08-11 12:58:38,484][DEBUG][io.netty.util.internal.NativeLibraryLoader] Unable to load the library: netty-tcnative-linux-x86_64.
java.lang.UnsatisfiedLinkError: no netty-tcnative-linux-x86_64 in java.library.path
    at java.lang.ClassLoader.loadLibrary(ClassLoader.java:1867)
    at java.lang.Runtime.loadLibrary0(Runtime.java:870)
    at java.lang.System.loadLibrary(System.java:1122)
    at io.netty.util.internal.NativeLibraryLoader.load(NativeLibraryLoader.java:189)
    at io.netty.util.internal.NativeLibraryLoader.loadFirstAvailable(NativeLibraryLoader.java:161)
    at io.netty.handler.ssl.OpenSsl.loadTcNative(OpenSsl.java:238)
    at io.netty.handler.ssl.OpenSsl.<clinit>(OpenSsl.java:65)
    at com.floragunn.searchguard.ssl.SearchGuardSSLPlugin$1.run(SearchGuardSSLPlugin.java:69)
    at java.security.AccessController.doPrivileged(Native Method)
    at com.floragunn.searchguard.ssl.SearchGuardSSLPlugin.<init>(SearchGuardSSLPlugin.java:65)
    at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
    at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
    at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
    at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
    at org.elasticsearch.plugins.PluginsService.loadPlugin(PluginsService.java:472)
    at org.elasticsearch.plugins.PluginsService.loadBundles(PluginsService.java:432)
    at org.elasticsearch.plugins.PluginsService.<init>(PluginsService.java:129)
    at org.elasticsearch.node.Node.<init>(Node.java:158)
    at org.elasticsearch.node.Node.<init>(Node.java:140)
    at org.elasticsearch.node.NodeBuilder.build(NodeBuilder.java:143)
    at org.elasticsearch.bootstrap.Bootstrap.setup(Bootstrap.java:178)
    at org.elasticsearch.bootstrap.Bootstrap.init(Bootstrap.java:270)
    at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:35)
[2016-08-11 12:58:38,496][DEBUG][io.netty.util.internal.NativeLibraryLoader] Unable to load the library: netty-tcnative-linux-x86_64-fedora.
java.lang.UnsatisfiedLinkError: no netty-tcnative-linux-x86_64-fedora in java.library.path
    at java.lang.ClassLoader.loadLibrary(ClassLoader.java:1867)
    at java.lang.Runtime.loadLibrary0(Runtime.java:870)
    at java.lang.System.loadLibrary(System.java:1122)
    at io.netty.util.internal.NativeLibraryLoader.load(NativeLibraryLoader.java:189)
    at io.netty.util.internal.NativeLibraryLoader.loadFirstAvailable(NativeLibraryLoader.java:161)
    at io.netty.handler.ssl.OpenSsl.loadTcNative(OpenSsl.java:238)
    at io.netty.handler.ssl.OpenSsl.<clinit>(OpenSsl.java:65)
    at com.floragunn.searchguard.ssl.SearchGuardSSLPlugin$1.run(SearchGuardSSLPlugin.java:69)
    at java.security.AccessController.doPrivileged(Native Method)
    at com.floragunn.searchguard.ssl.SearchGuardSSLPlugin.<init>(SearchGuardSSLPlugin.java:65)
    at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
    at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
    at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
    at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
    at org.elasticsearch.plugins.PluginsService.loadPlugin(PluginsService.java:472)
    at org.elasticsearch.plugins.PluginsService.loadBundles(PluginsService.java:432)
    at org.elasticsearch.plugins.PluginsService.<init>(PluginsService.java:129)
    at org.elasticsearch.node.Node.<init>(Node.java:158)
    at org.elasticsearch.node.Node.<init>(Node.java:140)
    at org.elasticsearch.node.NodeBuilder.build(NodeBuilder.java:143)
    at org.elasticsearch.bootstrap.Bootstrap.setup(Bootstrap.java:178)
    at org.elasticsearch.bootstrap.Bootstrap.init(Bootstrap.java:270)
    at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:35)
[2016-08-11 12:58:38,581][INFO ][com.floragunn.searchguard.ssl.SearchGuardSSLPlugin] Search Guard 2 plugin also available
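The two UnsatisfiedLinkError traces above are expected on this host and are not fatal: while constructing the plugin, Search Guard SSL triggers the static initializer of Netty's io.netty.handler.ssl.OpenSsl, which probes java.library.path for the optional netty-tcnative binding (plain and "-fedora" flavors). Neither native library is found, so the node falls back to the JDK SSL provider, which the sslTransportClientProvider:JDK lines further down confirm. A minimal sketch of the same availability check, assuming netty-handler 4.0.x on the classpath:

    import io.netty.handler.ssl.OpenSsl;

    // Minimal sketch, assuming netty-handler 4.0.x on the classpath: the
    // availability probe whose failed native load produces the stack traces
    // logged above.
    public class OpenSslProbe {
        public static void main(String[] args) {
            if (OpenSsl.isAvailable()) {
                System.out.println("netty-tcnative loaded, OpenSSL engine available");
            } else {
                // unavailabilityCause() carries the Throwable from the failed
                // load attempt, i.e. the UnsatisfiedLinkError seen above.
                System.out.println("falling back to JDK SSL: " + OpenSsl.unavailabilityCause());
            }
        }
    }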
[2016-08-11 12:58:38,590][DEBUG][bootstrap ] java.home: /usr/lib/jvm/java-8-openjdk-amd64/jre
[2016-08-11 12:58:38,590][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/elasticsearch-2.3.4.jar
[2016-08-11 12:58:38,595][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jackson-dataformat-smile-2.6.6.jar
[2016-08-11 12:58:38,596][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-highlighter-5.5.0.jar
[2016-08-11 12:58:38,597][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-core-5.5.0.jar
[2016-08-11 12:58:38,599][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-queries-5.5.0.jar
[2016-08-11 12:58:38,600][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/commons-cli-1.3.1.jar
[2016-08-11 12:58:38,601][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jackson-core-2.6.6.jar
[2016-08-11 12:58:38,603][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-spatial3d-5.5.0.jar
[2016-08-11 12:58:38,604][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/joda-convert-1.2.jar
[2016-08-11 12:58:38,605][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jna-4.1.0.jar
[2016-08-11 12:58:38,605][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-misc-5.5.0.jar
[2016-08-11 12:58:38,606][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/guava-18.0.jar
[2016-08-11 12:58:38,612][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/compiler-0.8.13.jar
[2016-08-11 12:58:38,615][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-sandbox-5.5.0.jar
[2016-08-11 12:58:38,616][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jts-1.13.jar
[2016-08-11 12:58:38,617][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-analyzers-common-5.5.0.jar
[2016-08-11 12:58:38,620][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/compress-lzf-1.0.2.jar
[2016-08-11 12:58:38,623][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/securesm-1.0.jar
[2016-08-11 12:58:38,624][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/t-digest-3.0.jar
[2016-08-11 12:58:38,625][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-suggest-5.5.0.jar
[2016-08-11 12:58:38,625][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-grouping-5.5.0.jar
[2016-08-11 12:58:38,627][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jackson-dataformat-cbor-2.6.6.jar
[2016-08-11 12:58:38,628][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jsr166e-1.1.0.jar
[2016-08-11 12:58:38,629][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/snakeyaml-1.15.jar
[2016-08-11 12:58:38,630][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/hppc-0.7.1.jar
[2016-08-11 12:58:38,631][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/spatial4j-0.5.jar
[2016-08-11 12:58:38,635][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/netty-3.10.5.Final.jar
[2016-08-11 12:58:38,639][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-memory-5.5.0.jar
[2016-08-11 12:58:38,640][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/apache-log4j-extras-1.2.17.jar
[2016-08-11 12:58:38,643][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/jackson-dataformat-yaml-2.6.6.jar
[2016-08-11 12:58:38,643][DEBUG][bootstrap ] excluding duplicate classpath element: /usr/share/elasticsearch/lib/elasticsearch-2.3.4.jar
[2016-08-11 12:58:38,644][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-join-5.5.0.jar
[2016-08-11 12:58:38,647][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/joda-time-2.9.4.jar
[2016-08-11 12:58:38,651][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/log4j-1.2.17.jar
[2016-08-11 12:58:38,652][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-spatial-5.5.0.jar
[2016-08-11 12:58:38,653][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/HdrHistogram-2.1.6.jar
[2016-08-11 12:58:38,653][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-queryparser-5.5.0.jar
[2016-08-11 12:58:38,654][DEBUG][bootstrap ] examining jar: /usr/share/elasticsearch/lib/lucene-backward-codecs-5.5.0.jar
[2016-08-11 12:58:38,656][INFO ][plugins ] [Vader] modules [reindex, lang-expression, lang-groovy], plugins [search-guard-ssl, kopf, search-guard-2], sites [kopf]
[2016-08-11 12:58:38,690][DEBUG][env ] [Vader] using node location [[NodePath{path=/var/lib/elasticsearch/SHU/nodes/0, spins=false}]], local_node_id [0]
[2016-08-11 12:58:38,699][DEBUG][env ] [Vader] node data locations details:
 -> /var/lib/elasticsearch/SHU/nodes/0, free_space [27.7gb], usable_space [26.4gb], total_space [29.3gb], spins? [no], mount [/ (/dev/xvda1)], type [ext4]
[2016-08-11 12:58:38,703][INFO ][env ] [Vader] heap size [1015.6mb], compressed ordinary object pointers [true]
[2016-08-11 12:58:38,703][WARN ][env ] [Vader] max file descriptors [65535] for elasticsearch process likely too low, consider increasing to at least [65536]
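That WARN is an off-by-one against Elasticsearch's recommended floor: the process is allowed 65535 open files and 2.3 warns below 65536. The limit is normally raised on the OS side (ulimit -n, /etc/security/limits.conf, or systemd's LimitNOFILE), not inside Elasticsearch. The value the check reads is the JVM's view of the soft limit; a sketch of how that can be queried, assuming an OpenJDK/HotSpot JVM on Unix (this is not the node's actual check):

    import java.lang.management.ManagementFactory;
    import java.lang.management.OperatingSystemMXBean;

    // Sketch of the probe behind the warning above; assumes an OpenJDK/HotSpot
    // JVM on Unix, where the OS MXBean implements
    // com.sun.management.UnixOperatingSystemMXBean.
    public class FileDescriptorCheck {
        public static void main(String[] args) {
            OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();
            if (os instanceof com.sun.management.UnixOperatingSystemMXBean) {
                com.sun.management.UnixOperatingSystemMXBean unix =
                        (com.sun.management.UnixOperatingSystemMXBean) os;
                long max = unix.getMaxFileDescriptorCount();
                long open = unix.getOpenFileDescriptorCount();
                System.out.println("file descriptors: open=" + open + " max=" + max);
                if (max < 65536) { // the same threshold the warning above uses
                    System.out.println("max file descriptors likely too low, raise the nofile limit");
                }
            }
        }
    }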
[2016-08-11 12:58:38,721][DEBUG][threadpool ] [Vader] creating thread_pool [force_merge], type [fixed], size [1], queue_size [null]
[2016-08-11 12:58:38,734][DEBUG][threadpool ] [Vader] creating thread_pool [percolate], type [fixed], size [1], queue_size [1k]
[2016-08-11 12:58:38,751][DEBUG][threadpool ] [Vader] creating thread_pool [fetch_shard_started], type [scaling], min [1], size [2], keep_alive [5m]
[2016-08-11 12:58:38,757][DEBUG][threadpool ] [Vader] creating thread_pool [listener], type [fixed], size [1], queue_size [null]
[2016-08-11 12:58:38,757][DEBUG][threadpool ] [Vader] creating thread_pool [index], type [fixed], size [1], queue_size [200]
[2016-08-11 12:58:38,757][DEBUG][threadpool ] [Vader] creating thread_pool [refresh], type [scaling], min [1], size [1], keep_alive [5m]
[2016-08-11 12:58:38,758][DEBUG][threadpool ] [Vader] creating thread_pool [suggest], type [fixed], size [1], queue_size [1k]
[2016-08-11 12:58:38,758][DEBUG][threadpool ] [Vader] creating thread_pool [generic], type [cached], keep_alive [30s]
[2016-08-11 12:58:38,759][DEBUG][threadpool ] [Vader] creating thread_pool [warmer], type [scaling], min [1], size [1], keep_alive [5m]
[2016-08-11 12:58:38,759][DEBUG][threadpool ] [Vader] creating thread_pool [search], type [fixed], size [2], queue_size [1k]
[2016-08-11 12:58:38,763][DEBUG][threadpool ] [Vader] creating thread_pool [flush], type [scaling], min [1], size [1], keep_alive [5m]
[2016-08-11 12:58:38,763][DEBUG][threadpool ] [Vader] creating thread_pool [fetch_shard_store], type [scaling], min [1], size [2], keep_alive [5m]
[2016-08-11 12:58:38,763][DEBUG][threadpool ] [Vader] creating thread_pool [management], type [scaling], min [1], size [5], keep_alive [5m]
[2016-08-11 12:58:38,764][DEBUG][threadpool ] [Vader] creating thread_pool [get], type [fixed], size [1], queue_size [1k]
[2016-08-11 12:58:38,764][DEBUG][threadpool ] [Vader] creating thread_pool [bulk], type [fixed], size [1], queue_size [50]
[2016-08-11 12:58:38,765][DEBUG][threadpool ] [Vader] creating thread_pool [snapshot], type [scaling], min [1], size [1], keep_alive [5m]
[2016-08-11 12:58:38,776][DEBUG][io.netty.handler.ssl.CipherSuiteConverter] Cipher suite mapping: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 => ECDHE-RSA-AES128-GCM-SHA256
[2016-08-11 12:58:38,779][DEBUG][io.netty.handler.ssl.CipherSuiteConverter] Cipher suite mapping: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 => ECDHE-ECDSA-AES128-GCM-SHA256
[2016-08-11 12:58:38,780][DEBUG][io.netty.handler.ssl.CipherSuiteConverter] Cipher suite mapping: TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 => ECDHE-RSA-AES256-GCM-SHA384
[2016-08-11 12:58:38,781][DEBUG][io.netty.handler.ssl.CipherSuiteConverter] Cipher suite mapping: TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 => ECDHE-ECDSA-AES256-GCM-SHA384
[2016-08-11 12:58:38,783][DEBUG][io.netty.handler.ssl.CipherSuiteConverter] Cipher suite mapping: TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 => DHE-RSA-AES128-GCM-SHA256
[2016-08-11 12:58:38,783][DEBUG][io.netty.handler.ssl.CipherSuiteConverter] Cipher suite mapping: TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 => DHE-DSS-AES128-GCM-SHA256
[2016-08-11 12:58:38,783][DEBUG][io.netty.handler.ssl.CipherSuiteConverter] Cipher suite mapping: TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 => DHE-DSS-AES256-GCM-SHA384
[2016-08-11 12:58:38,784][DEBUG][io.netty.handler.ssl.CipherSuiteConverter] Cipher suite mapping: TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 => DHE-RSA-AES256-GCM-SHA384
[2016-08-11 12:58:38,784][DEBUG][io.netty.handler.ssl.CipherSuiteConverter] Cipher suite mapping: TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 => ECDHE-RSA-AES128-SHA256
[2016-08-11 12:58:38,785][DEBUG][io.netty.handler.ssl.CipherSuiteConverter] Cipher suite mapping: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 => ECDHE-ECDSA-AES128-SHA256
[2016-08-11 12:58:38,785][DEBUG][io.netty.handler.ssl.CipherSuiteConverter] Cipher suite mapping: TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA => ECDHE-RSA-AES128-SHA
[2016-08-11 12:58:38,785][DEBUG][io.netty.handler.ssl.CipherSuiteConverter] Cipher suite mapping: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA => ECDHE-ECDSA-AES128-SHA
[2016-08-11 12:58:38,786][DEBUG][io.netty.handler.ssl.CipherSuiteConverter] Cipher suite mapping: TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 => ECDHE-RSA-AES256-SHA384
[2016-08-11 12:58:38,786][DEBUG][io.netty.handler.ssl.CipherSuiteConverter] Cipher suite mapping: TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 => ECDHE-ECDSA-AES256-SHA384
[2016-08-11 12:58:38,786][DEBUG][io.netty.handler.ssl.CipherSuiteConverter] Cipher suite mapping: TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA => ECDHE-RSA-AES256-SHA
[2016-08-11 12:58:38,787][DEBUG][io.netty.handler.ssl.CipherSuiteConverter] Cipher suite mapping: TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA => ECDHE-ECDSA-AES256-SHA
[2016-08-11 12:58:38,788][DEBUG][io.netty.handler.ssl.CipherSuiteConverter] Cipher suite mapping: TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 => DHE-RSA-AES128-SHA256
[2016-08-11 12:58:38,788][DEBUG][io.netty.handler.ssl.CipherSuiteConverter] Cipher suite mapping: TLS_DHE_RSA_WITH_AES_128_CBC_SHA => DHE-RSA-AES128-SHA
[2016-08-11 12:58:38,789][DEBUG][io.netty.handler.ssl.CipherSuiteConverter] Cipher suite mapping: TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 => DHE-DSS-AES128-SHA256
[2016-08-11 12:58:38,789][DEBUG][io.netty.handler.ssl.CipherSuiteConverter] Cipher suite mapping: TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 => DHE-RSA-AES256-SHA256
[2016-08-11 12:58:38,789][DEBUG][io.netty.handler.ssl.CipherSuiteConverter] Cipher suite mapping: TLS_DHE_DSS_WITH_AES_256_CBC_SHA => DHE-DSS-AES256-SHA
[2016-08-11 12:58:38,790][DEBUG][io.netty.handler.ssl.CipherSuiteConverter] Cipher suite mapping: TLS_DHE_RSA_WITH_AES_256_CBC_SHA => DHE-RSA-AES256-SHA
[2016-08-11 12:58:39,663][INFO ][com.floragunn.searchguard.ssl.SearchGuardKeyStore] Config directory is /etc/elasticsearch/, from there the key- and truststore files are resolved relatively
[2016-08-11 12:58:39,668][DEBUG][com.floragunn.searchguard.ssl.util.SSLCertificateHelper] Alias node-0: is a certificate entry?false/is a key entry?true
[2016-08-11 12:58:39,668][DEBUG][com.floragunn.searchguard.ssl.util.SSLCertificateHelper] No alias given, will trust all of the certificates in the store
[2016-08-11 12:58:39,677][DEBUG][com.floragunn.searchguard.ssl.util.SSLCertificateHelper] Alias shu: is a certificate entry?true/is a key entry?false
[2016-08-11 12:58:39,678][DEBUG][com.floragunn.searchguard.ssl.util.SSLCertificateHelper] Alias root-ca-chain: is a certificate entry?true/is a key entry?false
[2016-08-11 12:58:39,678][DEBUG][com.floragunn.searchguard.ssl.util.SSLCertificateHelper] No alias given, will trust all of the certificates in the store
[2016-08-11 12:58:39,786][DEBUG][io.netty.handler.ssl.JdkSslContext] Default protocols (JDK): [TLSv1.2, TLSv1.1, TLSv1]
[2016-08-11 12:58:39,786][DEBUG][io.netty.handler.ssl.JdkSslContext] Default cipher suites (JDK): [TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_256_CBC_SHA, SSL_RSA_WITH_3DES_EDE_CBC_SHA]
[2016-08-11 12:58:39,874][INFO ][com.floragunn.searchguard.ssl.SearchGuardKeyStore] HTTPS client auth mode OPTIONAL
[2016-08-11 12:58:39,875][DEBUG][com.floragunn.searchguard.ssl.util.SSLCertificateHelper] Alias node-0: is a certificate entry?false/is a key entry?true
[2016-08-11 12:58:39,879][DEBUG][com.floragunn.searchguard.ssl.util.SSLCertificateHelper] No alias given, will trust all of the certificates in the store
[2016-08-11 12:58:39,880][DEBUG][com.floragunn.searchguard.ssl.util.SSLCertificateHelper] Alias shu: is a certificate entry?true/is a key entry?false
[2016-08-11 12:58:39,880][DEBUG][com.floragunn.searchguard.ssl.util.SSLCertificateHelper] Alias root-ca-chain: is a certificate entry?true/is a key entry?false
[2016-08-11 12:58:39,880][DEBUG][com.floragunn.searchguard.ssl.util.SSLCertificateHelper] No alias given, will trust all of the certificates in the store
[2016-08-11 12:58:39,898][INFO ][com.floragunn.searchguard.ssl.SearchGuardKeyStore] sslTransportClientProvider:JDK with ciphers [TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_DHE_RSA_WITH_AES_256_CBC_SHA, TLS_DHE_DSS_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_DHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, TLS_DHE_DSS_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, TLS_DHE_DSS_WITH_AES_128_GCM_SHA256]
[2016-08-11 12:58:39,898][INFO ][com.floragunn.searchguard.ssl.SearchGuardKeyStore] sslTransportServerProvider:JDK with ciphers [TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_DHE_RSA_WITH_AES_256_CBC_SHA, TLS_DHE_DSS_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_DHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, TLS_DHE_DSS_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, TLS_DHE_DSS_WITH_AES_128_GCM_SHA256]
[2016-08-11 12:58:39,898][INFO ][com.floragunn.searchguard.ssl.SearchGuardKeyStore] sslHTTPProvider:JDK with ciphers [TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_DHE_RSA_WITH_AES_256_CBC_SHA, TLS_DHE_DSS_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_DHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, TLS_DHE_DSS_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, TLS_DHE_DSS_WITH_AES_128_GCM_SHA256]
[2016-08-11 12:58:39,899][INFO ][com.floragunn.searchguard.ssl.SearchGuardKeyStore] sslTransport protocols [TLSv1.2, TLSv1.1]
[2016-08-11 12:58:39,899][INFO ][com.floragunn.searchguard.ssl.SearchGuardKeyStore] sslHTTP protocols [TLSv1.2, TLSv1.1]
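The keystore lines above show the effective TLS setup: key material resolved relative to /etc/elasticsearch/ (key entry node-0, trusted certificate entries shu and root-ca-chain), the JDK provider for transport and HTTP (since the OpenSSL provider is unavailable, as noted earlier), and protocols narrowed from the JDK default [TLSv1.2, TLSv1.1, TLSv1] down to [TLSv1.2, TLSv1.1]. The narrowing itself is ordinary JSSE; a minimal sketch using the default context:

    import javax.net.ssl.SSLContext;
    import javax.net.ssl.SSLEngine;

    // Minimal sketch of the protocol narrowing the "sslTransport protocols" /
    // "sslHTTP protocols" lines report: start from the JDK default SSLContext
    // and restrict the engine to TLSv1.2/TLSv1.1.
    public class ProtocolNarrowing {
        public static void main(String[] args) throws Exception {
            SSLEngine engine = SSLContext.getDefault().createSSLEngine();
            System.out.println("JDK defaults: " + String.join(", ", engine.getEnabledProtocols()));
            engine.setEnabledProtocols(new String[] { "TLSv1.2", "TLSv1.1" });
            System.out.println("narrowed to: " + String.join(", ", engine.getEnabledProtocols()));
        }
    }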
12:58:42,201][DEBUG][monitor.fs ] [Vader] Using probe [org.elasticsearch.monitor.fs.FsProbe@697a34af] with refresh_interval [1s] [2016-08-11 12:58:42,585][DEBUG][script ] [Vader] using script cache with max_size [100], expire [null] [2016-08-11 12:58:42,641][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] CN=kirk, OU=client, O=client, L=Test, C=DE [2016-08-11 12:58:42,642][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] CN=kirk, OU=client, O=client, L=Test, C=DE [2016-08-11 12:58:42,642][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] CN=kirk,OU=client,O=client,L=Test,C=DE [2016-08-11 12:58:42,642][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] CN=node-0.example.com, OU=SSL, O=Test, L=Test, C=DE [2016-08-11 12:58:42,643][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] CN=kirk,OU=client,O=client,l=tEst, C=De [2016-08-11 12:58:42,643][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] Loaded 2 admin DN's [CN=kirk, OU=client, O=client, L=Test, C=DE, CN=node-0.example.com, OU=SSL, O=Test, L=Test, C=DE] [2016-08-11 12:58:42,643][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] Loaded 0 impersonation DN's {} [2016-08-11 12:58:42,690][DEBUG][cluster.routing.allocation.decider] [Vader] using [cluster_concurrent_rebalance] with [2] [2016-08-11 12:58:42,694][DEBUG][cluster.routing.allocation.decider] [Vader] using [cluster.routing.allocation.allow_rebalance] with [indices_all_active] [2016-08-11 12:58:42,701][DEBUG][gateway ] [Vader] using initial_shards [quorum] [2016-08-11 12:58:42,948][DEBUG][com.floragunn.searchguard.action.configupdate.TransportConfigUpdateAction] [Vader] Add config listener class com.floragunn.searchguard.configuration.ActionGroupHolder [2016-08-11 12:58:42,948][DEBUG][com.floragunn.searchguard.action.configupdate.TransportConfigUpdateAction] [Vader] Add config listener class com.floragunn.searchguard.configuration.PrivilegesEvaluator [2016-08-11 12:58:42,948][DEBUG][com.floragunn.searchguard.action.configupdate.TransportConfigUpdateAction] [Vader] Add config listener class com.floragunn.searchguard.configuration.PrivilegesEvaluator [2016-08-11 12:58:42,949][DEBUG][com.floragunn.searchguard.action.configupdate.TransportConfigUpdateAction] [Vader] Add config listener class com.floragunn.searchguard.auth.internal.InternalAuthenticationBackend [2016-08-11 12:58:42,950][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] CN=kirk, OU=client, O=client, L=Test, C=DE [2016-08-11 12:58:42,955][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] CN=kirk, OU=client, O=client, L=Test, C=DE [2016-08-11 12:58:42,955][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] CN=kirk,OU=client,O=client,L=Test,C=DE [2016-08-11 12:58:42,955][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] CN=node-0.example.com, OU=SSL, O=Test, L=Test, C=DE [2016-08-11 12:58:42,955][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] CN=kirk,OU=client,O=client,l=tEst, C=De [2016-08-11 12:58:42,956][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] Loaded 2 admin DN's [CN=kirk, OU=client, O=client, L=Test, C=DE, CN=node-0.example.com, OU=SSL, O=Test, L=Test, C=DE] [2016-08-11 12:58:42,956][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] Loaded 0 impersonation DN's {} [2016-08-11 12:58:42,956][DEBUG][com.floragunn.searchguard.action.configupdate.TransportConfigUpdateAction] [Vader] Add config listener class com.floragunn.searchguard.http.XFFResolver [2016-08-11 
[2016-08-11 12:58:42,958][DEBUG][com.floragunn.searchguard.action.configupdate.TransportConfigUpdateAction] [Vader] Add config listener class com.floragunn.searchguard.auth.BackendRegistry
[2016-08-11 12:58:43,011][DEBUG][com.floragunn.searchguard.http.SearchGuardHttpServerTransport] [Vader] using max_chunk_size[8kb], max_header_size[8kb], max_initial_line_length[4kb], max_content_length[100mb], receive_predictor[512kb->512kb], pipelining[true], pipelining_max_events[10000]
[2016-08-11 12:58:43,034][DEBUG][indices.recovery ] [Vader] using max_bytes_per_sec[40mb], concurrent_streams [3], file_chunk_size [512kb], translog_size [512kb], translog_ops [1000], and compress [true]
[2016-08-11 12:58:43,050][DEBUG][indices.store ] [Vader] using indices.store.throttle.type [NONE], with index.store.throttle.max_bytes_per_sec [10gb]
[2016-08-11 12:58:43,055][DEBUG][indices.memory ] [Vader] using indexing buffer size [101.5mb], with indices.memory.min_shard_index_buffer_size [4mb], indices.memory.max_shard_index_buffer_size [512mb], indices.memory.shard_inactive_time [5m], indices.memory.interval [30s]
[2016-08-11 12:58:43,057][DEBUG][indices.cache.query ] [Vader] using [node] query cache with size [10%], actual_size [101.5mb], max filter count [1000]
[2016-08-11 12:58:43,058][DEBUG][indices.fielddata.cache ] [Vader] using size [-1] [-1b]
[2016-08-11 12:58:43,125][DEBUG][common.compress.lzf ] using decoder[VanillaChunkDecoder]
[2016-08-11 12:58:43,217][DEBUG][gateway ] [Vader] took 17ms to load state
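The HTTP and recovery ceilings above (max_content_length [100mb], max_bytes_per_sec [40mb], concurrent_streams [3]) are likewise 2.x defaults. Should they ever need changing, the corresponding elasticsearch.yml keys would look roughly like this (a sketch, not this node's actual configuration):

    # elasticsearch.yml -- sketch of the defaults echoed in the DEBUG lines above
    http.max_content_length: 100mb
    indices.recovery.max_bytes_per_sec: 40mb
    indices.recovery.concurrent_streams: 3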
[2016-08-11 12:58:43,220][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] CN=kirk, OU=client, O=client, L=Test, C=DE
[2016-08-11 12:58:43,221][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] CN=kirk, OU=client, O=client, L=Test, C=DE
[2016-08-11 12:58:43,221][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] CN=kirk,OU=client,O=client,L=Test,C=DE
[2016-08-11 12:58:43,221][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] CN=node-0.example.com, OU=SSL, O=Test, L=Test, C=DE
[2016-08-11 12:58:43,222][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] CN=kirk,OU=client,O=client,l=tEst, C=De
[2016-08-11 12:58:43,222][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] Loaded 2 admin DN's [CN=kirk, OU=client, O=client, L=Test, C=DE, CN=node-0.example.com, OU=SSL, O=Test, L=Test, C=DE]
[2016-08-11 12:58:43,222][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] Loaded 0 impersonation DN's {}
[2016-08-11 12:58:43,225][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] CN=kirk, OU=client, O=client, L=Test, C=DE
[2016-08-11 12:58:43,231][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] CN=kirk, OU=client, O=client, L=Test, C=DE
[2016-08-11 12:58:43,231][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] CN=kirk,OU=client,O=client,L=Test,C=DE
[2016-08-11 12:58:43,231][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] CN=node-0.example.com, OU=SSL, O=Test, L=Test, C=DE
[2016-08-11 12:58:43,231][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] CN=kirk,OU=client,O=client,l=tEst, C=De
[2016-08-11 12:58:43,231][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] Loaded 2 admin DN's [CN=kirk, OU=client, O=client, L=Test, C=DE, CN=node-0.example.com, OU=SSL, O=Test, L=Test, C=DE]
[2016-08-11 12:58:43,232][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] Loaded 0 impersonation DN's {}
[2016-08-11 12:58:43,232][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] CN=kirk, OU=client, O=client, L=Test, C=DE
[2016-08-11 12:58:43,232][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] CN=kirk, OU=client, O=client, L=Test, C=DE
[2016-08-11 12:58:43,232][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] CN=kirk,OU=client,O=client,L=Test,C=DE
[2016-08-11 12:58:43,232][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] CN=node-0.example.com, OU=SSL, O=Test, L=Test, C=DE
[2016-08-11 12:58:43,232][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] CN=kirk,OU=client,O=client,l=tEst, C=De
[2016-08-11 12:58:43,232][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] Loaded 2 admin DN's [CN=kirk, OU=client, O=client, L=Test, C=DE, CN=node-0.example.com, OU=SSL, O=Test, L=Test, C=DE]
[2016-08-11 12:58:43,233][DEBUG][com.floragunn.searchguard.configuration.AdminDNs] Loaded 0 impersonation DN's {}
[2016-08-11 12:58:43,235][INFO ][node ] [Vader] initialized
[2016-08-11 12:58:43,236][INFO ][node ] [Vader] starting ...
[2016-08-11 12:58:43,259][DEBUG][netty.channel.socket.nio.SelectorUtil] Using select timeout of 500
[2016-08-11 12:58:43,259][DEBUG][netty.channel.socket.nio.SelectorUtil] Epoll-bug workaround enabled = false
[2016-08-11 12:58:43,291][DEBUG][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] Node client configured for SSL
[2016-08-11 12:58:43,293][DEBUG][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] using profile[default], worker_count[2], port[9300-9400], bind_host[127.0.0.1], publish_host[127.0.0.1], compress[false], connect_timeout[30s], connections_per_node[2/3/6/1/1], receive_predictor[512kb->512kb]
[2016-08-11 12:58:43,307][DEBUG][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] Node server configured for SSL
[2016-08-11 12:58:43,308][DEBUG][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] binding server bootstrap to: 127.0.0.1
[2016-08-11 12:58:43,336][DEBUG][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] Bound profile [default] to address {127.0.0.1:9300}
[2016-08-11 12:58:43,339][INFO ][com.floragunn.searchguard.transport.SearchGuardTransportService] [Vader] publish_address {127.0.0.1:9300}, bound_addresses {127.0.0.1:9300}
[2016-08-11 12:58:43,348][INFO ][discovery ] [Vader] SHU/hnX5TnGhR_KVe9-J-iq5JQ
[2016-08-11 12:58:43,348][DEBUG][com.floragunn.searchguard.action.configupdate.TransportConfigUpdateAction] [Vader] Node started, try to initialize it. Wait for at least yellow cluster state....
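The repeated AdminDNs blocks above show Search Guard re-parsing its admin-DN list once for each component that consults it. The two effective entries reported by the "Loaded 2 admin DN's" line would come from a searchguard.authcz.admin_dn section in elasticsearch.yml, roughly like this (reconstructed from the log, not copied from the real file):

    # elasticsearch.yml -- sketch reconstructed from the "Loaded 2 admin DN's" line
    searchguard.authcz.admin_dn:
      - "CN=kirk, OU=client, O=client, L=Test, C=DE"
      - "CN=node-0.example.com, OU=SSL, O=Test, L=Test, C=DE"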
[2016-08-11 12:58:43,354][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action cluster:monitor/health from null/
[2016-08-11 12:58:43,359][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context []
[2016-08-11 12:58:43,359][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 12:58:43,363][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null
[2016-08-11 12:58:43,364][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] No user, will allow only standard discovery and monitoring actions
[2016-08-11 12:58:43,366][DEBUG][cluster.service ] [Vader] processing [initial_join]: execute
[2016-08-11 12:58:43,374][DEBUG][cluster.service ] [Vader] processing [initial_join]: took 8ms no change in cluster_state
[2016-08-11 12:58:43,376][DEBUG][action.admin.cluster.health] [Vader] no known master node, scheduling a retry
[2016-08-11 12:58:43,420][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [0][internal:discovery/zen/unicast] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 12:58:43,445][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [1][internal:discovery/zen/unicast] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 12:58:43,478][DEBUG][io.netty.buffer.PooledByteBufAllocator] -Dio.netty.allocator.numHeapArenas: 2
[2016-08-11 12:58:43,478][DEBUG][io.netty.buffer.PooledByteBufAllocator] -Dio.netty.allocator.numDirectArenas: 2
[2016-08-11 12:58:43,479][DEBUG][io.netty.buffer.PooledByteBufAllocator] -Dio.netty.allocator.pageSize: 8192
[2016-08-11 12:58:43,479][DEBUG][io.netty.buffer.PooledByteBufAllocator] -Dio.netty.allocator.maxOrder: 11
[2016-08-11 12:58:43,479][DEBUG][io.netty.buffer.PooledByteBufAllocator] -Dio.netty.allocator.chunkSize: 16777216
[2016-08-11 12:58:43,479][DEBUG][io.netty.buffer.PooledByteBufAllocator] -Dio.netty.allocator.tinyCacheSize: 512
[2016-08-11 12:58:43,479][DEBUG][io.netty.buffer.PooledByteBufAllocator] -Dio.netty.allocator.smallCacheSize: 256
[2016-08-11 12:58:43,479][DEBUG][io.netty.buffer.PooledByteBufAllocator] -Dio.netty.allocator.normalCacheSize: 64
[2016-08-11 12:58:43,479][DEBUG][io.netty.buffer.PooledByteBufAllocator] -Dio.netty.allocator.maxCachedBufferCapacity: 32768
[2016-08-11 12:58:43,480][DEBUG][io.netty.buffer.PooledByteBufAllocator] -Dio.netty.allocator.cacheTrimInterval: 8192
[2016-08-11 12:58:43,532][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0x2dd153d6]]
java.net.ConnectException: Connection refused: /127.0.0.1:9304
        at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
        at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:717)
        at org.jboss.netty.channel.socket.nio.NioClientBoss.connect(NioClientBoss.java:152)
        at org.jboss.netty.channel.socket.nio.NioClientBoss.processSelectedKeys(NioClientBoss.java:105)
        at org.jboss.netty.channel.socket.nio.NioClientBoss.process(NioClientBoss.java:79)
        at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337)
        at org.jboss.netty.channel.socket.nio.NioClientBoss.run(NioClientBoss.java:42)
        at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108)
        at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        at java.lang.Thread.run(Thread.java:745)
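The -Dio.netty.allocator.* lines above are Netty's PooledByteBufAllocator echoing its effective defaults. They are ordinary JVM system properties, so tuning one (rarely needed) would go through the JVM options rather than elasticsearch.yml, for example (purely illustrative; nothing in this log suggests it was done here):

    # sh -- sketch only; the value 4 is an arbitrary example
    export ES_JAVA_OPTS="-Dio.netty.allocator.numDirectArenas=4"
    /usr/share/elasticsearch/bin/elasticsearch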
[The ConnectException entries below each carried the same stack trace as the one above; only their header lines are kept here.]
[2016-08-11 12:58:43,541][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0xd83c39b3]] java.net.ConnectException: Connection refused: /0:0:0:0:0:0:0:1:9304
[2016-08-11 12:58:43,544][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0x4e0e7a32]] java.net.ConnectException: Connection refused: /0:0:0:0:0:0:0:1:9302
[2016-08-11 12:58:43,545][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0xa6b0f58f]] java.net.ConnectException: Connection refused: /127.0.0.1:9301
[2016-08-11 12:58:43,547][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0x1bc882aa]] java.net.ConnectException: Connection refused: /0:0:0:0:0:0:0:1:9301
[2016-08-11 12:58:43,548][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0xe3d36349]] java.net.ConnectException: Connection refused: /0:0:0:0:0:0:0:1:9303
[2016-08-11 12:58:43,550][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0x4808b941]] java.net.ConnectException: Connection refused: /0:0:0:0:0:0:0:1:9300
[2016-08-11 12:58:43,552][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0x090092d1]] java.net.ConnectException: Connection refused: /127.0.0.1:9302
[2016-08-11 12:58:43,553][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0x03fb257d]] java.net.ConnectException: Connection refused: /127.0.0.1:9303
[2016-08-11 12:58:44,958][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [2][internal:discovery/zen/unicast] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 12:58:44,972][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [3][internal:discovery/zen/unicast] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 12:58:44,973][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0xcf45cdd0]] java.net.ConnectException: Connection refused: /127.0.0.1:9302
[2016-08-11 12:58:44,975][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0x37aef34e]] java.net.ConnectException: Connection refused: /0:0:0:0:0:0:0:1:9303
[2016-08-11 12:58:44,976][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0x9153403a]] java.net.ConnectException: Connection refused: /127.0.0.1:9303
[2016-08-11 12:58:44,977][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0x6027c857]] java.net.ConnectException: Connection refused: /0:0:0:0:0:0:0:1:9304
[2016-08-11 12:58:44,978][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0xa01f7110]] java.net.ConnectException: Connection refused: /0:0:0:0:0:0:0:1:9300
[2016-08-11 12:58:44,979][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0x7034c5fc]] java.net.ConnectException: Connection refused: /0:0:0:0:0:0:0:1:9301
[2016-08-11 12:58:44,981][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0x08160296]] java.net.ConnectException: Connection refused: /127.0.0.1:9301
[2016-08-11 12:58:44,987][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0x471d16b1]] java.net.ConnectException: Connection refused: /0:0:0:0:0:0:0:1:9302
[2016-08-11 12:58:44,988][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0x06d5c28f]] java.net.ConnectException: Connection refused: /127.0.0.1:9304
[2016-08-11 12:58:46,474][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [4][internal:discovery/zen/unicast] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 12:58:46,475][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [5][internal:discovery/zen/unicast] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 12:58:46,488][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0x96dc808d]] java.net.ConnectException: Connection refused: /0:0:0:0:0:0:0:1:9301
[2016-08-11 12:58:46,490][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0x39af380a]] java.net.ConnectException: Connection refused: /127.0.0.1:9301
[2016-08-11 12:58:46,491][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0x7a41146c]] java.net.ConnectException: Connection refused: /0:0:0:0:0:0:0:1:9304
[2016-08-11 12:58:46,492][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0x918ab81c]] java.net.ConnectException: Connection refused: /127.0.0.1:9302
[2016-08-11 12:58:46,493][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0x4b366361]] java.net.ConnectException: Connection refused: /0:0:0:0:0:0:0:1:9302
[2016-08-11 12:58:46,494][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0x640df592]] java.net.ConnectException: Connection refused: /127.0.0.1:9304
[2016-08-11 12:58:46,495][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0x6661d1a0]] java.net.ConnectException: Connection refused: /0:0:0:0:0:0:0:1:9300
[2016-08-11 12:58:46,503][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0x72d9fd50]] java.net.ConnectException: Connection refused: /0:0:0:0:0:0:0:1:9303
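This burst of Connection refused TRACEs is expected on a lone node: with no unicast hosts configured, 2.x zen discovery probes ports 9300-9304 on 127.0.0.1 and ::1, and only 9300 actually has a listener here. Listing the one real node explicitly would silence the probes (a sketch, assuming this stays a single-node setup):

    # elasticsearch.yml -- sketch only
    discovery.zen.ping.unicast.hosts: ["127.0.0.1:9300"]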
[2016-08-11 12:58:46,504][DEBUG][discovery.zen ] [Vader] filtered ping responses: (filter_client[true], filter_data[false]) {none}
[2016-08-11 12:58:46,505][DEBUG][discovery.zen ] [Vader] elected as master, waiting for incoming joins ([0] needed)
[2016-08-11 12:58:46,506][DEBUG][cluster.service ] [Vader] processing [zen-disco-join(elected_as_master, [0] joins received)]: execute
[2016-08-11 12:58:46,511][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] connect exception caught on transport layer [[id: 0xf6db6d25]] java.net.ConnectException: Connection refused: /127.0.0.1:9303
[2016-08-11 12:58:46,516][DEBUG][cluster.service ] [Vader] cluster state updated, version [1], source [zen-disco-join(elected_as_master, [0] joins received)]
[2016-08-11 12:58:46,519][INFO ][cluster.service ] [Vader] new_master {Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}, reason: zen-disco-join(elected_as_master, [0] joins received)
[2016-08-11 12:58:46,519][DEBUG][cluster.service ] [Vader] publishing cluster state version [1]
[2016-08-11 12:58:46,520][DEBUG][cluster.service ] [Vader] set local cluster state to version 1
[2016-08-11 12:58:46,529][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action internal:gateway/local/meta_state from null/
[2016-08-11 12:58:46,531][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context []
[2016-08-11 12:58:46,531][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 12:58:46,531][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null
[2016-08-11 12:58:46,531][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] No user, will allow only standard discovery and monitoring actions
[2016-08-11 12:58:46,545][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [6][internal:gateway/local/meta_state[n]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 12:58:46,546][DEBUG][cluster.service ] [Vader] processing [zen-disco-join(elected_as_master, [0] joins received)]: took 40ms done applying updated cluster_state (version: 1, uuid: 4kbbxd1VRiGakXqmkQi1vQ)
[2016-08-11 12:58:46,553][DEBUG][cluster.service ] [Vader] processing [local-gateway-elected-state]: execute
[2016-08-11 12:58:46,570][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action internal:gateway/local/started_shards from null/
[2016-08-11 12:58:46,574][DEBUG][com.floragunn.searchguard.http.SearchGuardHttpServerTransport] [Vader] Bound http to address {[::]:9200}
[2016-08-11 12:58:46,577][INFO ][http ] [Vader] publish_address {10.8.8.136:9200}, bound_addresses {[::]:9200}
[2016-08-11 12:58:46,577][INFO ][node ] [Vader] started
[2016-08-11 12:58:46,577][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context []
[2016-08-11 12:58:46,577][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 12:58:46,577][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null
[2016-08-11 12:58:46,578][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] No user, will allow only standard discovery and monitoring actions
[Each of the six Action lines below was followed by the same Context []/Header []/remote address: null/No user TRACE quadruplet, omitted here.]
[2016-08-11 12:58:46,585][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action internal:gateway/local/started_shards from null/
[2016-08-11 12:58:46,586][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action internal:gateway/local/started_shards from null/
[2016-08-11 12:58:46,596][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action internal:gateway/local/started_shards from null/
[2016-08-11 12:58:46,597][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action internal:gateway/local/started_shards from null/
[2016-08-11 12:58:46,598][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action internal:gateway/local/started_shards from null/
[2016-08-11 12:58:46,599][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action internal:gateway/local/started_shards from null/
[2016-08-11 12:58:46,602][DEBUG][gateway ] [Vader] [searchguard][0] shard state info found: [version [16], primary [true]]
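With the node started and HTTP bound on port 9200 (TLS on, per the earlier sslHTTP line), a quick end-to-end check would be an authenticated health call. The credentials below are placeholders, not taken from this log, and -k skips certificate verification because the node is running with test certificates (CN=node-0.example.com):

    # sh -- sketch; user/password are assumptions
    curl -sk -u admin:admin "https://127.0.0.1:9200/_cluster/health?pretty"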
[2016-08-11 12:58:46,606][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [7][internal:gateway/local/started_shards[n]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 12:58:46,603][DEBUG][gateway ] [Vader] [person][0] shard state info found: [version [120], primary [true]]
[2016-08-11 12:58:46,607][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [8][internal:gateway/local/started_shards[n]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 12:58:46,608][DEBUG][gateway ] [Vader] [person][2] shard state info found: [version [120], primary [true]]
[2016-08-11 12:58:46,609][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [9][internal:gateway/local/started_shards[n]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 12:58:46,609][DEBUG][cluster.routing.allocation.allocator] [Vader] skipping rebalance due to in-flight shard/store fetches
[2016-08-11 12:58:46,610][DEBUG][cluster.service ] [Vader] cluster state updated, version [2], source [local-gateway-elected-state]
[2016-08-11 12:58:46,612][DEBUG][cluster.service ] [Vader] publishing cluster state version [2]
[2016-08-11 12:58:46,613][DEBUG][cluster.service ] [Vader] set local cluster state to version 2
[2016-08-11 12:58:46,612][DEBUG][gateway ] [Vader] [person][4] shard state info found: [version [120], primary [true]]
[2016-08-11 12:58:46,615][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [11][internal:gateway/local/started_shards[n]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 12:58:46,611][DEBUG][gateway ] [Vader] [person][1] shard state info found: [version [120], primary [true]]
[2016-08-11 12:58:46,615][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [10][internal:gateway/local/started_shards[n]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 12:58:46,619][DEBUG][gateway ] [Vader] [.kibana][0] loaded data path [/var/lib/elasticsearch/SHU/nodes/0/indices/.kibana/0], state path [/var/lib/elasticsearch/SHU/nodes/0/indices/.kibana/0]
[2016-08-11 12:58:46,627][DEBUG][gateway ] [Vader] [person][3] loaded data path [/var/lib/elasticsearch/SHU/nodes/0/indices/person/3], state path [/var/lib/elasticsearch/SHU/nodes/0/indices/person/3]
[2016-08-11 12:58:46,681][DEBUG][gateway ] [Vader] [person][3] shard state info found: [version [120], primary [true]]
[2016-08-11 12:58:46,681][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [12][internal:gateway/local/started_shards[n]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 12:58:46,683][DEBUG][gateway ] [Vader] [.kibana][0] shard state info found: [version [172], primary [true]]
[2016-08-11 12:58:46,684][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [13][internal:gateway/local/started_shards[n]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 12:58:46,696][INFO ][gateway ] [Vader] recovered [3] indices into cluster_state
[2016-08-11 12:58:46,697][DEBUG][cluster.service ] [Vader] processing [local-gateway-elected-state]: took 143ms done applying updated cluster_state (version: 2, uuid: avMTjGqvQqyT4OjmovBgzQ)
[2016-08-11 12:58:46,697][DEBUG][cluster.service ] [Vader] processing [cluster_reroute(async_shard_fetch)]: execute
[2016-08-11 12:58:46,698][DEBUG][gateway ] [Vader] [searchguard][0] found 1 allocations of [searchguard][0], node[null], [P], v[0], s[UNASSIGNED], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.559Z]], highest version: [16]
[2016-08-11 12:58:46,704][DEBUG][gateway ] [Vader] [searchguard][0]: allocating [[searchguard][0], node[null], [P], v[0], s[UNASSIGNED], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.559Z]]] to [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}] on primary allocation
[2016-08-11 12:58:46,705][DEBUG][gateway ] [Vader] [person][1] found 1 allocations of [person][1], node[null], [P], v[0], s[UNASSIGNED], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]], highest version: [120]
[2016-08-11 12:58:46,705][DEBUG][gateway ] [Vader] [person][1]: allocating [[person][1], node[null], [P], v[0], s[UNASSIGNED], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]] to [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}] on primary allocation
[2016-08-11 12:58:46,705][DEBUG][gateway ] [Vader] [person][2] found 1 allocations of [person][2], node[null], [P], v[0], s[UNASSIGNED], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]], highest version: [120]
[2016-08-11 12:58:46,706][DEBUG][gateway ] [Vader] [person][2]: allocating [[person][2], node[null], [P], v[0], s[UNASSIGNED], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]] to [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}] on primary allocation
[2016-08-11 12:58:46,706][DEBUG][gateway ] [Vader] [person][0] found 1 allocations of [person][0], node[null], [P], v[0], s[UNASSIGNED], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]], highest version: [120]
[2016-08-11 12:58:46,706][DEBUG][gateway ] [Vader] [person][0]: allocating [[person][0], node[null], [P], v[0], s[UNASSIGNED], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]] to [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}] on primary allocation
[2016-08-11 12:58:46,707][DEBUG][gateway ] [Vader] [person][3] found 1 allocations of [person][3], node[null], [P], v[0], s[UNASSIGNED], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]], highest version: [120]
[2016-08-11 12:58:46,707][DEBUG][gateway ] [Vader] [person][3]: throttling allocation [[person][3], node[null], [P], v[0], s[UNASSIGNED], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]] to [[{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]] on primary allocation
[2016-08-11 12:58:46,707][DEBUG][gateway ] [Vader] [person][4] found 1 allocations of [person][4], node[null], [P], v[0], s[UNASSIGNED], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]], highest version: [120]
[2016-08-11 12:58:46,708][DEBUG][gateway ] [Vader] [person][4]: throttling allocation [[person][4], node[null], [P], v[0], s[UNASSIGNED], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]] to [[{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]] on primary allocation
[2016-08-11 12:58:46,708][DEBUG][gateway ] [Vader] [.kibana][0] found 1 allocations of [.kibana][0], node[null], [P], v[0], s[UNASSIGNED], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]], highest version: [172]
[2016-08-11 12:58:46,711][DEBUG][gateway ] [Vader] [.kibana][0]: throttling allocation [[.kibana][0], node[null], [P], v[0], s[UNASSIGNED], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]] to [[{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]] on primary allocation
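The throttling decisions just above follow from node_initial_primaries_recoveries [4] logged at startup: four primaries ([searchguard][0] and three person shards) are already being allocated, so [person][3], [person][4] and [.kibana][0] must wait their turn. On a single-node box where every shard recovers from the local gateway, that ceiling can be raised if initial recovery ever feels slow (a sketch; the value is illustrative):

    # elasticsearch.yml -- sketch only
    cluster.routing.allocation.node_initial_primaries_recoveries: 8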
[2016-08-11 12:58:46,713][DEBUG][cluster.service ] [Vader] cluster state updated, version [3], source [cluster_reroute(async_shard_fetch)]
[2016-08-11 12:58:46,713][DEBUG][cluster.service ] [Vader] publishing cluster state version [3]
[2016-08-11 12:58:46,714][DEBUG][cluster.service ] [Vader] set local cluster state to version 3
[2016-08-11 12:58:46,714][DEBUG][indices.cluster ] [Vader] [searchguard] creating index
[2016-08-11 12:58:46,715][DEBUG][indices ] [Vader] creating Index [searchguard], shards [1]/[0]
[2016-08-11 12:58:46,842][DEBUG][index.store ] [Vader] [searchguard] using index.store.throttle.type [none], with index.store.throttle.max_bytes_per_sec [0b]
[2016-08-11 12:58:46,866][DEBUG][index.mapper ] [Vader] [searchguard] using dynamic[true]
[2016-08-11 12:58:46,885][DEBUG][indices.cluster ] [Vader] [person] creating index
[2016-08-11 12:58:46,885][DEBUG][indices ] [Vader] creating Index [person], shards [5]/[1]
[2016-08-11 12:58:46,904][DEBUG][index.store ] [Vader] [person] using index.store.throttle.type [none], with index.store.throttle.max_bytes_per_sec [0b]
[2016-08-11 12:58:46,909][DEBUG][index.mapper ] [Vader] [person] using dynamic[true]
[2016-08-11 12:58:46,912][DEBUG][indices.cluster ] [Vader] [searchguard] adding mapping [rolesmapping], source [{"rolesmapping":{"properties":{"sg_all_access":{"properties":{"users":{"type":"string"}}},"sg_kibana4":{"properties":{"users":{"type":"string"}}},"sg_kibana4_server":{"properties":{"users":{"type":"string"}}},"sg_kibana4_testindex":{"properties":{"users":{"type":"string"}}},"sg_logstash":{"properties":{"users":{"type":"string"}}},"sg_public":{"properties":{"users":{"type":"string"}}},"sg_readall":{"properties":{"users":{"type":"string"}}},"sg_readonly_dlsfls":{"properties":{"users":{"type":"string"}}},"sg_role_klingons1":{"properties":{"backendroles":{"type":"string"},"hosts":{"type":"string"},"users":{"type":"string"}}},"sg_role_starfleet":{"properties":{"backendroles":{"type":"string"},"hosts":{"type":"string"},"users":{"type":"string"}}},"sg_role_starfleet_captains":{"properties":{"backendroles":{"type":"string"}}}}}}]
[2016-08-11 12:58:47,033][DEBUG][indices.cluster ] [Vader] [searchguard] adding mapping [actiongroups], source [{"actiongroups":{"properties":{"ALL":{"type":"string"},"CLUSTER_ALL":{"type":"string"},"CLUSTER_MONITOR":{"type":"string"},"CREATE_INDEX":{"type":"string"},"CRUD":{"type":"string"},"DATA_ACCESS":{"type":"string"},"DELETE":{"type":"string"},"GET":{"type":"string"},"INDEX":{"type":"string"},"MANAGE":{"type":"string"},"MANAGE_ALIASES":{"type":"string"},"MONITOR":{"type":"string"},"READ":{"type":"string"},"SEARCH":{"type":"string"},"SUGGEST":{"type":"string"},"WRITE":{"type":"string"}}}}]
[2016-08-11 12:58:47,045][DEBUG][indices.cluster ] [Vader] [searchguard] adding mapping [roles], source [{"roles":{"properties":{"sg_all_access":{"properties":{"cluster":{"type":"string"},"indices":{"properties":{"*":{"properties":{"*":{"type":"string"}}}}}}},"sg_kibana4":{"properties":{"indices":{"properties":{"*":{"properties":{"*":{"type":"string"}}},"?kibana":{"properties":{"*":{"type":"string"}}}}}}},"sg_kibana4_server":{"properties":{"cluster":{"type":"string"},"indices":{"properties":{"?kibana":{"properties":{"*":{"type":"string"}}}}}}},"sg_kibana4_testindex":{"properties":{"indices":{"properties":{"?kibana":{"properties":{"*":{"type":"string"}}},"test*":{"properties":{"*":{"type":"string"}}}}}}},"sg_logstash":{"properties":{"cluster":{"type":"string"},"indices":{"properties":{"*beat*":{"properties":{"*":{"type":"string"}}},"logstash-*":{"properties":{"*":{"type":"string"}}}}}}},"sg_readall":{"properties":{"indices":{"properties":{"*":{"properties":{"*":{"type":"string"}}}}}}},"sg_readonly_and_monitor":{"properties":{"cluster":{"type":"string"},"indices":{"properties":{"*":{"properties":{"*":{"type":"string"}}}}}}},"sg_readonly_dlsfls":{"properties":{"indices":{"properties":{"/\\S*/":{"properties":{"*":{"type":"string"},"_dls_":{"type":"string"},"_fls_":{"type":"string"}}}}}}},"sg_role_starfleet":{"properties":{"indices":{"properties":{"pub*":{"properties":{"*":{"type":"string"}}},"sf":{"properties":{"alumni":{"type":"string"},"public":{"type":"string"},"ships":{"type":"string"},"students":{"type":"string"}}}}}}},"sg_role_starfleet_captains":{"properties":{"cluster":{"type":"string"},"indices":{"properties":{"pub*":{"properties":{"*":{"type":"string"}}},"sf":{"properties":{"*":{"type":"string"}}}}}}},"sg_transport_client":{"properties":{"cluster":{"type":"string"}}}}}}]
[2016-08-11 12:58:47,067][DEBUG][indices.cluster ] [Vader] [searchguard] adding mapping [internalusers], source [{"internalusers":{"properties":{"admin":{"properties":{"hash":{"type":"string"}}},"dlsflsuser":{"properties":{"hash":{"type":"string"}}},"kibanaro":{"properties":{"hash":{"type":"string"}}},"kibanaserver":{"properties":{"hash":{"type":"string"}}},"kirk":{"properties":{"hash":{"type":"string"},"roles":{"type":"string"}}},"logstash":{"properties":{"hash":{"type":"string"}}},"mister_picard":{"properties":{"hash":{"type":"string"},"username":{"type":"string"}}},"readall":{"properties":{"hash":{"type":"string"}}},"spock":{"properties":{"hash":{"type":"string"},"roles":{"type":"string"}}},"test":{"properties":{"hash":{"type":"string"}}},"worf":{"properties":{"hash":{"type":"string"}}}}}}]
[2016-08-11 12:58:47,082][DEBUG][indices.cluster ] [Vader] [searchguard] adding mapping [config], source [{"config":{"properties":{"searchguard":{"properties":{"dynamic":{"properties":{"authc":{"properties":{"basic_internal_auth_domain":{"properties":{"authentication_backend":{"properties":{"type":{"type":"string"}}},"enabled":{"type":"boolean"},"http_authenticator":{"properties":{"challenge":{"type":"boolean"},"type":{"type":"string"}}},"order":{"type":"long"}}},"clientcert_auth_domain":{"properties":{"authentication_backend":{"properties":{"type":{"type":"string"}}},"enabled":{"type":"boolean"},"http_authenticator":{"properties":{"challenge":{"type":"boolean"},"type":{"type":"string"}}},"order":{"type":"long"}}},"host_auth_domain":{"properties":{"authentication_backend":{"properties":{"type":{"type":"string"}}},"enabled":{"type":"boolean"},"http_authenticator":{"properties":{"challenge":{"type":"boolean"},"type":{"type":"string"}}},"order":{"type":"long"}}},"jwt_auth_domain":{"properties":{"authentication_backend":{"properties":{"type":{"type":"string"}}},"enabled":{"type":"boolean"},"http_authenticator":{"properties":{"challenge":{"type":"boolean"},"config":{"properties":{"jwt_header":{"type":"string"},"signing_key":{"type":"string"}}},"type":{"type":"string"}}},"order":{"type":"long"}}},"kerberos_auth_domain":{"properties":{"authentication_backend":{"properties":{"type":{"type":"string"}}},"enabled":{"type":"boolean"},"http_authenticator":{"properties":{"challenge":{"type":"boolean"},"config":{"properties":{"acceptor_principal":{"type":"string"},"krb_debug":{"type":"boolean"},"strip_realm_from_principal":{"type":"boolean"}}},"type":{"type":"string"}}},"order":{"type":"long"}}},"ldap":{"properties":{"authentication_backend":{"properties":{"config":{"properties":{"enable_ssl":{"type":"boolean"},"enable_ssl_client_auth":{"type":"boolean"},"enable_start_tls":{"type":"boolean"},"hosts":{"type":"string"},"userbase":{"type":"string"},"usersearch":{"type":"string"},"verify_hostnames":{"type":"boolean"}}},"type":{"type":"string"}}},"enabled":{"type":"boolean"},"http_authenticator":{"properties":{"challenge":{"type":"boolean"},"type":{"type":"string"}}},"order":{"type":"long"}}},"proxy_auth_domain":{"properties":{"authentication_backend":{"properties":{"type":{"type":"string"}}},"enabled":{"type":"boolean"},"http_authenticator":{"properties":{"challenge":{"type":"boolean"},"config":{"properties":{"roles_header":{"type":"string"},"user_header":{"type":"string"}}},"type":{"type":"string"}}},"order":{"type":"long"}}}}},"authz":{"properties":{"roles_from_another_ldap":{"properties":{"authorization_backend":{"properties":{"type":{"type":"string"}}},"enabled":{"type":"boolean"}}},"roles_from_myldap":{"properties":{"authorization_backend":{"properties":{"config":{"properties":{"enable_ssl":{"type":"boolean"},"enable_ssl_client_auth":{"type":"boolean"},"enable_start_tls":{"type":"boolean"},"hosts":{"type":"string"},"resolve_nested_roles":{"type":"boolean"},"rolebase":{"type":"string"},"rolename":{"type":"string"},"rolesearch":{"type":"string"},"userbase":{"type":"string"},"userrolename":{"type":"string"},"usersearch":{"type":"string"},"verify_hostnames":{"type":"boolean"}}},"type":{"type":"string"}}},"enabled":{"type":"boolean"}}}}},"http":{"properties":{"anonymous_auth_enabled":{"type":"boolean"},"xff":{"properties":{"enabled":{"type":"boolean"},"internalProxies":{"type":"string"},"proxiesHeader":{"type":"string"},"remoteIpHeader":{"type":"string"}}}}}}}}}}}}]
[2016-08-11 12:58:47,122][DEBUG][indices.cluster ] [Vader] [person] adding mapping [student], source
[{"student":{"properties":{"address":{"properties":{"city":{"type":"string"},"country":{"type":"string"},"state":{"type":"string"},"streetAddress1":{"type":"string"},"streetAddress2":{"type":"string"},"streetAddress3":{"type":"string"},"zip":{"type":"string"}}},"emails":{"properties":{"personal":{"type":"string"},"primary":{"type":"string"}}},"ferpaRestricted":{"type":"boolean"},"idNum":{"type":"long"},"majors":{"properties":{"major1":{"properties":{"code":{"type":"string"},"description":{"type":"string"}}},"major2":{"properties":{"code":{"type":"string"},"description":{"type":"string"}}},"major3":{"properties":{"code":{"type":"string"},"description":{"type":"string"}}},"major4":{"type":"object"}}},"name":{"properties":{"first":{"type":"string"},"last":{"type":"string"},"middle":{"type":"string"}}}}}}] [2016-08-11 12:58:47,139][DEBUG][indices.cluster ] [Vader] [searchguard][0] creating shard [2016-08-11 12:58:47,140][DEBUG][index ] [Vader] [searchguard] [searchguard][0] loaded data path [/var/lib/elasticsearch/SHU/nodes/0/indices/searchguard/0], state path [/var/lib/elasticsearch/SHU/nodes/0/indices/searchguard/0] [2016-08-11 12:58:47,142][DEBUG][index ] [Vader] [searchguard] [searchguard][0] creating using an existing path [ShardPath{path=/var/lib/elasticsearch/SHU/nodes/0/indices/searchguard/0, indexUUID='lhqhelFtSB6ou1z_1bHm9w', shard=[searchguard][0]}] [2016-08-11 12:58:47,142][DEBUG][index ] [Vader] [searchguard] creating shard_id [searchguard][0] [2016-08-11 12:58:47,145][DEBUG][com.floragunn.searchguard.configuration.SearchGuardIndexSearcherWrapperModule] FLS/DLS not enabled [2016-08-11 12:58:47,219][DEBUG][index.store ] [Vader] [searchguard][0] store stats are refreshed with refresh_interval [10s] [2016-08-11 12:58:47,221][DEBUG][index.deletionpolicy ] [Vader] [searchguard][0] Using [keep_only_last] deletion policy [2016-08-11 12:58:47,239][DEBUG][index.shard ] [Vader] [searchguard][0] using [tiered] merge mergePolicy with expunge_deletes_allowed[10.0], floor_segment[2mb], max_merge_at_once[10], max_merge_at_once_explicit[30], max_merged_segment[5gb], segments_per_tier[10.0], reclaim_deletes_weight[2.0] [2016-08-11 12:58:47,239][DEBUG][index.shard ] [Vader] [searchguard][0] state: [CREATED] [2016-08-11 12:58:47,248][DEBUG][index.translog ] [Vader] [searchguard][0] interval [5s], flush_threshold_ops [2147483647], flush_threshold_size [512mb], flush_threshold_period [30m] [2016-08-11 12:58:47,254][DEBUG][index.shard ] [Vader] [searchguard][0] state: [CREATED]->[RECOVERING], reason [from store] [2016-08-11 12:58:47,258][DEBUG][index.shard ] [Vader] [searchguard][0] starting recovery from shard_store ... 
[2016-08-11 12:58:47,265][DEBUG][indices.cluster ] [Vader] [person][2] creating shard [2016-08-11 12:58:47,266][DEBUG][index ] [Vader] [person] [person][2] loaded data path [/var/lib/elasticsearch/SHU/nodes/0/indices/person/2], state path [/var/lib/elasticsearch/SHU/nodes/0/indices/person/2] [2016-08-11 12:58:47,267][DEBUG][index ] [Vader] [person] [person][2] creating using an existing path [ShardPath{path=/var/lib/elasticsearch/SHU/nodes/0/indices/person/2, indexUUID='mvvBFMS_RQaOGGaGHEGIng', shard=[person][2]}] [2016-08-11 12:58:47,271][DEBUG][index ] [Vader] [person] creating shard_id [person][2] [2016-08-11 12:58:47,271][DEBUG][com.floragunn.searchguard.configuration.SearchGuardIndexSearcherWrapperModule] FLS/DLS not enabled [2016-08-11 12:58:47,272][DEBUG][indices.memory ] [Vader] recalculating shard indexing buffer, total is [101.5mb] with [1] active shards, each shard set to indexing=[101.5mb], translog=[64kb] [2016-08-11 12:58:47,272][DEBUG][index.shard ] [Vader] [searchguard][0] updateBufferSize: engine is closed; skipping [2016-08-11 12:58:47,281][DEBUG][index.store ] [Vader] [person][2] store stats are refreshed with refresh_interval [10s] [2016-08-11 12:58:47,291][DEBUG][index.deletionpolicy ] [Vader] [person][2] Using [keep_only_last] deletion policy [2016-08-11 12:58:47,291][DEBUG][index.shard ] [Vader] [person][2] using [tiered] merge mergePolicy with expunge_deletes_allowed[10.0], floor_segment[2mb], max_merge_at_once[10], max_merge_at_once_explicit[30], max_merged_segment[5gb], segments_per_tier[10.0], reclaim_deletes_weight[2.0] [2016-08-11 12:58:47,292][DEBUG][index.shard ] [Vader] [person][2] state: [CREATED] [2016-08-11 12:58:47,292][DEBUG][index.translog ] [Vader] [person][2] interval [5s], flush_threshold_ops [2147483647], flush_threshold_size [512mb], flush_threshold_period [30m] [2016-08-11 12:58:47,293][DEBUG][index.shard ] [Vader] [person][2] state: [CREATED]->[RECOVERING], reason [from store] [2016-08-11 12:58:47,295][DEBUG][index.shard ] [Vader] [person][2] starting recovery from shard_store ... 
[2016-08-11 12:58:47,299][DEBUG][indices.memory ] [Vader] recalculating shard indexing buffer, total is [101.5mb] with [2] active shards, each shard set to indexing=[50.7mb], translog=[64kb] [2016-08-11 12:58:47,299][DEBUG][index.shard ] [Vader] [searchguard][0] updateBufferSize: engine is closed; skipping [2016-08-11 12:58:47,299][DEBUG][index.shard ] [Vader] [person][2] updateBufferSize: engine is closed; skipping [2016-08-11 12:58:47,299][DEBUG][indices.cluster ] [Vader] [person][1] creating shard [2016-08-11 12:58:47,300][DEBUG][index ] [Vader] [person] [person][1] loaded data path [/var/lib/elasticsearch/SHU/nodes/0/indices/person/1], state path [/var/lib/elasticsearch/SHU/nodes/0/indices/person/1] [2016-08-11 12:58:47,300][DEBUG][index ] [Vader] [person] [person][1] creating using an existing path [ShardPath{path=/var/lib/elasticsearch/SHU/nodes/0/indices/person/1, indexUUID='mvvBFMS_RQaOGGaGHEGIng', shard=[person][1]}] [2016-08-11 12:58:47,301][DEBUG][index ] [Vader] [person] creating shard_id [person][1] [2016-08-11 12:58:47,303][DEBUG][com.floragunn.searchguard.configuration.SearchGuardIndexSearcherWrapperModule] FLS/DLS not enabled [2016-08-11 12:58:47,323][DEBUG][index.store ] [Vader] [person][1] store stats are refreshed with refresh_interval [10s] [2016-08-11 12:58:47,327][DEBUG][index.deletionpolicy ] [Vader] [person][1] Using [keep_only_last] deletion policy [2016-08-11 12:58:47,327][DEBUG][index.shard ] [Vader] [person][1] using [tiered] merge mergePolicy with expunge_deletes_allowed[10.0], floor_segment[2mb], max_merge_at_once[10], max_merge_at_once_explicit[30], max_merged_segment[5gb], segments_per_tier[10.0], reclaim_deletes_weight[2.0] [2016-08-11 12:58:47,328][DEBUG][index.shard ] [Vader] [person][1] state: [CREATED] [2016-08-11 12:58:47,328][DEBUG][index.translog ] [Vader] [person][1] interval [5s], flush_threshold_ops [2147483647], flush_threshold_size [512mb], flush_threshold_period [30m] [2016-08-11 12:58:47,328][DEBUG][index.shard ] [Vader] [person][1] state: [CREATED]->[RECOVERING], reason [from store] [2016-08-11 12:58:47,331][DEBUG][index.shard ] [Vader] [person][1] starting recovery from shard_store ... 
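The indices.memory recalculations above split a fixed indexing-buffer budget evenly across the shards whose engines are open: 101.5mb total becomes 101.5mb for one active shard, 50.7mb for two, 33.8mb for three, and so on, while shards still recovering log "engine is closed; skipping" instead. In Elasticsearch 2.x this budget comes from indices.memory.index_buffer_size, by default 10% of the heap. A quick check that reproduces the displayed figures, assuming the division is over the displayed 101.5mb total with the result truncated (not rounded) to one decimal:

```python
import math

TOTAL_MB = 101.5  # the budget reported by indices.memory in this log

for active_shards in range(1, 8):
    # Truncate to one decimal place, matching the values logged:
    # 101.5, 50.7, 33.8, 25.3, 20.3, 16.9, 14.5
    per_shard = math.floor(TOTAL_MB / active_shards * 10) / 10
    print(f"{active_shards} active shards -> indexing=[{per_shard}mb]")
```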
[2016-08-11 12:58:47,332][DEBUG][indices.memory ] [Vader] recalculating shard indexing buffer, total is [101.5mb] with [3] active shards, each shard set to indexing=[33.8mb], translog=[64kb] [2016-08-11 12:58:47,332][DEBUG][index.shard ] [Vader] [searchguard][0] updateBufferSize: engine is closed; skipping [2016-08-11 12:58:47,333][DEBUG][index.shard ] [Vader] [person][1] updateBufferSize: engine is closed; skipping [2016-08-11 12:58:47,333][DEBUG][index.shard ] [Vader] [person][2] updateBufferSize: engine is closed; skipping [2016-08-11 12:58:47,333][DEBUG][indices.cluster ] [Vader] [person][0] creating shard [2016-08-11 12:58:47,334][DEBUG][index ] [Vader] [person] [person][0] loaded data path [/var/lib/elasticsearch/SHU/nodes/0/indices/person/0], state path [/var/lib/elasticsearch/SHU/nodes/0/indices/person/0] [2016-08-11 12:58:47,334][DEBUG][index ] [Vader] [person] [person][0] creating using an existing path [ShardPath{path=/var/lib/elasticsearch/SHU/nodes/0/indices/person/0, indexUUID='mvvBFMS_RQaOGGaGHEGIng', shard=[person][0]}] [2016-08-11 12:58:47,334][DEBUG][index ] [Vader] [person] creating shard_id [person][0] [2016-08-11 12:58:47,334][DEBUG][com.floragunn.searchguard.configuration.SearchGuardIndexSearcherWrapperModule] FLS/DLS not enabled [2016-08-11 12:58:47,368][DEBUG][index.store ] [Vader] [person][0] store stats are refreshed with refresh_interval [10s] [2016-08-11 12:58:47,371][DEBUG][index.deletionpolicy ] [Vader] [person][0] Using [keep_only_last] deletion policy [2016-08-11 12:58:47,373][DEBUG][index.shard ] [Vader] [person][0] using [tiered] merge mergePolicy with expunge_deletes_allowed[10.0], floor_segment[2mb], max_merge_at_once[10], max_merge_at_once_explicit[30], max_merged_segment[5gb], segments_per_tier[10.0], reclaim_deletes_weight[2.0] [2016-08-11 12:58:47,373][DEBUG][index.shard ] [Vader] [person][0] state: [CREATED] [2016-08-11 12:58:47,373][DEBUG][index.translog ] [Vader] [person][0] interval [5s], flush_threshold_ops [2147483647], flush_threshold_size [512mb], flush_threshold_period [30m] [2016-08-11 12:58:47,374][DEBUG][index.shard ] [Vader] [person][0] state: [CREATED]->[RECOVERING], reason [from store] [2016-08-11 12:58:47,379][DEBUG][cluster.service ] [Vader] processing [cluster_reroute(async_shard_fetch)]: took 682ms done applying updated cluster_state (version: 3, uuid: yMzN-yPgSTGPuW-CmxjisA) [2016-08-11 12:58:47,380][DEBUG][index.shard ] [Vader] [person][0] starting recovery from shard_store ... 
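Each newly created shard also logs its translog settings: interval [5s], flush_threshold_ops [2147483647], flush_threshold_size [512mb], flush_threshold_period [30m]. The ops threshold is 2^31 - 1, Java's Integer.MAX_VALUE, which in practice disables flushing by operation count; flushes on this node are therefore driven by the 512mb size threshold or the 30m period. A one-line confirmation of that constant:

```python
# The ops threshold logged above is Java's Integer.MAX_VALUE, i.e. the
# operation-count flush trigger is effectively disabled.
INT_MAX = 2**31 - 1
assert INT_MAX == 2147483647
print(f"flush_threshold_ops == Integer.MAX_VALUE: {INT_MAX}")
```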
[2016-08-11 12:58:47,382][DEBUG][indices.memory ] [Vader] recalculating shard indexing buffer, total is [101.5mb] with [4] active shards, each shard set to indexing=[25.3mb], translog=[64kb] [2016-08-11 12:58:47,391][DEBUG][index.shard ] [Vader] [searchguard][0] updateBufferSize: engine is closed; skipping [2016-08-11 12:58:47,391][DEBUG][index.shard ] [Vader] [person][0] updateBufferSize: engine is closed; skipping [2016-08-11 12:58:47,391][DEBUG][index.shard ] [Vader] [person][1] updateBufferSize: engine is closed; skipping [2016-08-11 12:58:47,391][DEBUG][index.shard ] [Vader] [person][2] updateBufferSize: engine is closed; skipping [2016-08-11 12:58:47,419][DEBUG][index.translog ] [Vader] [person][1] open uncommitted translog checkpoint Checkpoint{offset=43, numOps=0, translogFileGeneration= 62} [2016-08-11 12:58:47,412][DEBUG][index.translog ] [Vader] [person][2] open uncommitted translog checkpoint Checkpoint{offset=43, numOps=0, translogFileGeneration= 62} [2016-08-11 12:58:47,412][DEBUG][index.translog ] [Vader] [person][0] open uncommitted translog checkpoint Checkpoint{offset=43, numOps=0, translogFileGeneration= 62} [2016-08-11 12:58:47,430][DEBUG][index.translog ] [Vader] [searchguard][0] open uncommitted translog checkpoint Checkpoint{offset=43, numOps=0, translogFileGeneration= 19} [2016-08-11 12:58:47,549][DEBUG][index.shard ] [Vader] [person][0] scheduling refresher every 1s [2016-08-11 12:58:47,552][DEBUG][index.shard ] [Vader] [searchguard][0] scheduling refresher every 1s [2016-08-11 12:58:47,551][DEBUG][index.shard ] [Vader] [person][2] scheduling refresher every 1s [2016-08-11 12:58:47,550][DEBUG][index.shard ] [Vader] [person][1] scheduling refresher every 1s [2016-08-11 12:58:47,557][DEBUG][index.shard ] [Vader] [person][2] state: [RECOVERING]->[POST_RECOVERY], reason [post recovery from shard_store] [2016-08-11 12:58:47,558][DEBUG][index.shard ] [Vader] [person][2] recovery completed from [shard_store], took [264ms] [2016-08-11 12:58:47,558][DEBUG][cluster.action.shard ] [Vader] [person][2] sending shard started for target shard [[person][2], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=TFpqhQL3SrSoSZorj_cdgQ], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]], indexUUID [mvvBFMS_RQaOGGaGHEGIng], message [after recovery from store] [2016-08-11 12:58:47,558][DEBUG][index.shard ] [Vader] [person][1] state: [RECOVERING]->[POST_RECOVERY], reason [post recovery from shard_store] [2016-08-11 12:58:47,559][DEBUG][index.shard ] [Vader] [person][1] recovery completed from [shard_store], took [229ms] [2016-08-11 12:58:47,559][DEBUG][index.shard ] [Vader] [searchguard][0] state: [RECOVERING]->[POST_RECOVERY], reason [post recovery from shard_store] [2016-08-11 12:58:47,560][DEBUG][index.shard ] [Vader] [searchguard][0] recovery completed from [shard_store], took [305ms] [2016-08-11 12:58:47,560][DEBUG][cluster.action.shard ] [Vader] received shard started for target shard [[person][2], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=TFpqhQL3SrSoSZorj_cdgQ], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]], indexUUID [mvvBFMS_RQaOGGaGHEGIng], message [after recovery from store] [2016-08-11 12:58:47,560][DEBUG][cluster.service ] [Vader] processing [shard-started ([person][2], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=TFpqhQL3SrSoSZorj_cdgQ], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]), reason [after recovery from store]]: execute 
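Every translog opened above reports numOps=0, i.e. there are no uncommitted operations to replay, which is why each store recovery completes in a few hundred milliseconds; the constant offset=43 is presumably just the translog file header. When triaging a log like this, a small parser for those Checkpoint lines can confirm at a glance that recoveries were replay-free. A sketch whose regex mirrors the exact formatting above, including the stray space after translogFileGeneration=:

```python
import re

CHECKPOINT = re.compile(
    r"\[(?P<index>[^\][]+)\]\[(?P<shard>\d+)\] open uncommitted translog checkpoint "
    r"Checkpoint\{offset=(?P<offset>\d+), numOps=(?P<numops>\d+), "
    r"translogFileGeneration=\s*(?P<gen>\d+)\}"
)

line = ("[Vader] [person][1] open uncommitted translog checkpoint "
        "Checkpoint{offset=43, numOps=0, translogFileGeneration= 62}")
m = CHECKPOINT.search(line)
if m:
    # Prints: [person][1] ops to replay: 0 (generation 62)
    print(f"[{m.group('index')}][{m.group('shard')}] "
          f"ops to replay: {m.group('numops')} (generation {m.group('gen')})")
```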
[2016-08-11 12:58:47,561][DEBUG][gateway ] [Vader] [person][4] found 1 allocations of [person][4], node[null], [P], v[0], s[UNASSIGNED], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]], highest version: [120] [2016-08-11 12:58:47,561][DEBUG][gateway ] [Vader] [person][4]: allocating [[person][4], node[null], [P], v[0], s[UNASSIGNED], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]] to [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}] on primary allocation [2016-08-11 12:58:47,562][DEBUG][gateway ] [Vader] [person][3] found 1 allocations of [person][3], node[null], [P], v[0], s[UNASSIGNED], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]], highest version: [120] [2016-08-11 12:58:47,562][DEBUG][gateway ] [Vader] [person][3]: throttling allocation [[person][3], node[null], [P], v[0], s[UNASSIGNED], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]] to [[{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]] on primary allocation [2016-08-11 12:58:47,562][DEBUG][gateway ] [Vader] [.kibana][0] found 1 allocations of [.kibana][0], node[null], [P], v[0], s[UNASSIGNED], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]], highest version: [172] [2016-08-11 12:58:47,562][DEBUG][gateway ] [Vader] [.kibana][0]: throttling allocation [[.kibana][0], node[null], [P], v[0], s[UNASSIGNED], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]] to [[{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]] on primary allocation [2016-08-11 12:58:47,564][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [14][internal:cluster/shard/started] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}] [2016-08-11 12:58:47,559][DEBUG][cluster.action.shard ] [Vader] [person][1] sending shard started for target shard [[person][1], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=uh6oeFIxTpmrTOHnfQzKcA], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]], indexUUID [mvvBFMS_RQaOGGaGHEGIng], message [after recovery from store] [2016-08-11 12:58:47,565][DEBUG][cluster.action.shard ] [Vader] received shard started for target shard [[person][1], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=uh6oeFIxTpmrTOHnfQzKcA], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]], indexUUID [mvvBFMS_RQaOGGaGHEGIng], message [after recovery from store] [2016-08-11 12:58:47,565][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [15][internal:cluster/shard/started] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}] [2016-08-11 12:58:47,560][DEBUG][cluster.action.shard ] [Vader] [searchguard][0] sending shard started for target shard [[searchguard][0], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[17], s[INITIALIZING], a[id=fMsw0djWRrmbpOCiUQIJXA], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.559Z]]], indexUUID [lhqhelFtSB6ou1z_1bHm9w], message [after recovery from store] [2016-08-11 12:58:47,565][DEBUG][cluster.action.shard ] [Vader] received shard started for target shard [[searchguard][0], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[17], s[INITIALIZING], a[id=fMsw0djWRrmbpOCiUQIJXA], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.559Z]]], indexUUID [lhqhelFtSB6ou1z_1bHm9w], message [after recovery from store] [2016-08-11 
12:58:47,565][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [16][internal:cluster/shard/started] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}] [2016-08-11 12:58:47,566][DEBUG][cluster.service ] [Vader] cluster state updated, version [4], source [shard-started ([person][2], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=TFpqhQL3SrSoSZorj_cdgQ], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]), reason [after recovery from store]] [2016-08-11 12:58:47,566][DEBUG][cluster.service ] [Vader] publishing cluster state version [4] [2016-08-11 12:58:47,566][DEBUG][cluster.service ] [Vader] set local cluster state to version 4 [2016-08-11 12:58:47,566][DEBUG][cluster.action.shard ] [Vader] [searchguard][0] sending shard started for target shard [[searchguard][0], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[17], s[INITIALIZING], a[id=fMsw0djWRrmbpOCiUQIJXA], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.559Z]]], indexUUID [lhqhelFtSB6ou1z_1bHm9w], message [master {Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300} marked shard as initializing, but shard state is [POST_RECOVERY], mark shard as started] [2016-08-11 12:58:47,567][DEBUG][cluster.action.shard ] [Vader] received shard started for target shard [[searchguard][0], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[17], s[INITIALIZING], a[id=fMsw0djWRrmbpOCiUQIJXA], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.559Z]]], indexUUID [lhqhelFtSB6ou1z_1bHm9w], message [master {Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300} marked shard as initializing, but shard state is [POST_RECOVERY], mark shard as started] [2016-08-11 12:58:47,567][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [17][internal:cluster/shard/started] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}] [2016-08-11 12:58:47,571][DEBUG][indices.cluster ] [Vader] [person][4] creating shard [2016-08-11 12:58:47,571][DEBUG][index ] [Vader] [person] [person][4] loaded data path [/var/lib/elasticsearch/SHU/nodes/0/indices/person/4], state path [/var/lib/elasticsearch/SHU/nodes/0/indices/person/4] [2016-08-11 12:58:47,567][DEBUG][index.shard ] [Vader] [person][0] state: [RECOVERING]->[POST_RECOVERY], reason [post recovery from shard_store] [2016-08-11 12:58:47,572][DEBUG][index.shard ] [Vader] [person][0] recovery completed from [shard_store], took [193ms] [2016-08-11 12:58:47,572][DEBUG][cluster.action.shard ] [Vader] [person][0] sending shard started for target shard [[person][0], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=f1kewp78QdyukyJQJGFMLw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]], indexUUID [mvvBFMS_RQaOGGaGHEGIng], message [after recovery from store] [2016-08-11 12:58:47,572][DEBUG][cluster.action.shard ] [Vader] received shard started for target shard [[person][0], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=f1kewp78QdyukyJQJGFMLw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]], indexUUID [mvvBFMS_RQaOGGaGHEGIng], message [after recovery from store] [2016-08-11 12:58:47,572][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [18][internal:cluster/shard/started] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}] [2016-08-11 12:58:47,572][DEBUG][index ] [Vader] [person] 
[person][4] creating using an existing path [ShardPath{path=/var/lib/elasticsearch/SHU/nodes/0/indices/person/4, indexUUID='mvvBFMS_RQaOGGaGHEGIng', shard=[person][4]}] [2016-08-11 12:58:47,573][DEBUG][index ] [Vader] [person] creating shard_id [person][4] [2016-08-11 12:58:47,573][DEBUG][com.floragunn.searchguard.configuration.SearchGuardIndexSearcherWrapperModule] FLS/DLS not enabled [2016-08-11 12:58:47,582][DEBUG][index.store ] [Vader] [person][4] store stats are refreshed with refresh_interval [10s] [2016-08-11 12:58:47,584][DEBUG][index.deletionpolicy ] [Vader] [person][4] Using [keep_only_last] deletion policy [2016-08-11 12:58:47,587][DEBUG][index.shard ] [Vader] [person][4] using [tiered] merge mergePolicy with expunge_deletes_allowed[10.0], floor_segment[2mb], max_merge_at_once[10], max_merge_at_once_explicit[30], max_merged_segment[5gb], segments_per_tier[10.0], reclaim_deletes_weight[2.0] [2016-08-11 12:58:47,588][DEBUG][index.shard ] [Vader] [person][4] state: [CREATED] [2016-08-11 12:58:47,588][DEBUG][index.translog ] [Vader] [person][4] interval [5s], flush_threshold_ops [2147483647], flush_threshold_size [512mb], flush_threshold_period [30m] [2016-08-11 12:58:47,589][DEBUG][index.shard ] [Vader] [person][4] state: [CREATED]->[RECOVERING], reason [from store] [2016-08-11 12:58:47,589][DEBUG][index.shard ] [Vader] [person][4] starting recovery from shard_store ... [2016-08-11 12:58:47,590][DEBUG][indices.memory ] [Vader] recalculating shard indexing buffer, total is [101.5mb] with [5] active shards, each shard set to indexing=[20.3mb], translog=[64kb] [2016-08-11 12:58:47,591][DEBUG][index.shard ] [Vader] [searchguard][0] updating index_buffer_size from [25.3mb] to [20.3mb]; IndexWriter now using [0] bytes [2016-08-11 12:58:47,591][DEBUG][index.shard ] [Vader] [person][0] updating index_buffer_size from [25.3mb] to [20.3mb]; IndexWriter now using [0] bytes [2016-08-11 12:58:47,592][DEBUG][index.shard ] [Vader] [person][1] updating index_buffer_size from [25.3mb] to [20.3mb]; IndexWriter now using [0] bytes [2016-08-11 12:58:47,592][DEBUG][index.shard ] [Vader] [person][2] updating index_buffer_size from [25.3mb] to [20.3mb]; IndexWriter now using [0] bytes [2016-08-11 12:58:47,592][DEBUG][index.shard ] [Vader] [person][4] updateBufferSize: engine is closed; skipping [2016-08-11 12:58:47,600][DEBUG][index.shard ] [Vader] [person][2] state: [POST_RECOVERY]->[STARTED], reason [global state is [STARTED]] [2016-08-11 12:58:47,608][DEBUG][index.translog ] [Vader] [person][4] open uncommitted translog checkpoint Checkpoint{offset=43, numOps=0, translogFileGeneration= 62} [2016-08-11 12:58:47,645][DEBUG][index.shard ] [Vader] [person][4] scheduling refresher every 1s [2016-08-11 12:58:47,646][DEBUG][index.shard ] [Vader] [person][4] state: [RECOVERING]->[POST_RECOVERY], reason [post recovery from shard_store] [2016-08-11 12:58:47,646][DEBUG][index.shard ] [Vader] [person][4] recovery completed from [shard_store], took [57ms] [2016-08-11 12:58:47,647][DEBUG][cluster.action.shard ] [Vader] [person][1] sending shard started for target shard [[person][1], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=uh6oeFIxTpmrTOHnfQzKcA], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]], indexUUID [mvvBFMS_RQaOGGaGHEGIng], message [master {Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300} marked shard as initializing, but shard state is [POST_RECOVERY], mark shard as started] [2016-08-11 12:58:47,648][DEBUG][cluster.action.shard ] [Vader] received 
shard started for target shard [[person][1], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=uh6oeFIxTpmrTOHnfQzKcA], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]], indexUUID [mvvBFMS_RQaOGGaGHEGIng], message [master {Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300} marked shard as initializing, but shard state is [POST_RECOVERY], mark shard as started] [2016-08-11 12:58:47,649][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [19][internal:cluster/shard/started] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}] [2016-08-11 12:58:47,649][DEBUG][cluster.action.shard ] [Vader] [person][0] sending shard started for target shard [[person][0], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=f1kewp78QdyukyJQJGFMLw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]], indexUUID [mvvBFMS_RQaOGGaGHEGIng], message [master {Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300} marked shard as initializing, but shard state is [POST_RECOVERY], mark shard as started] [2016-08-11 12:58:47,649][DEBUG][cluster.action.shard ] [Vader] received shard started for target shard [[person][0], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=f1kewp78QdyukyJQJGFMLw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]], indexUUID [mvvBFMS_RQaOGGaGHEGIng], message [master {Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300} marked shard as initializing, but shard state is [POST_RECOVERY], mark shard as started] [2016-08-11 12:58:47,649][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [20][internal:cluster/shard/started] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}] [2016-08-11 12:58:47,650][DEBUG][cluster.service ] [Vader] processing [shard-started ([person][2], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=TFpqhQL3SrSoSZorj_cdgQ], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]), reason [after recovery from store]]: took 89ms done applying updated cluster_state (version: 4, uuid: AJXuzJYgTNCioXSre7C7fg) [2016-08-11 12:58:47,654][DEBUG][cluster.service ] [Vader] processing [shard-started ([person][1], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=uh6oeFIxTpmrTOHnfQzKcA], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]), reason [after recovery from store],shard-started ([searchguard][0], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[17], s[INITIALIZING], a[id=fMsw0djWRrmbpOCiUQIJXA], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.559Z]]), reason [after recovery from store],shard-started ([searchguard][0], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[17], s[INITIALIZING], a[id=fMsw0djWRrmbpOCiUQIJXA], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.559Z]]), reason [master {Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300} marked shard as initializing, but shard state is [POST_RECOVERY], mark shard as started],shard-started ([person][0], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=f1kewp78QdyukyJQJGFMLw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]), reason [after recovery from store],shard-started ([person][1], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=uh6oeFIxTpmrTOHnfQzKcA], unassigned_info[[reason=CLUSTER_RECOVERED], 
at[2016-08-11T12:58:46.562Z]]), reason [master {Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300} marked shard as initializing, but shard state is [POST_RECOVERY], mark shard as started],shard-started ([person][0], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=f1kewp78QdyukyJQJGFMLw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]), reason [master {Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300} marked shard as initializing, but shard state is [POST_RECOVERY], mark shard as started]]: execute [2016-08-11 12:58:47,651][DEBUG][cluster.action.shard ] [Vader] [person][4] sending shard started for target shard [[person][4], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=HBW8GNKST7mwbRo5pc7xlw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]], indexUUID [mvvBFMS_RQaOGGaGHEGIng], message [after recovery from store] [2016-08-11 12:58:47,655][DEBUG][cluster.action.shard ] [Vader] received shard started for target shard [[person][4], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=HBW8GNKST7mwbRo5pc7xlw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]], indexUUID [mvvBFMS_RQaOGGaGHEGIng], message [after recovery from store] [2016-08-11 12:58:47,655][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [21][internal:cluster/shard/started] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}] [2016-08-11 12:58:47,655][DEBUG][gateway ] [Vader] [person][3] found 1 allocations of [person][3], node[null], [P], v[0], s[UNASSIGNED], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]], highest version: [120] [2016-08-11 12:58:47,656][DEBUG][gateway ] [Vader] [person][3]: allocating [[person][3], node[null], [P], v[0], s[UNASSIGNED], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]] to [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}] on primary allocation [2016-08-11 12:58:47,656][DEBUG][gateway ] [Vader] [.kibana][0] found 1 allocations of [.kibana][0], node[null], [P], v[0], s[UNASSIGNED], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]], highest version: [172] [2016-08-11 12:58:47,656][DEBUG][gateway ] [Vader] [.kibana][0]: allocating [[.kibana][0], node[null], [P], v[0], s[UNASSIGNED], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]] to [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}] on primary allocation [2016-08-11 12:58:47,663][DEBUG][cluster.service ] [Vader] cluster state updated, version [5], source [shard-started ([person][1], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=uh6oeFIxTpmrTOHnfQzKcA], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]), reason [after recovery from store],shard-started ([searchguard][0], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[17], s[INITIALIZING], a[id=fMsw0djWRrmbpOCiUQIJXA], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.559Z]]), reason [after recovery from store],shard-started ([searchguard][0], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[17], s[INITIALIZING], a[id=fMsw0djWRrmbpOCiUQIJXA], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.559Z]]), reason [master {Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300} marked shard as initializing, but shard state is [POST_RECOVERY], mark shard as started],shard-started ([person][0], node[hnX5TnGhR_KVe9-J-iq5JQ], 
[P], v[121], s[INITIALIZING], a[id=f1kewp78QdyukyJQJGFMLw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]), reason [after recovery from store],shard-started ([person][1], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=uh6oeFIxTpmrTOHnfQzKcA], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]), reason [master {Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300} marked shard as initializing, but shard state is [POST_RECOVERY], mark shard as started],shard-started ([person][0], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=f1kewp78QdyukyJQJGFMLw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]), reason [master {Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300} marked shard as initializing, but shard state is [POST_RECOVERY], mark shard as started]] [2016-08-11 12:58:47,663][DEBUG][cluster.service ] [Vader] publishing cluster state version [5] [2016-08-11 12:58:47,663][DEBUG][cluster.service ] [Vader] set local cluster state to version 5 [2016-08-11 12:58:47,663][DEBUG][indices.cluster ] [Vader] [.kibana] creating index [2016-08-11 12:58:47,665][DEBUG][indices ] [Vader] creating Index [.kibana], shards [1]/[1] [2016-08-11 12:58:47,675][DEBUG][index.store ] [Vader] [.kibana] using index.store.throttle.type [none], with index.store.throttle.max_bytes_per_sec [0b] [2016-08-11 12:58:47,685][DEBUG][index.mapper ] [Vader] [.kibana] using dynamic[true] [2016-08-11 12:58:47,686][DEBUG][indices.cluster ] [Vader] [.kibana] adding mapping [search], source [{"search":{"properties":{"columns":{"type":"string"},"description":{"type":"string"},"hits":{"type":"integer"},"kibanaSavedObjectMeta":{"properties":{"searchSourceJSON":{"type":"string"}}},"sort":{"type":"string"},"title":{"type":"string"},"version":{"type":"integer"}}}}] [2016-08-11 12:58:47,697][DEBUG][indices.cluster ] [Vader] [.kibana] adding mapping [dashboard], source [{"dashboard":{"properties":{"description":{"type":"string"},"hits":{"type":"integer"},"kibanaSavedObjectMeta":{"properties":{"searchSourceJSON":{"type":"string"}}},"optionsJSON":{"type":"string"},"panelsJSON":{"type":"string"},"timeFrom":{"type":"string"},"timeRestore":{"type":"boolean"},"timeTo":{"type":"string"},"title":{"type":"string"},"uiStateJSON":{"type":"string"},"version":{"type":"integer"}}}}] [2016-08-11 12:58:47,701][DEBUG][indices.cluster ] [Vader] [.kibana] adding mapping [visualization], source [{"visualization":{"properties":{"description":{"type":"string"},"kibanaSavedObjectMeta":{"properties":{"searchSourceJSON":{"type":"string"}}},"savedSearchId":{"type":"string"},"title":{"type":"string"},"uiStateJSON":{"type":"string"},"version":{"type":"integer"},"visState":{"type":"string"}}}}] [2016-08-11 12:58:47,711][DEBUG][indices.cluster ] [Vader] [.kibana] adding mapping [index-pattern], source [{"index-pattern":{"properties":{"fieldFormatMap":{"type":"string"},"fields":{"type":"string"},"intervalName":{"type":"string"},"notExpandable":{"type":"boolean"},"timeFieldName":{"type":"string"},"title":{"type":"string"}}}}] [2016-08-11 12:58:47,713][DEBUG][indices.cluster ] [Vader] [.kibana] adding mapping [config], source [{"config":{"properties":{"buildNum":{"type":"string","index":"not_analyzed"},"defaultIndex":{"type":"string"}}}}] [2016-08-11 12:58:47,725][DEBUG][index.shard ] [Vader] [searchguard][0] state: [POST_RECOVERY]->[STARTED], reason [global state is [STARTED]] [2016-08-11 12:58:47,733][DEBUG][indices.cluster ] [Vader] [.kibana][0] 
creating shard [2016-08-11 12:58:47,733][DEBUG][index ] [Vader] [.kibana] [.kibana][0] loaded data path [/var/lib/elasticsearch/SHU/nodes/0/indices/.kibana/0], state path [/var/lib/elasticsearch/SHU/nodes/0/indices/.kibana/0] [2016-08-11 12:58:47,733][DEBUG][index ] [Vader] [.kibana] [.kibana][0] creating using an existing path [ShardPath{path=/var/lib/elasticsearch/SHU/nodes/0/indices/.kibana/0, indexUUID='tUl6piVEQc6T5-5XMfP_aA', shard=[.kibana][0]}] [2016-08-11 12:58:47,733][DEBUG][index ] [Vader] [.kibana] creating shard_id [.kibana][0] [2016-08-11 12:58:47,734][DEBUG][com.floragunn.searchguard.configuration.SearchGuardIndexSearcherWrapperModule] FLS/DLS not enabled [2016-08-11 12:58:47,737][DEBUG][index.store ] [Vader] [.kibana][0] store stats are refreshed with refresh_interval [10s] [2016-08-11 12:58:47,743][DEBUG][index.deletionpolicy ] [Vader] [.kibana][0] Using [keep_only_last] deletion policy [2016-08-11 12:58:47,744][DEBUG][index.shard ] [Vader] [.kibana][0] using [tiered] merge mergePolicy with expunge_deletes_allowed[10.0], floor_segment[2mb], max_merge_at_once[10], max_merge_at_once_explicit[30], max_merged_segment[5gb], segments_per_tier[10.0], reclaim_deletes_weight[2.0] [2016-08-11 12:58:47,744][DEBUG][index.shard ] [Vader] [.kibana][0] state: [CREATED] [2016-08-11 12:58:47,745][DEBUG][index.translog ] [Vader] [.kibana][0] interval [5s], flush_threshold_ops [2147483647], flush_threshold_size [512mb], flush_threshold_period [30m] [2016-08-11 12:58:47,745][DEBUG][index.shard ] [Vader] [.kibana][0] state: [CREATED]->[RECOVERING], reason [from store] [2016-08-11 12:58:47,747][DEBUG][index.shard ] [Vader] [.kibana][0] starting recovery from shard_store ... [2016-08-11 12:58:47,748][DEBUG][indices.memory ] [Vader] recalculating shard indexing buffer, total is [101.5mb] with [6] active shards, each shard set to indexing=[16.9mb], translog=[64kb] [2016-08-11 12:58:47,748][DEBUG][index.shard ] [Vader] [searchguard][0] updating index_buffer_size from [20.3mb] to [16.9mb]; IndexWriter now using [0] bytes [2016-08-11 12:58:47,748][DEBUG][index.shard ] [Vader] [.kibana][0] updateBufferSize: engine is closed; skipping [2016-08-11 12:58:47,748][DEBUG][index.shard ] [Vader] [person][0] updating index_buffer_size from [20.3mb] to [16.9mb]; IndexWriter now using [0] bytes [2016-08-11 12:58:47,748][DEBUG][index.shard ] [Vader] [person][1] updating index_buffer_size from [20.3mb] to [16.9mb]; IndexWriter now using [0] bytes [2016-08-11 12:58:47,749][DEBUG][index.shard ] [Vader] [person][2] updating index_buffer_size from [20.3mb] to [16.9mb]; IndexWriter now using [0] bytes [2016-08-11 12:58:47,749][DEBUG][index.shard ] [Vader] [person][4] updating index_buffer_size from [20.3mb] to [16.9mb]; IndexWriter now using [0] bytes [2016-08-11 12:58:47,751][DEBUG][cluster.action.shard ] [Vader] [person][4] sending shard started for target shard [[person][4], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=HBW8GNKST7mwbRo5pc7xlw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]], indexUUID [mvvBFMS_RQaOGGaGHEGIng], message [master {Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300} marked shard as initializing, but shard state is [POST_RECOVERY], mark shard as started] [2016-08-11 12:58:47,751][DEBUG][cluster.action.shard ] [Vader] received shard started for target shard [[person][4], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=HBW8GNKST7mwbRo5pc7xlw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]], 
indexUUID [mvvBFMS_RQaOGGaGHEGIng], message [master {Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300} marked shard as initializing, but shard state is [POST_RECOVERY], mark shard as started] [2016-08-11 12:58:47,751][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [22][internal:cluster/shard/started] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}] [2016-08-11 12:58:47,751][DEBUG][index.translog ] [Vader] [.kibana][0] open uncommitted translog checkpoint Checkpoint{offset=43, numOps=0, translogFileGeneration= 88} [2016-08-11 12:58:47,754][DEBUG][index.shard ] [Vader] [person][1] state: [POST_RECOVERY]->[STARTED], reason [global state is [STARTED]] [2016-08-11 12:58:47,769][DEBUG][indices.cluster ] [Vader] [person][3] creating shard [2016-08-11 12:58:47,769][DEBUG][index ] [Vader] [person] [person][3] loaded data path [/var/lib/elasticsearch/SHU/nodes/0/indices/person/3], state path [/var/lib/elasticsearch/SHU/nodes/0/indices/person/3] [2016-08-11 12:58:47,769][DEBUG][index ] [Vader] [person] [person][3] creating using an existing path [ShardPath{path=/var/lib/elasticsearch/SHU/nodes/0/indices/person/3, indexUUID='mvvBFMS_RQaOGGaGHEGIng', shard=[person][3]}] [2016-08-11 12:58:47,769][DEBUG][index ] [Vader] [person] creating shard_id [person][3] [2016-08-11 12:58:47,770][DEBUG][com.floragunn.searchguard.configuration.SearchGuardIndexSearcherWrapperModule] FLS/DLS not enabled [2016-08-11 12:58:47,776][DEBUG][index.shard ] [Vader] [.kibana][0] scheduling refresher every 1s [2016-08-11 12:58:47,783][DEBUG][index.shard ] [Vader] [.kibana][0] state: [RECOVERING]->[POST_RECOVERY], reason [post recovery from shard_store] [2016-08-11 12:58:47,783][DEBUG][index.shard ] [Vader] [.kibana][0] recovery completed from [shard_store], took [37ms] [2016-08-11 12:58:47,783][DEBUG][cluster.action.shard ] [Vader] [.kibana][0] sending shard started for target shard [[.kibana][0], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[173], s[INITIALIZING], a[id=1ajQwD59Tem3O0UVwa7zEw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]], indexUUID [tUl6piVEQc6T5-5XMfP_aA], message [after recovery from store] [2016-08-11 12:58:47,784][DEBUG][cluster.action.shard ] [Vader] received shard started for target shard [[.kibana][0], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[173], s[INITIALIZING], a[id=1ajQwD59Tem3O0UVwa7zEw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]], indexUUID [tUl6piVEQc6T5-5XMfP_aA], message [after recovery from store] [2016-08-11 12:58:47,785][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [23][internal:cluster/shard/started] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}] [2016-08-11 12:58:47,785][DEBUG][index.store ] [Vader] [person][3] store stats are refreshed with refresh_interval [10s] [2016-08-11 12:58:47,786][DEBUG][index.deletionpolicy ] [Vader] [person][3] Using [keep_only_last] deletion policy [2016-08-11 12:58:47,786][DEBUG][index.shard ] [Vader] [person][3] using [tiered] merge mergePolicy with expunge_deletes_allowed[10.0], floor_segment[2mb], max_merge_at_once[10], max_merge_at_once_explicit[30], max_merged_segment[5gb], segments_per_tier[10.0], reclaim_deletes_weight[2.0] [2016-08-11 12:58:47,786][DEBUG][index.shard ] [Vader] [person][3] state: [CREATED] [2016-08-11 12:58:47,787][DEBUG][index.translog ] [Vader] [person][3] interval [5s], flush_threshold_ops [2147483647], flush_threshold_size 
[512mb], flush_threshold_period [30m] [2016-08-11 12:58:47,787][DEBUG][index.shard ] [Vader] [person][3] state: [CREATED]->[RECOVERING], reason [from store] [2016-08-11 12:58:47,787][DEBUG][index.shard ] [Vader] [person][3] starting recovery from shard_store ... [2016-08-11 12:58:47,791][DEBUG][indices.memory ] [Vader] recalculating shard indexing buffer, total is [101.5mb] with [7] active shards, each shard set to indexing=[14.5mb], translog=[64kb] [2016-08-11 12:58:47,791][DEBUG][index.shard ] [Vader] [searchguard][0] updating index_buffer_size from [16.9mb] to [14.5mb]; IndexWriter now using [0] bytes [2016-08-11 12:58:47,791][DEBUG][index.shard ] [Vader] [.kibana][0] updating index_buffer_size from [16.9mb] to [14.5mb]; IndexWriter now using [0] bytes [2016-08-11 12:58:47,791][DEBUG][index.shard ] [Vader] [person][0] updating index_buffer_size from [16.9mb] to [14.5mb]; IndexWriter now using [0] bytes [2016-08-11 12:58:47,791][DEBUG][index.shard ] [Vader] [person][1] updating index_buffer_size from [16.9mb] to [14.5mb]; IndexWriter now using [0] bytes [2016-08-11 12:58:47,792][DEBUG][index.shard ] [Vader] [person][2] updating index_buffer_size from [16.9mb] to [14.5mb]; IndexWriter now using [0] bytes [2016-08-11 12:58:47,792][DEBUG][index.shard ] [Vader] [person][3] updateBufferSize: engine is closed; skipping [2016-08-11 12:58:47,792][DEBUG][index.shard ] [Vader] [person][4] updating index_buffer_size from [16.9mb] to [14.5mb]; IndexWriter now using [0] bytes [2016-08-11 12:58:47,792][DEBUG][index.shard ] [Vader] [person][0] state: [POST_RECOVERY]->[STARTED], reason [global state is [STARTED]] [2016-08-11 12:58:47,805][DEBUG][index.translog ] [Vader] [person][3] open uncommitted translog checkpoint Checkpoint{offset=43, numOps=0, translogFileGeneration= 62} [2016-08-11 12:58:47,807][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action indices:admin/exists from null/ [2016-08-11 12:58:47,808][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context [] [2016-08-11 12:58:47,808][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header [_sg_conf_request] [2016-08-11 12:58:47,808][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null [2016-08-11 12:58:47,806][DEBUG][cluster.service ] [Vader] processing [shard-started ([person][1], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=uh6oeFIxTpmrTOHnfQzKcA], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]), reason [after recovery from store],shard-started ([searchguard][0], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[17], s[INITIALIZING], a[id=fMsw0djWRrmbpOCiUQIJXA], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.559Z]]), reason [after recovery from store],shard-started ([searchguard][0], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[17], s[INITIALIZING], a[id=fMsw0djWRrmbpOCiUQIJXA], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.559Z]]), reason [master {Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300} marked shard as initializing, but shard state is [POST_RECOVERY], mark shard as started],shard-started ([person][0], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=f1kewp78QdyukyJQJGFMLw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]), reason [after recovery from store],shard-started ([person][1], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=uh6oeFIxTpmrTOHnfQzKcA], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]), reason 
[master {Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300} marked shard as initializing, but shard state is [POST_RECOVERY], mark shard as started],shard-started ([person][0], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=f1kewp78QdyukyJQJGFMLw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]), reason [master {Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300} marked shard as initializing, but shard state is [POST_RECOVERY], mark shard as started]]: took 152ms done applying updated cluster_state (version: 5, uuid: OaayVrbpQtKhTXS2yg8cBA) [2016-08-11 12:58:47,808][DEBUG][cluster.service ] [Vader] processing [shard-started ([person][4], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=HBW8GNKST7mwbRo5pc7xlw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]), reason [after recovery from store],shard-started ([person][4], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=HBW8GNKST7mwbRo5pc7xlw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]), reason [master {Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300} marked shard as initializing, but shard state is [POST_RECOVERY], mark shard as started],shard-started ([.kibana][0], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[173], s[INITIALIZING], a[id=1ajQwD59Tem3O0UVwa7zEw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]), reason [after recovery from store]]: execute [2016-08-11 12:58:47,810][DEBUG][com.floragunn.searchguard.configuration.ConfigurationLoader] searchguard index exists [2016-08-11 12:58:47,815][DEBUG][cluster.service ] [Vader] cluster state updated, version [6], source [shard-started ([person][4], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=HBW8GNKST7mwbRo5pc7xlw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]), reason [after recovery from store],shard-started ([person][4], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=HBW8GNKST7mwbRo5pc7xlw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]), reason [master {Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300} marked shard as initializing, but shard state is [POST_RECOVERY], mark shard as started],shard-started ([.kibana][0], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[173], s[INITIALIZING], a[id=1ajQwD59Tem3O0UVwa7zEw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]), reason [after recovery from store]] [2016-08-11 12:58:47,815][DEBUG][cluster.service ] [Vader] publishing cluster state version [6] [2016-08-11 12:58:47,815][DEBUG][cluster.service ] [Vader] set local cluster state to version 6 [2016-08-11 12:58:47,816][DEBUG][index.shard ] [Vader] [.kibana][0] state: [POST_RECOVERY]->[STARTED], reason [global state is [STARTED]] [2016-08-11 12:58:47,817][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action indices:data/read/mget from null/ [2016-08-11 12:58:47,821][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context [] [2016-08-11 12:58:47,821][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header [_sg_conf_request] [2016-08-11 12:58:47,821][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null [2016-08-11 12:58:47,823][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action indices:data/read/mget[shard] from null/ [2016-08-11 12:58:47,824][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context [] [2016-08-11 
12:58:47,824][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header [_sg_conf_request] [2016-08-11 12:58:47,824][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null [2016-08-11 12:58:47,826][DEBUG][index.shard ] [Vader] [person][4] state: [POST_RECOVERY]->[STARTED], reason [global state is [STARTED]] [2016-08-11 12:58:47,833][DEBUG][cluster.service ] [Vader] processing [shard-started ([person][4], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=HBW8GNKST7mwbRo5pc7xlw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]), reason [after recovery from store],shard-started ([person][4], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=HBW8GNKST7mwbRo5pc7xlw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]), reason [master {Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300} marked shard as initializing, but shard state is [POST_RECOVERY], mark shard as started],shard-started ([.kibana][0], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[173], s[INITIALIZING], a[id=1ajQwD59Tem3O0UVwa7zEw], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]), reason [after recovery from store]]: took 23ms done applying updated cluster_state (version: 6, uuid: Bbi-25fVRm2nLv6z3ahZCg) [2016-08-11 12:58:47,849][DEBUG][index.shard ] [Vader] [person][3] scheduling refresher every 1s [2016-08-11 12:58:47,851][DEBUG][index.shard ] [Vader] [person][3] state: [RECOVERING]->[POST_RECOVERY], reason [post recovery from shard_store] [2016-08-11 12:58:47,851][DEBUG][index.shard ] [Vader] [person][3] recovery completed from [shard_store], took [63ms] [2016-08-11 12:58:47,851][DEBUG][cluster.action.shard ] [Vader] [person][3] sending shard started for target shard [[person][3], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=slyVxRluTr-XgUXSl7ultQ], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]], indexUUID [mvvBFMS_RQaOGGaGHEGIng], message [after recovery from store] [2016-08-11 12:58:47,852][DEBUG][cluster.action.shard ] [Vader] received shard started for target shard [[person][3], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=slyVxRluTr-XgUXSl7ultQ], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]], indexUUID [mvvBFMS_RQaOGGaGHEGIng], message [after recovery from store] [2016-08-11 12:58:47,853][DEBUG][cluster.service ] [Vader] processing [shard-started ([person][3], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=slyVxRluTr-XgUXSl7ultQ], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]), reason [after recovery from store]]: execute [2016-08-11 12:58:47,854][INFO ][cluster.routing.allocation] [Vader] Cluster health status changed from [RED] to [YELLOW] (reason: [shards started [[person][3]] ...]). 
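With [person][3] started, every primary in the cluster is active, so routing.allocation flips health from [RED] to [YELLOW]. GREEN is unreachable on this single node: [person] was created with shards [5]/[1] and [.kibana] with [1]/[1], and a replica can never be allocated to the same node as its primary, so those replica shards stay unassigned. The status is easy to confirm over the REST API; a minimal sketch, assuming the default HTTP port 9200 and plain-HTTP basic auth with admin/admin credentials (both are assumptions about the Search Guard HTTP configuration, not taken from this log):

```python
import requests

# Hypothetical endpoint and credentials for illustration; adjust scheme,
# port, auth, and TLS verification to match the actual HTTP config.
resp = requests.get(
    "http://127.0.0.1:9200/_cluster/health",
    auth=("admin", "admin"),
    timeout=5,
)
health = resp.json()
# Expect "yellow" here: all primaries active, replicas unassigned on one node.
print(health["status"], "unassigned shards:", health["unassigned_shards"])
```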
[2016-08-11 12:58:47,855][DEBUG][cluster.service ] [Vader] cluster state updated, version [7], source [shard-started ([person][3], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=slyVxRluTr-XgUXSl7ultQ], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]), reason [after recovery from store]]
[2016-08-11 12:58:47,855][DEBUG][cluster.service ] [Vader] publishing cluster state version [7]
[2016-08-11 12:58:47,855][DEBUG][cluster.service ] [Vader] set local cluster state to version 7
[2016-08-11 12:58:47,855][DEBUG][index.shard ] [Vader] [person][3] state: [POST_RECOVERY]->[STARTED], reason [global state is [STARTED]]
[2016-08-11 12:58:47,856][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [25][internal:cluster/shard/started] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 12:58:47,862][DEBUG][cluster.service ] [Vader] processing [shard-started ([person][3], node[hnX5TnGhR_KVe9-J-iq5JQ], [P], v[121], s[INITIALIZING], a[id=slyVxRluTr-XgUXSl7ultQ], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-08-11T12:58:46.562Z]]), reason [after recovery from store]]: took 8ms done applying updated cluster_state (version: 7, uuid: Ch_4fWfOSPuAMYhwauomhQ)
[2016-08-11 12:59:07,561][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel opened: [id: 0x1cd15d99, /127.0.0.1:48702 => /127.0.0.1:9300]
[2016-08-11 12:59:07,582][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel closed: [id: 0x1cd15d99, /127.0.0.1:48702 => /127.0.0.1:9300]
[2016-08-11 12:59:07,584][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] Node to Node encryption cipher is NONE/SSL_NULL_WITH_NULL_NULL
[2016-08-11 12:59:09,483][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel opened: [id: 0x3e5063ab, /127.0.0.1:48704 => /127.0.0.1:9300]
[2016-08-11 12:59:09,953][DEBUG][netty.handler.ssl.SslHandler] [id: 0x3e5063ab, /127.0.0.1:48704 => /127.0.0.1:9300] HANDSHAKEN: TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:09,953][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] Node to Node encryption cipher is TLSv1.2/TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:09,956][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 12:59:09,959][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 12:59:09,990][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel opened: [id: 0xfd0d057c, /127.0.0.1:48706 => /127.0.0.1:9300]
[2016-08-11 12:59:09,997][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel opened: [id: 0x00c40ac4, /127.0.0.1:48708 => /127.0.0.1:9300]
[2016-08-11 12:59:10,056][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel opened: [id: 0xa9506923, /127.0.0.1:48710 => /127.0.0.1:9300]
[2016-08-11 12:59:10,072][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel opened: [id: 0xf19172aa, /127.0.0.1:48712 => /127.0.0.1:9300]
[2016-08-11 12:59:10,090][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel opened: [id: 0xe98ffdcc, /127.0.0.1:48714 => /127.0.0.1:9300]
[2016-08-11 12:59:10,100][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel opened: [id: 0x1b129d32, /127.0.0.1:48716 => /127.0.0.1:9300]
[2016-08-11 12:59:10,101][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel opened: [id: 0xe0536dc8, /127.0.0.1:48718 => /127.0.0.1:9300]
[2016-08-11 12:59:10,136][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel opened: [id: 0x59fc02e7, /127.0.0.1:48720 => /127.0.0.1:9300]
[2016-08-11 12:59:10,137][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel opened: [id: 0x05a17020, /127.0.0.1:48722 => /127.0.0.1:9300]
[2016-08-11 12:59:10,139][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel opened: [id: 0xf7a5723f, /127.0.0.1:48724 => /127.0.0.1:9300]
[2016-08-11 12:59:10,177][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel opened: [id: 0xd7d399bb, /127.0.0.1:48726 => /127.0.0.1:9300]
[2016-08-11 12:59:10,178][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel opened: [id: 0x7ee1aa71, /127.0.0.1:48728 => /127.0.0.1:9300]
[2016-08-11 12:59:10,217][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel opened: [id: 0x4e2eb814, /127.0.0.1:48730 => /127.0.0.1:9300]
[2016-08-11 12:59:10,456][DEBUG][netty.handler.ssl.SslHandler] [id: 0x00c40ac4, /127.0.0.1:48708 => /127.0.0.1:9300] HANDSHAKEN: TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:10,456][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] Node to Node encryption cipher is TLSv1.2/TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:10,687][DEBUG][netty.handler.ssl.SslHandler] [id: 0xfd0d057c, /127.0.0.1:48706 => /127.0.0.1:9300] HANDSHAKEN: TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:10,695][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] Node to Node encryption cipher is TLSv1.2/TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:11,025][DEBUG][netty.handler.ssl.SslHandler] [id: 0xf19172aa, /127.0.0.1:48712 => /127.0.0.1:9300] HANDSHAKEN: TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:11,025][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] Node to Node encryption cipher is TLSv1.2/TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:11,113][DEBUG][netty.handler.ssl.SslHandler] [id: 0x1b129d32, /127.0.0.1:48716 => /127.0.0.1:9300] HANDSHAKEN: TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:11,115][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] Node to Node encryption cipher is TLSv1.2/TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:11,127][DEBUG][netty.handler.ssl.SslHandler] [id: 0x05a17020, /127.0.0.1:48722 => /127.0.0.1:9300] HANDSHAKEN: TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:11,131][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] Node to Node encryption cipher is TLSv1.2/TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:11,186][DEBUG][netty.handler.ssl.SslHandler] [id: 0x59fc02e7, /127.0.0.1:48720 => /127.0.0.1:9300] HANDSHAKEN: TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:11,186][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] Node to Node encryption cipher is TLSv1.2/TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:11,212][DEBUG][netty.handler.ssl.SslHandler] [id: 0xd7d399bb, /127.0.0.1:48726 => /127.0.0.1:9300] HANDSHAKEN: TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:11,213][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] Node to Node encryption cipher is TLSv1.2/TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:11,239][DEBUG][netty.handler.ssl.SslHandler] [id: 0xf7a5723f, /127.0.0.1:48724 => /127.0.0.1:9300] HANDSHAKEN: TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:11,243][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] Node to Node encryption cipher is TLSv1.2/TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:11,294][DEBUG][netty.handler.ssl.SslHandler] [id: 0xe98ffdcc, /127.0.0.1:48714 => /127.0.0.1:9300] HANDSHAKEN: TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:11,294][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] Node to Node encryption cipher is TLSv1.2/TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:11,323][DEBUG][netty.handler.ssl.SslHandler] [id: 0x7ee1aa71, /127.0.0.1:48728 => /127.0.0.1:9300] HANDSHAKEN: TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:11,324][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] Node to Node encryption cipher is TLSv1.2/TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:11,346][DEBUG][netty.handler.ssl.SslHandler] [id: 0xa9506923, /127.0.0.1:48710 => /127.0.0.1:9300] HANDSHAKEN: TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:11,346][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] Node to Node encryption cipher is TLSv1.2/TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:11,388][DEBUG][netty.handler.ssl.SslHandler] [id: 0xe0536dc8, /127.0.0.1:48718 => /127.0.0.1:9300] HANDSHAKEN: TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:11,388][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] Node to Node encryption cipher is TLSv1.2/TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:11,389][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [1][cluster:monitor/health] received request
[2016-08-11 12:59:11,389][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 12:59:11,389][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 12:59:11,391][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action cluster:monitor/health from 127.0.0.1:48718/
[2016-08-11 12:59:11,391][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context [_sg_ssl_transport_peer_certificates=>[Ljava.security.cert.X509Certificate;@7d58ef5c, _sg_channel_type=>netty, _sg_ssl_transport_protocol=>TLSv1.2, _sg_ssl_transport_cipher=>TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, _sg_ssl_transport_principal=>CN=kirk,OU=client,O=client,L=Test,C=DE, _sg_remote_address=>127.0.0.1:48718, _sg_user=>User [name=CN=kirk,OU=client,O=client,L=Test,C=DE, roles=[]]]
[2016-08-11 12:59:11,391][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 12:59:11,391][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: 127.0.0.1:48718
[2016-08-11 12:59:11,398][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [1][cluster:monitor/health] sent response
[2016-08-11 12:59:11,438][DEBUG][netty.handler.ssl.SslHandler] [id: 0x4e2eb814, /127.0.0.1:48730 => /127.0.0.1:9300] HANDSHAKEN: TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:11,438][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] Node to Node encryption cipher is TLSv1.2/TLS_DHE_RSA_WITH_AES_128_CBC_SHA256
[2016-08-11 12:59:11,445][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [2][indices:admin/exists] received request
[2016-08-11 12:59:11,445][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 12:59:11,445][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 12:59:11,446][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action indices:admin/exists from 127.0.0.1:48720/
[2016-08-11 12:59:11,446][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context [_sg_ssl_transport_cipher=>TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, _sg_ssl_transport_principal=>CN=kirk,OU=client,O=client,L=Test,C=DE, _sg_channel_type=>netty, _sg_user=>User [name=CN=kirk,OU=client,O=client,L=Test,C=DE, roles=[]], _sg_ssl_transport_peer_certificates=>[Ljava.security.cert.X509Certificate;@2646293f, _sg_remote_address=>127.0.0.1:48720, _sg_ssl_transport_protocol=>TLSv1.2]
[2016-08-11 12:59:11,446][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 12:59:11,446][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: 127.0.0.1:48720
[2016-08-11 12:59:11,447][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [2][indices:admin/exists] sent response
[2016-08-11 12:59:11,456][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [3][cluster:monitor/health] received request
[2016-08-11 12:59:11,456][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 12:59:11,457][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 12:59:11,457][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action cluster:monitor/health from 127.0.0.1:48722/
[2016-08-11 12:59:11,457][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context [_sg_ssl_transport_cipher=>TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, _sg_ssl_transport_principal=>CN=kirk,OU=client,O=client,L=Test,C=DE, _sg_channel_type=>netty, _sg_ssl_transport_protocol=>TLSv1.2, _sg_user=>User [name=CN=kirk,OU=client,O=client,L=Test,C=DE, roles=[]], _sg_remote_address=>127.0.0.1:48722, _sg_ssl_transport_peer_certificates=>[Ljava.security.cert.X509Certificate;@309c2fac]
[2016-08-11 12:59:11,457][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 12:59:11,458][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: 127.0.0.1:48722
[2016-08-11 12:59:11,459][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [3][cluster:monitor/health] sent response
[2016-08-11 12:59:11,646][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [4][indices:data/write/index] received request
[2016-08-11 12:59:11,653][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 12:59:11,653][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 12:59:11,654][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action indices:data/write/index from 127.0.0.1:48724/
[2016-08-11 12:59:11,655][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context [_sg_user=>User [name=CN=kirk,OU=client,O=client,L=Test,C=DE, roles=[]], _sg_remote_address=>127.0.0.1:48724, _sg_ssl_transport_protocol=>TLSv1.2, _sg_ssl_transport_cipher=>TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, _sg_channel_type=>netty, _sg_ssl_transport_peer_certificates=>[Ljava.security.cert.X509Certificate;@24f56c1f, _sg_ssl_transport_principal=>CN=kirk,OU=client,O=client,L=Test,C=DE]
[2016-08-11 12:59:11,655][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 12:59:11,655][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: 127.0.0.1:48724
[2016-08-11 12:59:12,017][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [26][indices:data/write/index[p]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 12:59:12,023][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [4][indices:data/write/index] sent response
[2016-08-11 12:59:12,037][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [5][indices:data/write/index] received request
[2016-08-11 12:59:12,039][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 12:59:12,039][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 12:59:12,039][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action indices:data/write/index from 127.0.0.1:48726/
[2016-08-11 12:59:12,040][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context [_sg_ssl_transport_peer_certificates=>[Ljava.security.cert.X509Certificate;@3f3a8c83, _sg_ssl_transport_protocol=>TLSv1.2, _sg_user=>User [name=CN=kirk,OU=client,O=client,L=Test,C=DE, roles=[]], _sg_ssl_transport_principal=>CN=kirk,OU=client,O=client,L=Test,C=DE, _sg_channel_type=>netty, _sg_ssl_transport_cipher=>TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, _sg_remote_address=>127.0.0.1:48726]
[2016-08-11 12:59:12,040][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 12:59:12,040][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: 127.0.0.1:48726
[2016-08-11 12:59:12,114][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [27][indices:data/write/index[p]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 12:59:12,114][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [5][indices:data/write/index] sent response
[2016-08-11 12:59:12,130][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [6][indices:data/write/index] received request
[2016-08-11 12:59:12,131][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 12:59:12,131][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 12:59:12,131][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action indices:data/write/index from 127.0.0.1:48716/
[2016-08-11 12:59:12,131][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context [_sg_ssl_transport_peer_certificates=>[Ljava.security.cert.X509Certificate;@79b5e7a9, _sg_remote_address=>127.0.0.1:48716, _sg_ssl_transport_cipher=>TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, _sg_channel_type=>netty, _sg_ssl_transport_principal=>CN=kirk,OU=client,O=client,L=Test,C=DE, _sg_ssl_transport_protocol=>TLSv1.2, _sg_user=>User [name=CN=kirk,OU=client,O=client,L=Test,C=DE, roles=[]]]
[2016-08-11 12:59:12,131][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 12:59:12,131][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: 127.0.0.1:48716
[2016-08-11 12:59:12,181][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [28][indices:data/write/index[p]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 12:59:12,182][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [6][indices:data/write/index] sent response
[2016-08-11 12:59:12,199][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [7][indices:data/write/index] received request
[2016-08-11 12:59:12,200][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 12:59:12,200][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 12:59:12,200][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action indices:data/write/index from 127.0.0.1:48718/
[2016-08-11 12:59:12,200][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context [_sg_ssl_transport_protocol=>TLSv1.2, _sg_user=>User [name=CN=kirk,OU=client,O=client,L=Test,C=DE, roles=[]], _sg_remote_address=>127.0.0.1:48718, _sg_ssl_transport_cipher=>TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, _sg_ssl_transport_peer_certificates=>[Ljava.security.cert.X509Certificate;@c5769ce, _sg_channel_type=>netty, _sg_ssl_transport_principal=>CN=kirk,OU=client,O=client,L=Test,C=DE]
[2016-08-11 12:59:12,200][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 12:59:12,200][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: 127.0.0.1:48718
[2016-08-11 12:59:12,249][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [29][indices:data/write/index[p]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 12:59:12,259][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [7][indices:data/write/index] sent response
[2016-08-11 12:59:12,267][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [8][indices:data/write/index] received request
[2016-08-11 12:59:12,267][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 12:59:12,267][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 12:59:12,268][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action indices:data/write/index from 127.0.0.1:48720/
[2016-08-11 12:59:12,268][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context [_sg_ssl_transport_cipher=>TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, _sg_channel_type=>netty, _sg_ssl_transport_principal=>CN=kirk,OU=client,O=client,L=Test,C=DE, _sg_ssl_transport_peer_certificates=>[Ljava.security.cert.X509Certificate;@15674190, _sg_user=>User [name=CN=kirk,OU=client,O=client,L=Test,C=DE, roles=[]], _sg_ssl_transport_protocol=>TLSv1.2, _sg_remote_address=>127.0.0.1:48720]
[2016-08-11 12:59:12,271][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 12:59:12,271][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: 127.0.0.1:48720
[2016-08-11 12:59:12,322][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [30][indices:data/write/index[p]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 12:59:12,325][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [9][cluster:admin/searchguard/config/update] received request
[2016-08-11 12:59:12,325][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 12:59:12,326][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 12:59:12,326][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action cluster:admin/searchguard/config/update from 127.0.0.1:48722/
[2016-08-11 12:59:12,326][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context [_sg_user=>User [name=CN=kirk,OU=client,O=client,L=Test,C=DE, roles=[]], _sg_ssl_transport_principal=>CN=kirk,OU=client,O=client,L=Test,C=DE, _sg_channel_type=>netty, _sg_ssl_transport_protocol=>TLSv1.2, _sg_remote_address=>127.0.0.1:48722, _sg_ssl_transport_cipher=>TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, _sg_ssl_transport_peer_certificates=>[Ljava.security.cert.X509Certificate;@48b50204]
[2016-08-11 12:59:12,326][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 12:59:12,326][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: 127.0.0.1:48722
[2016-08-11 12:59:12,327][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action indices:admin/exists from null/
[2016-08-11 12:59:12,327][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context []
[2016-08-11 12:59:12,327][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header [_sg_conf_request]
[2016-08-11 12:59:12,327][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null
[2016-08-11 12:59:12,328][DEBUG][com.floragunn.searchguard.configuration.ConfigurationLoader] searchguard index exists
[2016-08-11 12:59:12,328][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action indices:data/read/mget from null/
[2016-08-11 12:59:12,328][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context []
[2016-08-11 12:59:12,328][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header [_sg_conf_request]
[2016-08-11 12:59:12,328][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null
[2016-08-11 12:59:12,329][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action indices:data/read/mget[shard] from null/
[2016-08-11 12:59:12,329][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context []
[2016-08-11 12:59:12,329][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header [_sg_conf_request]
[2016-08-11 12:59:12,329][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null
[2016-08-11 12:59:12,329][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [8][indices:data/write/index] sent response
[2016-08-11 12:59:13,236][DEBUG][indices.memory ] [Vader] recalculating shard indexing buffer, total is [101.5mb] with [7] active shards, each shard set to indexing=[14.5mb], translog=[64kb]
[2016-08-11 12:59:14,387][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 12:59:14,387][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 12:59:16,537][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action cluster:monitor/nodes/stats from null/
[2016-08-11 12:59:16,537][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context []
[2016-08-11 12:59:16,537][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 12:59:16,537][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null
[2016-08-11 12:59:16,538][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] No user, will allow only standard discovery and monitoring actions
[2016-08-11 12:59:16,540][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [33][cluster:monitor/nodes/stats[n]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 12:59:16,541][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action indices:monitor/stats from null/
[2016-08-11 12:59:16,541][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context []
[2016-08-11 12:59:16,541][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 12:59:16,541][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null
[2016-08-11 12:59:16,541][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] No user, will allow only standard discovery and monitoring actions
[2016-08-11 12:59:16,560][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [34][indices:monitor/stats[n]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 12:59:19,390][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 12:59:19,391][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 12:59:24,393][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 12:59:24,394][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 12:59:29,397][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 12:59:29,398][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 12:59:34,400][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 12:59:34,402][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 12:59:39,404][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 12:59:39,407][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 12:59:43,237][DEBUG][indices.memory ] [Vader] recalculating shard indexing buffer, total is [101.5mb] with [7] active shards, each shard set to indexing=[14.5mb], translog=[64kb]
[2016-08-11 12:59:44,410][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 12:59:44,412][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 12:59:46,561][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action cluster:monitor/nodes/stats from null/
[2016-08-11 12:59:46,561][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context []
[2016-08-11 12:59:46,562][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 12:59:46,562][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null
[2016-08-11 12:59:46,562][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] No user, will allow only standard discovery and monitoring actions
[2016-08-11 12:59:46,563][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action indices:monitor/stats from null/
[2016-08-11 12:59:46,563][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context []
[2016-08-11 12:59:46,564][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 12:59:46,564][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null
[2016-08-11 12:59:46,564][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] No user, will allow only standard discovery and monitoring actions
[2016-08-11 12:59:46,563][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [35][cluster:monitor/nodes/stats[n]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 12:59:46,583][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [36][indices:monitor/stats[n]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 12:59:49,414][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 12:59:49,416][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 12:59:54,419][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 12:59:54,420][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 12:59:59,423][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 12:59:59,425][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 13:00:04,427][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 13:00:04,429][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 13:00:09,431][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 13:00:09,433][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 13:00:13,238][DEBUG][indices.memory ] [Vader] recalculating shard indexing buffer, total is [101.5mb] with [7] active shards, each shard set to indexing=[14.5mb], translog=[64kb]
[2016-08-11 13:00:14,435][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 13:00:14,437][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 13:00:16,584][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action cluster:monitor/nodes/stats from null/
[2016-08-11 13:00:16,585][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context []
[2016-08-11 13:00:16,585][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 13:00:16,585][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null
[2016-08-11 13:00:16,586][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] No user, will allow only standard discovery and monitoring actions
[2016-08-11 13:00:16,586][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action indices:monitor/stats from null/
[2016-08-11 13:00:16,587][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context []
[2016-08-11 13:00:16,587][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 13:00:16,587][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null
[2016-08-11 13:00:16,588][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] No user, will allow only standard discovery and monitoring actions
[2016-08-11 13:00:16,587][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [37][cluster:monitor/nodes/stats[n]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 13:00:16,596][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [38][indices:monitor/stats[n]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 13:00:19,439][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 13:00:19,442][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 13:00:24,448][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 13:00:24,450][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 13:00:29,453][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 13:00:29,455][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 13:00:34,457][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 13:00:34,459][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 13:00:39,461][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 13:00:39,463][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 13:00:43,239][DEBUG][indices.memory ] [Vader] recalculating shard indexing buffer, total is [101.5mb] with [7] active shards, each shard set to indexing=[14.5mb], translog=[64kb]
[2016-08-11 13:00:44,466][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 13:00:44,468][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 13:00:46,598][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action cluster:monitor/nodes/stats from null/
[2016-08-11 13:00:46,598][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context []
[2016-08-11 13:00:46,598][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 13:00:46,599][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null
[2016-08-11 13:00:46,599][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] No user, will allow only standard discovery and monitoring actions
[2016-08-11 13:00:46,600][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action indices:monitor/stats from null/
[2016-08-11 13:00:46,600][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context []
[2016-08-11 13:00:46,601][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 13:00:46,601][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null
[2016-08-11 13:00:46,601][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] No user, will allow only standard discovery and monitoring actions
[2016-08-11 13:00:46,600][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [39][cluster:monitor/nodes/stats[n]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 13:00:46,612][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [40][indices:monitor/stats[n]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 13:00:47,829][DEBUG][com.floragunn.searchguard.configuration.ConfigurationLoader] Cannot retrieve configuration (first object) due to null (null means timeout)
[2016-08-11 13:00:47,829][WARN ][com.floragunn.searchguard.configuration.ConfigurationLoader] Cannot retrieve configuration (first object) due to timeout
[2016-08-11 13:00:49,471][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 13:00:49,473][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 13:00:54,475][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 13:00:54,476][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 13:00:59,479][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 13:00:59,481][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 13:01:04,483][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 13:01:04,485][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 13:01:09,487][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 13:01:09,489][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 13:01:12,329][DEBUG][com.floragunn.searchguard.configuration.ConfigurationLoader] Cannot retrieve configuration (first object) due to null (null means timeout)
[2016-08-11 13:01:12,330][WARN ][com.floragunn.searchguard.configuration.ConfigurationLoader] Cannot retrieve configuration (first object) due to timeout
[2016-08-11 13:01:13,241][DEBUG][indices.memory ] [Vader] recalculating shard indexing buffer, total is [101.5mb] with [7] active shards, each shard set to indexing=[14.5mb], translog=[64kb]
[2016-08-11 13:01:14,492][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 13:01:14,493][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 13:01:16,613][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action cluster:monitor/nodes/stats from null/
[2016-08-11 13:01:16,613][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context []
[2016-08-11 13:01:16,614][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 13:01:16,614][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null
[2016-08-11 13:01:16,614][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] No user, will allow only standard discovery and monitoring actions
[2016-08-11 13:01:16,615][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action indices:monitor/stats from null/
[2016-08-11 13:01:16,615][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context []
[2016-08-11 13:01:16,616][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 13:01:16,616][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null
[2016-08-11 13:01:16,616][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] No user, will allow only standard discovery and monitoring actions
[2016-08-11 13:01:16,615][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [41][cluster:monitor/nodes/stats[n]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 13:01:16,631][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [42][indices:monitor/stats[n]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 13:01:19,496][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 13:01:19,498][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 13:01:24,500][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 13:01:24,502][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 13:01:29,508][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 13:01:29,509][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 13:01:34,513][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 13:01:34,514][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 13:01:39,517][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 13:01:39,519][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 13:01:43,242][DEBUG][indices.memory ] [Vader] recalculating shard indexing buffer, total is [101.5mb] with [7] active shards, each shard set to indexing=[14.5mb], translog=[64kb]
[2016-08-11 13:01:44,521][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 13:01:44,523][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 13:01:46,633][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action cluster:monitor/nodes/stats from null/
[2016-08-11 13:01:46,633][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context []
[2016-08-11 13:01:46,634][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 13:01:46,634][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null
[2016-08-11 13:01:46,634][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] No user, will allow only standard discovery and monitoring actions
[2016-08-11 13:01:46,635][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action indices:monitor/stats from null/
[2016-08-11 13:01:46,636][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context []
[2016-08-11 13:01:46,636][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 13:01:46,636][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null
[2016-08-11 13:01:46,636][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] No user, will allow only standard discovery and monitoring actions
[2016-08-11 13:01:46,635][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [43][cluster:monitor/nodes/stats[n]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 13:01:46,642][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [44][indices:monitor/stats[n]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 13:01:49,528][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 13:01:49,531][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 13:01:54,535][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 13:01:54,537][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 13:01:59,540][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 13:01:59,541][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 13:02:04,544][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] No issuer alternative names (san) found
[2016-08-11 13:02:04,546][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService] Is not an inter cluster request
[2016-08-11 13:02:08,718][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel closed: [id: 0x4e2eb814, /127.0.0.1:48730 => /127.0.0.1:9300]
[2016-08-11 13:02:08,723][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel closed: [id: 0xf19172aa, /127.0.0.1:48712 => /127.0.0.1:9300]
[2016-08-11 13:02:08,726][DEBUG][netty.handler.ssl.SslHandler] Failed to clean up SSLEngine.
javax.net.ssl.SSLException: Inbound closed before receiving peer's close_notify: possible truncation attack?
    at sun.security.ssl.Alerts.getSSLException(Alerts.java:208)
    at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1666)
    at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1634)
    at sun.security.ssl.SSLEngineImpl.closeInbound(SSLEngineImpl.java:1561)
    at org.jboss.netty.handler.ssl.SslHandler.closeEngine(SslHandler.java:591)
    at org.jboss.netty.handler.ssl.SslHandler.channelDisconnected(SslHandler.java:583)
    at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:102)
    at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
    at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559)
    at org.jboss.netty.channel.Channels.fireChannelDisconnected(Channels.java:396)
    at org.jboss.netty.channel.socket.nio.AbstractNioWorker.close(AbstractNioWorker.java:360)
    at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:93)
    at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108)
    at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337)
    at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89)
    at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178)
    at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108)
    at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
[2016-08-11 13:02:08,728][DEBUG][netty.handler.ssl.SslHandler] Failed to clean up SSLEngine.
javax.net.ssl.SSLException: Inbound closed before receiving peer's close_notify: possible truncation attack?
    at sun.security.ssl.Alerts.getSSLException(Alerts.java:208)
    at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1666)
    at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1634)
    at sun.security.ssl.SSLEngineImpl.closeInbound(SSLEngineImpl.java:1561)
    at org.jboss.netty.handler.ssl.SslHandler.closeEngine(SslHandler.java:591)
    at org.jboss.netty.handler.ssl.SslHandler.channelDisconnected(SslHandler.java:583)
    at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:102)
    at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
    at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559)
    at org.jboss.netty.channel.Channels.fireChannelDisconnected(Channels.java:396)
    at org.jboss.netty.channel.socket.nio.AbstractNioWorker.close(AbstractNioWorker.java:360)
    at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:93)
    at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108)
    at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337)
    at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89)
    at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178)
    at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108)
    at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
[2016-08-11 13:02:08,729][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel closed: [id: 0x00c40ac4, /127.0.0.1:48708 => /127.0.0.1:9300]
[2016-08-11 13:02:08,729][DEBUG][netty.handler.ssl.SslHandler] Failed to clean up SSLEngine.
javax.net.ssl.SSLException: Inbound closed before receiving peer's close_notify: possible truncation attack?
    at sun.security.ssl.Alerts.getSSLException(Alerts.java:208)
    at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1666)
    at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1634)
    at sun.security.ssl.SSLEngineImpl.closeInbound(SSLEngineImpl.java:1561)
    at org.jboss.netty.handler.ssl.SslHandler.closeEngine(SslHandler.java:591)
    at org.jboss.netty.handler.ssl.SslHandler.channelDisconnected(SslHandler.java:583)
    at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:102)
    at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
    at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559)
    at org.jboss.netty.channel.Channels.fireChannelDisconnected(Channels.java:396)
    at org.jboss.netty.channel.socket.nio.AbstractNioWorker.close(AbstractNioWorker.java:360)
    at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:93)
    at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108)
    at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337)
    at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89)
    at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178)
    at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108)
    at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
[2016-08-11 13:02:08,731][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel closed: [id: 0x3e5063ab, /127.0.0.1:48704 => /127.0.0.1:9300]
[2016-08-11 13:02:08,731][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel closed: [id: 0xd7d399bb, /127.0.0.1:48726 => /127.0.0.1:9300]
[2016-08-11 13:02:08,731][DEBUG][netty.handler.ssl.SslHandler] Failed to clean up SSLEngine.
javax.net.ssl.SSLException: Inbound closed before receiving peer's close_notify: possible truncation attack?
    at sun.security.ssl.Alerts.getSSLException(Alerts.java:208)
    at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1666)
    at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1634)
    at sun.security.ssl.SSLEngineImpl.closeInbound(SSLEngineImpl.java:1561)
    at org.jboss.netty.handler.ssl.SslHandler.closeEngine(SslHandler.java:591)
    at org.jboss.netty.handler.ssl.SslHandler.channelDisconnected(SslHandler.java:583)
    at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:102)
    at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
    at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559)
    at org.jboss.netty.channel.Channels.fireChannelDisconnected(Channels.java:396)
    at org.jboss.netty.channel.socket.nio.AbstractNioWorker.close(AbstractNioWorker.java:360)
    at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:93)
    at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108)
    at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337)
    at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89)
    at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178)
    at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108)
    at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
[2016-08-11 13:02:08,731][DEBUG][netty.handler.ssl.SslHandler] Failed to clean up SSLEngine.
javax.net.ssl.SSLException: Inbound closed before receiving peer's close_notify: possible truncation attack?
    at sun.security.ssl.Alerts.getSSLException(Alerts.java:208)
    at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1666)
    at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1634)
    at sun.security.ssl.SSLEngineImpl.closeInbound(SSLEngineImpl.java:1561)
    at org.jboss.netty.handler.ssl.SslHandler.closeEngine(SslHandler.java:591)
    at org.jboss.netty.handler.ssl.SslHandler.channelDisconnected(SslHandler.java:583)
    at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:102)
    at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
    at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559)
    at org.jboss.netty.channel.Channels.fireChannelDisconnected(Channels.java:396)
    at org.jboss.netty.channel.socket.nio.AbstractNioWorker.close(AbstractNioWorker.java:360)
    at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:93)
    at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108)
    at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337)
    at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89)
    at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178)
    at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108)
    at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
[2016-08-11 13:02:08,732][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel closed: [id: 0x1b129d32, /127.0.0.1:48716 => /127.0.0.1:9300]
[2016-08-11 13:02:08,732][DEBUG][netty.handler.ssl.SslHandler] Failed to clean up SSLEngine.
javax.net.ssl.SSLException: Inbound closed before receiving peer's close_notify: possible truncation attack?
    at sun.security.ssl.Alerts.getSSLException(Alerts.java:208)
    at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1666)
    at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1634)
    at sun.security.ssl.SSLEngineImpl.closeInbound(SSLEngineImpl.java:1561)
    at org.jboss.netty.handler.ssl.SslHandler.closeEngine(SslHandler.java:591)
    at org.jboss.netty.handler.ssl.SslHandler.channelDisconnected(SslHandler.java:583)
    at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:102)
    at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
    at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559)
    at org.jboss.netty.channel.Channels.fireChannelDisconnected(Channels.java:396)
    at org.jboss.netty.channel.socket.nio.AbstractNioWorker.close(AbstractNioWorker.java:360)
    at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:93)
    at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108)
    at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337)
    at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89)
    at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178)
    at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108)
    at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
[2016-08-11 13:02:08,734][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel closed: [id: 0x05a17020, /127.0.0.1:48722 => /127.0.0.1:9300]
[2016-08-11 13:02:08,734][DEBUG][netty.handler.ssl.SslHandler] Failed to clean up SSLEngine.
javax.net.ssl.SSLException: Inbound closed before receiving peer's close_notify: possible truncation attack?
at sun.security.ssl.Alerts.getSSLException(Alerts.java:208) at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1666) at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1634) at sun.security.ssl.SSLEngineImpl.closeInbound(SSLEngineImpl.java:1561) at org.jboss.netty.handler.ssl.SslHandler.closeEngine(SslHandler.java:591) at org.jboss.netty.handler.ssl.SslHandler.channelDisconnected(SslHandler.java:583) at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:102) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559) at org.jboss.netty.channel.Channels.fireChannelDisconnected(Channels.java:396) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.close(AbstractNioWorker.java:360) at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:93) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108) at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89) at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178) at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108) at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) at java.lang.Thread.run(Thread.java:745) [2016-08-11 13:02:08,734][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel closed: [id: 0xe98ffdcc, /127.0.0.1:48714 => /127.0.0.1:9300] [2016-08-11 13:02:08,736][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel closed: [id: 0xf7a5723f, /127.0.0.1:48724 => /127.0.0.1:9300] [2016-08-11 13:02:08,736][DEBUG][netty.handler.ssl.SslHandler] Failed to clean up SSLEngine. javax.net.ssl.SSLException: Inbound closed before receiving peer's close_notify: possible truncation attack? 
at sun.security.ssl.Alerts.getSSLException(Alerts.java:208) at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1666) at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1634) at sun.security.ssl.SSLEngineImpl.closeInbound(SSLEngineImpl.java:1561) at org.jboss.netty.handler.ssl.SslHandler.closeEngine(SslHandler.java:591) at org.jboss.netty.handler.ssl.SslHandler.channelDisconnected(SslHandler.java:583) at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:102) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559) at org.jboss.netty.channel.Channels.fireChannelDisconnected(Channels.java:396) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.close(AbstractNioWorker.java:360) at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:93) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108) at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89) at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178) at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108) at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) at java.lang.Thread.run(Thread.java:745) [2016-08-11 13:02:08,736][DEBUG][netty.handler.ssl.SslHandler] Failed to clean up SSLEngine. javax.net.ssl.SSLException: Inbound closed before receiving peer's close_notify: possible truncation attack? 
at sun.security.ssl.Alerts.getSSLException(Alerts.java:208) at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1666) at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1634) at sun.security.ssl.SSLEngineImpl.closeInbound(SSLEngineImpl.java:1561) at org.jboss.netty.handler.ssl.SslHandler.closeEngine(SslHandler.java:591) at org.jboss.netty.handler.ssl.SslHandler.channelDisconnected(SslHandler.java:583) at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:102) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559) at org.jboss.netty.channel.Channels.fireChannelDisconnected(Channels.java:396) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.close(AbstractNioWorker.java:360) at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:93) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108) at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89) at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178) at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108) at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) at java.lang.Thread.run(Thread.java:745) [2016-08-11 13:02:08,742][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel closed: [id: 0xfd0d057c, /127.0.0.1:48706 => /127.0.0.1:9300] [2016-08-11 13:02:08,743][DEBUG][netty.handler.ssl.SslHandler] Failed to clean up SSLEngine. javax.net.ssl.SSLException: Inbound closed before receiving peer's close_notify: possible truncation attack? 
at sun.security.ssl.Alerts.getSSLException(Alerts.java:208) at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1666) at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1634) at sun.security.ssl.SSLEngineImpl.closeInbound(SSLEngineImpl.java:1561) at org.jboss.netty.handler.ssl.SslHandler.closeEngine(SslHandler.java:591) at org.jboss.netty.handler.ssl.SslHandler.channelDisconnected(SslHandler.java:583) at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:102) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559) at org.jboss.netty.channel.Channels.fireChannelDisconnected(Channels.java:396) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.close(AbstractNioWorker.java:360) at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:93) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108) at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89) at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178) at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108) at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) at java.lang.Thread.run(Thread.java:745) [2016-08-11 13:02:08,743][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel closed: [id: 0x59fc02e7, /127.0.0.1:48720 => /127.0.0.1:9300] [2016-08-11 13:02:08,743][DEBUG][netty.handler.ssl.SslHandler] Failed to clean up SSLEngine. javax.net.ssl.SSLException: Inbound closed before receiving peer's close_notify: possible truncation attack? 
at sun.security.ssl.Alerts.getSSLException(Alerts.java:208) at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1666) at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1634) at sun.security.ssl.SSLEngineImpl.closeInbound(SSLEngineImpl.java:1561) at org.jboss.netty.handler.ssl.SslHandler.closeEngine(SslHandler.java:591) at org.jboss.netty.handler.ssl.SslHandler.channelDisconnected(SslHandler.java:583) at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:102) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559) at org.jboss.netty.channel.Channels.fireChannelDisconnected(Channels.java:396) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.close(AbstractNioWorker.java:360) at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:93) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108) at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89) at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178) at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108) at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) at java.lang.Thread.run(Thread.java:745) [2016-08-11 13:02:08,748][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel closed: [id: 0x7ee1aa71, /127.0.0.1:48728 => /127.0.0.1:9300] [2016-08-11 13:02:08,748][DEBUG][netty.handler.ssl.SslHandler] Failed to clean up SSLEngine. javax.net.ssl.SSLException: Inbound closed before receiving peer's close_notify: possible truncation attack? 
at sun.security.ssl.Alerts.getSSLException(Alerts.java:208) at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1666) at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1634) at sun.security.ssl.SSLEngineImpl.closeInbound(SSLEngineImpl.java:1561) at org.jboss.netty.handler.ssl.SslHandler.closeEngine(SslHandler.java:591) at org.jboss.netty.handler.ssl.SslHandler.channelDisconnected(SslHandler.java:583) at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:102) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559) at org.jboss.netty.channel.Channels.fireChannelDisconnected(Channels.java:396) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.close(AbstractNioWorker.java:360) at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:93) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108) at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89) at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178) at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108) at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) at java.lang.Thread.run(Thread.java:745) [2016-08-11 13:02:08,749][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel closed: [id: 0xe0536dc8, /127.0.0.1:48718 => /127.0.0.1:9300] [2016-08-11 13:02:08,749][DEBUG][netty.handler.ssl.SslHandler] Failed to clean up SSLEngine. javax.net.ssl.SSLException: Inbound closed before receiving peer's close_notify: possible truncation attack? 
at sun.security.ssl.Alerts.getSSLException(Alerts.java:208) at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1666) at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1634) at sun.security.ssl.SSLEngineImpl.closeInbound(SSLEngineImpl.java:1561) at org.jboss.netty.handler.ssl.SslHandler.closeEngine(SslHandler.java:591) at org.jboss.netty.handler.ssl.SslHandler.channelDisconnected(SslHandler.java:583) at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:102) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559) at org.jboss.netty.channel.Channels.fireChannelDisconnected(Channels.java:396) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.close(AbstractNioWorker.java:360) at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:93) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108) at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89) at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178) at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108) at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) at java.lang.Thread.run(Thread.java:745) [2016-08-11 13:02:08,750][TRACE][com.floragunn.searchguard.ssl.transport.SearchGuardSSLNettyTransport] [Vader] channel closed: [id: 0xa9506923, /127.0.0.1:48710 => /127.0.0.1:9300] [2016-08-11 13:02:08,751][DEBUG][netty.handler.ssl.SslHandler] Failed to clean up SSLEngine. javax.net.ssl.SSLException: Inbound closed before receiving peer's close_notify: possible truncation attack? 
	[stack trace identical to the one above]
[2016-08-11 13:02:13,243][DEBUG][indices.memory ] [Vader] recalculating shard indexing buffer, total is [101.5mb] with [7] active shards, each shard set to indexing=[14.5mb], translog=[64kb]
[2016-08-11 13:02:16,643][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action cluster:monitor/nodes/stats from null/
[2016-08-11 13:02:16,644][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context []
[2016-08-11 13:02:16,644][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 13:02:16,644][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null
[2016-08-11 13:02:16,645][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] No user, will allow only standard discovery and monitoring actions
[2016-08-11 13:02:16,645][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action indices:monitor/stats from null/
[2016-08-11 13:02:16,646][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context []
[2016-08-11 13:02:16,646][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 13:02:16,646][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null
[2016-08-11 13:02:16,647][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] No user, will allow only standard discovery and monitoring actions
[2016-08-11 13:02:16,646][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [45][cluster:monitor/nodes/stats[n]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 13:02:16,660][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [46][indices:monitor/stats[n]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 13:02:43,249][DEBUG][indices.memory ] [Vader] recalculating shard indexing buffer, total is [101.5mb] with [7] active shards, each shard set to indexing=[14.5mb], translog=[64kb]
[2016-08-11 13:02:46,660][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action cluster:monitor/nodes/stats from null/
[2016-08-11 13:02:46,662][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context []
[2016-08-11 13:02:46,662][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 13:02:46,663][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null
[2016-08-11 13:02:46,663][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] No user, will allow only standard discovery and monitoring actions
[2016-08-11 13:02:46,663][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action indices:monitor/stats from null/
[2016-08-11 13:02:46,664][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context []
[2016-08-11 13:02:46,664][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 13:02:46,665][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null
[2016-08-11 13:02:46,665][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] No user, will allow only standard discovery and monitoring actions
[2016-08-11 13:02:46,664][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [47][cluster:monitor/nodes/stats[n]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 13:02:46,672][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [48][indices:monitor/stats[n]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 13:02:47,830][DEBUG][com.floragunn.searchguard.configuration.ConfigurationLoader] Cannot retrieve configuration (2 object) due to null (null means timeout)
[2016-08-11 13:02:47,830][WARN ][com.floragunn.searchguard.configuration.ConfigurationLoader] Cannot retrieve configuration (2 object) due to timeout
[2016-08-11 13:03:12,331][DEBUG][com.floragunn.searchguard.configuration.ConfigurationLoader] Cannot retrieve configuration (2 object) due to null (null means timeout)
[2016-08-11 13:03:12,331][WARN ][com.floragunn.searchguard.configuration.ConfigurationLoader] Cannot retrieve configuration (2 object) due to timeout
[2016-08-11 13:03:13,250][DEBUG][indices.memory ] [Vader] recalculating shard indexing buffer, total is [101.5mb] with [7] active shards, each shard set to indexing=[14.5mb], translog=[64kb]
[2016-08-11 13:03:16,673][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action cluster:monitor/nodes/stats from null/
[2016-08-11 13:03:16,674][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context []
[2016-08-11 13:03:16,674][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 13:03:16,675][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null
[2016-08-11 13:03:16,675][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] No user, will allow only standard discovery and monitoring actions
[2016-08-11 13:03:16,676][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Action indices:monitor/stats from null/
[2016-08-11 13:03:16,676][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Context []
[2016-08-11 13:03:16,677][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] Header []
[2016-08-11 13:03:16,678][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] remote address: null
[2016-08-11 13:03:16,678][TRACE][com.floragunn.searchguard.filter.SearchGuardFilter] No user, will allow only standard discovery and monitoring actions
[2016-08-11 13:03:16,677][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [49][cluster:monitor/nodes/stats[n]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]
[2016-08-11 13:03:16,689][TRACE][com.floragunn.searchguard.transport.SearchGuardTransportService.tracer] [Vader] [50][indices:monitor/stats[n]] received response from [{Vader}{hnX5TnGhR_KVe9-J-iq5JQ}{127.0.0.1}{127.0.0.1:9300}]