Authored by 谢勇

Performance testing

TraceHbaseHandler.java

@@ -16,14 +16,12 @@ import com.yoho.trace.sleuth.Spans;
 import com.yoho.trace.store.HBasePool;
 import com.yoho.trace.store.TraceSpanStore;
 
-import scala.util.Random;
-
 /**
  * Created by markeloff on 2017/7/26.
  */
-public class TraceHbaseHandler implements TraceHandler, Serializable {
+public class TraceHbaseHandler implements TraceHandler , Serializable {
 
-    Logger logger = LoggerFactory.getLogger(TraceHbaseHandler.class);
+    Logger logger = LoggerFactory.getLogger(TraceHbaseHandler.class) ;
 
     @Override
     public void handle(final JavaDStream<Spans> kafkaMsgDStream) {
@@ -31,34 +29,30 @@ public class TraceHbaseHandler implements TraceHandler, Serializable {
         kafkaMsgDStream.foreachRDD(new VoidFunction<JavaRDD<Spans>>() {
             @Override
             public void call(JavaRDD<Spans> spansJavaRDD) throws Exception {
+
                 spansJavaRDD.foreachPartition(new VoidFunction<Iterator<Spans>>() {
                     @Override
                     public void call(Iterator<Spans> spansIterator) throws Exception {
-                        // HTable traceTable = null ;
-                        HTable[] tables = new HTable[3];
-                        int count = 0;
-                        long begin = System.currentTimeMillis();
+
+                        HTable traceTable = null ;
+                        int count = 0 ;
+                        long begin = System.currentTimeMillis() ;
                         try {
-                            for (int i = 0; i < 3; i++) {
-                                tables[i] = (HTable) HBasePool.getConnection().getTable(TableName.valueOf("trace"));
-                                tables[i].setWriteBufferSize(1024 * 1024 * 20);
-                                tables[i].setAutoFlush(false, true);
-                                logger.info("flush spans to hbase, count {}, elapse {}", count, System.currentTimeMillis() - begin);
-                            }
-                            while (spansIterator.hasNext()) {
-                                int random = new Random().nextInt(3);
-                                count = count + TraceSpanStore.store(spansIterator.next(), tables[random]);
-                            }
-                            for (int i = 0; i < 3; i++) {
-                                tables[i].flushCommits();
+                            traceTable = (HTable) HBasePool.getConnection().getTable(TableName.valueOf("trace"));
+                            traceTable.setWriteBufferSize(1024 * 1024 * 6);
+                            traceTable.setAutoFlush(false, true);
+
+                            while (spansIterator.hasNext()) {
+                                count = count + TraceSpanStore.store(spansIterator.next(), traceTable) ;
                             }
 
-                        } finally {
+                            traceTable.flushCommits();
+
+                            logger.info("flush spans to hbase, count {}, elapse {}", count, System.currentTimeMillis() - begin);
+
+                        } finally {
                             try {
-                                for (HTable hTable : tables) {
-                                    if (hTable != null)
-                                        hTable.close();
-                                }
+                                if (traceTable != null) traceTable.close();
                             } catch (IOException e) {
                                 e.printStackTrace();
                             }
@@ -67,5 +61,7 @@ public class TraceHbaseHandler implements TraceHandler, Serializable {
                 });
             }
         });
+
+
     }
 }
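
The rewrite above drops the experiment of spreading writes across three HTable instances picked with scala.util.Random and goes back to one table per partition, shrinking the client-side write buffer from 20 MB per table to a single 6 MB buffer. With setAutoFlush(false, true), puts accumulate locally and are only shipped to the region servers on flushCommits(), once per partition. On HBase 1.x clients, where setAutoFlush and flushCommits are deprecated, the same batched-write pattern is usually expressed with BufferedMutator. The sketch below is not part of this commit; only the table name "trace", the span column family, and the 6 MB buffer come from the diff, while the class name, row keys, and values are made up for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.BufferedMutatorParams;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class BufferedTraceWriter {

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             // Same 6 MB client-side buffer the commit sets via
             // setWriteBufferSize(1024 * 1024 * 6); puts accumulate here
             // and are shipped to the region servers in batches.
             BufferedMutator mutator = connection.getBufferedMutator(
                     new BufferedMutatorParams(TableName.valueOf("trace"))
                             .writeBufferSize(1024 * 1024 * 6))) {

            for (int i = 0; i < 10_000; i++) {
                Put put = new Put(Bytes.toBytes("row-" + i));   // hypothetical row key
                put.addColumn(Bytes.toBytes("span"),            // column family from the diff
                        Bytes.toBytes("event"),
                        Bytes.toBytes("demo-event-" + i));      // hypothetical value
                mutator.mutate(put); // buffered; no RPC happens yet
            }
            mutator.flush(); // counterpart of flushCommits(): drain the buffer
        } // close() flushes any remainder and releases the connection
    }
}

Either way the point is the same: the cost of an HBase write is dominated by round trips to the region servers, so buffering puts on the client and flushing once per partition is exactly what the handler's "count / elapse" log line is measuring.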
TraceSpanStore.java

@@ -4,7 +4,6 @@ import java.io.Serializable;
 import java.util.List;
 
 import org.apache.commons.collections.CollectionUtils;
-import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -46,7 +45,7 @@ public class TraceSpanStore implements Serializable {
 
         Put put = new Put(Bytes.toBytes(rowkey)) ;
         // Disabling the WAL is not really recommended, though it does improve performance: HBase writes the WAL before the data itself, so that after a failure it can replay the WAL to recover edits that were not yet persisted.
-        put.setDurability(Durability.SKIP_WAL);
+        //put.setDurability(Durability.SKIP_WAL);
         put.addColumn(Bytes.toBytes("span"),Bytes.toBytes("service"),Bytes.toBytes( spans.getHost().getServiceName() )) ;
         put.addColumn(Bytes.toBytes("span"),Bytes.toBytes("event"),Bytes.toBytes( logEvent )) ;
         put.addColumn(Bytes.toBytes("span"),Bytes.toBytes("ip"),Bytes.toBytes( spans.getHost().getAddress() )) ;
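
The second file re-enables the write-ahead log: the SKIP_WAL override is commented out and the now-unused Durability import removed, trading some write throughput for the crash safety the comment describes. Durability is a per-mutation setting in the HBase client; a small standalone sketch contrasting the two modes (class name and row keys are hypothetical, not from the commit):

import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class DurabilityModes {

    public static void main(String[] args) {
        // Default behaviour (what this commit restores by commenting the
        // override out): the region server appends the edit to the WAL
        // before updating the MemStore, so a crash cannot lose it.
        Put durablePut = new Put(Bytes.toBytes("trace-row-1"));
        durablePut.setDurability(Durability.USE_DEFAULT);

        // What the commit removes: skipping the WAL avoids one log append
        // per mutation and speeds up writes, but any edit still sitting in
        // the MemStore is lost if the region server dies before a flush.
        Put fastPut = new Put(Bytes.toBytes("trace-row-2"));
        fastPut.setDurability(Durability.SKIP_WAL);
    }
}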