alter 'test_record', METHOD => 'table_att', 'coprocessor' => 'hdfs:///hbase_es/hbase-observer-elasticsearch-1.0-SNAPSHOT-zcestestrecord.jar|org.eminem.hbase.observer.HbaseDataSyncEsObserver|1001|es_cluster=zcits,es_type=zcestestrecord,es_index=zcestestrecord,es_port=9100,es_host=master'
What does it mean that syncing multiple HBase tables to ES causes data to get mixed up? For example, when HBase tables A and B are synced to ES indices A` and B`, all of table A's data ends up in B`. The cause of this error is that the two components above used static methods and fields. The fix is to make everything non-static and instantiate the components wherever they are used (a sketch of how the observer wires the instances together follows the two classes below). The code is as follows:
public class EsClient {
// ElasticSearch cluster name
private String clusterName;
// ElasticSearch hosts
private String[] nodeHost;
// ElasticSearch port (the Java API uses the transport port, i.e. TCP)
private int nodePort;
private TransportClient client = null;
private static final Log LOG = LogFactory.getLog(EsClient.class);
/**
 * Build an ES client from the given configuration.
 *
 * @param clusterName ES cluster name
 * @param nodeHost    ES hosts joined with "-"
 * @param nodePort    ES transport (TCP) port
 */
public EsClient(String clusterName, String nodeHost, int nodePort) {
this.clusterName = clusterName;
// es_host may list several hosts joined with "-"
this.nodeHost = nodeHost.split("-");
this.nodePort = nodePort;
this.client = initEsClient();
}
public String getInfo() {
List<String> fields = new ArrayList<String>();
try {
for (Field f : EsClient.class.getDeclaredFields()) {
fields.add(f.getName() + "=" + f.get(this));
}
} catch (IllegalAccessException ex) {
ex.printStackTrace();
}
return StringUtils.join(fields, ", ");
}
public String getOneNodeHost() {
if (this.nodeHost == null || this.nodeHost.length == 0) {
return "";
}
Random rand = new Random();
return nodeHost[rand.nextInt(this.nodeHost.length)];
}
/**
* init ES client
*/
public TransportClient initEsClient() {
LOG.info("---------- Init ES Client " + this.clusterName + " -----------");
TransportClient client = null;
Settings settings = Settings.builder().put("cluster.name", this.clusterName).put("client.transport.sniff", true).build();
try {
client = new PreBuiltTransportClient(settings).addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName(getOneNodeHost()), this.nodePort));
} catch (UnknownHostException e) {
e.printStackTrace();
}
return client;
}
public void repeatInitEsClient() {
this.client = initEsClient();
}
/**
* @return the clusterName
*/
public String getClusterName() {
return clusterName;
}
/**
* @param clusterName the clusterName to set
*/
public void setClusterName(String clusterName) {
this.clusterName = clusterName;
}
/**
* @return the nodePort
*/
public int getNodePort() {
return nodePort;
}
/**
* @param nodePort the nodePort to set
*/
public void setNodePort(int nodePort) {
this.nodePort = nodePort;
}
/**
* @return the client
*/
public TransportClient getClient() {
return client;
}
/**
* @param client the client to set
*/
public void setClient(TransportClient client) {
this.client = client;
}
}
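With EsClient defined as an ordinary class, each table that needs syncing can hold its own client instance. A minimal construction sketch (the cluster name zcits and port 9100 are taken from the alter command above; the extra host names are made up only to show the "-"-separated format the constructor expects):
EsClient esClient = new EsClient("zcits", "master-slave1-slave2", 9100);
System.out.println(esClient.getInfo());
// ... use esClient.getClient() for requests, then close it when the table's observer stops
esClient.getClient().close();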
The ElasticSearchBulkOperator component:
public class ElasticSearchBulkOperator {
private static final Log LOG = LogFactory.getLog(ElasticSearchBulkOperator.class);
private static final int MAX_BULK_COUNT = 5000;
private BulkRequestBuilder bulkRequestBuilder = null;
private Lock commitLock = new ReentrantLock();
private ScheduledExecutorService scheduledExecutorService = null;
private EsClient esClient = null;
public ElasticSearchBulkOperator(final EsClient esClient) {
LOG.info("----------------- Init Bulk Operator for Table: " + " ----------------");
this.esClient = esClient;
// init es bulkRequestBuilder
this.bulkRequestBuilder = esClient.getClient().prepareBulk();
// init thread pool and set size 1
this.scheduledExecutorService = Executors.newScheduledThreadPool(1);
// create the beeper thread (it periodically flushes the buffered data to the ES cluster); commitLock keeps the bulk request thread-safe
Runnable beeper = new Runnable() {
@Override
public void run() {
commitLock.lock();
try {
LOG.info("Scheduled Thread start run for ");
bulkRequest(0);
} catch (Exception ex) {
LOG.error("Time Bulk " + " index error : " + ex.getMessage());
} finally {
commitLock.unlock();
}
}
};
// schedule the beeper thread (first execution delayed by 15 seconds, then run every 25 seconds)
scheduledExecutorService.scheduleAtFixedRate(beeper, 15, 25, TimeUnit.SECONDS);
}
/**
* shut down the scheduled task (running tasks are allowed to finish)
*/
public void shutdownScheduEx() {
if (null != scheduledExecutorService && !scheduledExecutorService.isShutdown()) {
scheduledExecutorService.shutdown();
}
}
/**
* flush the bulk request when the number of buffered actions is greater than the threshold
*
* @param threshold
*/
public void bulkRequest(int threshold) {
int count = bulkRequestBuilder.numberOfActions();
if (bulkRequestBuilder.numberOfActions() > threshold) {
try {
LOG.info("Bulk Request Run " + ", the row count is: " + count);
BulkResponse bulkItemResponse = bulkRequestBuilder.execute().actionGet();
if (bulkItemResponse.hasFailures()) {
LOG.error("------------- Begin: Error Response Items of Bulk Requests to ES ------------");
LOG.error(bulkItemResponse.buildFailureMessage());
LOG.error("------------- End: Error Response Items of Bulk Requests to ES ------------");
}
bulkRequestBuilder = esClient.getClient().prepareBulk();
} catch (Exception e) { // two causes: 1. the transport client is closed 2. none of the configured nodes are available
LOG.error("Bulk request index error : " + e.getMessage());
LOG.error("Reconnect the ES server...");
List tempRequests = bulkRequestBuilder.request().requests();
esClient.getClient().close();
esClient.repeatInitEsClient();
bulkRequestBuilder = esClient.getClient().prepareBulk();
bulkRequestBuilder.request().add(tempRequests);
}
}
}
/**
* add an update builder to the bulk request; commitLock keeps the
* bulk request thread-safe
*
* @param builder
*/
public void addUpdateBuilderToBulk(UpdateRequestBuilder builder) {
commitLock.lock();
try {
bulkRequestBuilder.add(builder);
bulkRequest(MAX_BULK_COUNT);
} catch (Exception ex) {
LOG.error(" Add Bulk index error : " + ex.getMessage());
} finally {
commitLock.unlock();
}
}
/**
* add a delete builder to the bulk request; commitLock keeps the
* bulk request thread-safe
*
* @param builder
*/
public void addDeleteBuilderToBulk(DeleteRequestBuilder builder) {
commitLock.lock();
try {
bulkRequestBuilder.add(builder);
bulkRequest(MAX_BULK_COUNT);
} catch (Exception ex) {
LOG.error(" delete Bulk index error : " + ex.getMessage());
} finally {
commitLock.unlock();
}
}
}
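For completeness, here is a minimal sketch of how HbaseDataSyncEsObserver can create both components as instance fields in its start() hook instead of as statics, reading the es_* arguments registered in the alter command at the top of this section. This only illustrates the non-static wiring; the field names, the default port, and the exact cleanup in stop() are assumptions, not the observer's actual code.
import java.io.IOException;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
public class HbaseDataSyncEsObserver extends BaseRegionObserver {
    // one client and one bulk operator per observer instance, so no static state is shared between tables
    private EsClient esClient;
    private ElasticSearchBulkOperator bulkOperator;
    @Override
    public void start(CoprocessorEnvironment env) throws IOException {
        // the key=value pairs after the class name in the alter command are exposed through the coprocessor configuration
        String clusterName = env.getConfiguration().get("es_cluster");
        String hosts = env.getConfiguration().get("es_host");
        int port = env.getConfiguration().getInt("es_port", 9300);
        this.esClient = new EsClient(clusterName, hosts, port);
        this.bulkOperator = new ElasticSearchBulkOperator(this.esClient);
    }
    @Override
    public void stop(CoprocessorEnvironment env) throws IOException {
        // flush whatever is still buffered, then release the scheduler and the client
        bulkOperator.bulkRequest(0);
        bulkOperator.shutdownScheduEx();
        esClient.getClient().close();
    }
}
The postPut and postDelete hooks would then call addUpdateBuilderToBulk and addDeleteBuilderToBulk on this per-table bulkOperator instance, so writes to different tables can no longer end up in each other's index.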
LOG.error("Reconnect the ES server...");
List tempRequests = bulkRequestBuilder.request().requests();
esClient.getClient().close();
esClient.repeatInitEsClient();
bulkRequestBuilder = esClient.getClient().prepareBulk();
bulkRequestBuilder.request().add(tempRequests);