Elasticsearch Java API Cookbook

1. Count the number of documents in an index

CountResponse response = client.prepareCount(indexname)
        .setQuery(QueryBuilders.matchAllQuery())
        .setTypes(indextype)
        .execute()
        .actionGet();
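A minimal follow-up for reading the result: CountResponse exposes the number of matched documents via getCount().

long total = response.getCount(); // number of documents matching the query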


2. Check whether an index exists

client.admin().indices().prepareExists(indexName).execute().actionGet().isExists();
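Recipe 5 below calls this check through a small helper method; a minimal sketch of that helper could look like this:

// Sketch of the isIndexExist helper assumed by recipe 5.
public static boolean isIndexExist(Client client, String indexName) {
    return client.admin().indices()
            .prepareExists(indexName)
            .execute().actionGet()
            .isExists();
}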


3. Count the documents in an index that have both fields A and B

MetricsAggregationBuilder aggregation = AggregationBuilders
        .cardinality("agg")
        .script("doc['A'].value + '#' + doc['B'].value");

4. Build a bool query with conditions on A, B and C, and add a filter restricting the time to the range dtFrom to dtTo

SearchRequestBuilder srb = client.prepareSearch(actual_index_arr).setTypes(indexType);

// add the query conditions
BoolQueryBuilder boolQuery = new BoolQueryBuilder();
if (A != null)
    boolQuery.must(QueryBuilders.matchQuery("A", A));
if (B != null)
    boolQuery.must(QueryBuilders.matchQuery("B", B));
if (C != null)
    boolQuery.must(QueryBuilders.matchQuery("C", C));
srb.setQuery(QueryBuilders.filteredQuery(boolQuery,
        FilterBuilders.rangeFilter("WRITETIME").from(dtFrom).to(dtTo)))
   .addField("_id"); // only return the _id field
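A sketch of executing the query and collecting the matching IDs; the page size of 1000 is only illustrative, and for very large result sets a scroll search would be preferable.

SearchResponse resp = srb.setSize(1000).execute().actionGet();
for (SearchHit hit : resp.getHits().getHits()) {
    String id = hit.getId();
    // collect or process the id here
}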

5. Create an index and set the mapping for a document type

if (!isIndexExist(client, indexName))
    client.admin().indices().prepareCreate(indexName).execute().actionGet();

// read the mapping definition from a file
BufferedReader br = new BufferedReader(new FileReader(file));
String tmp;
StringBuilder sb = new StringBuilder();
while ((tmp = br.readLine()) != null) {
    sb.append(tmp);
}
br.close();
String mappingSource = sb.toString();

// put the mapping on the index type
client.admin().indices().preparePutMapping(indexName)
        .setType(indexType).setSource(mappingSource)
        .execute().actionGet();
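Instead of reading the mapping from a file, the mapping source can also be built in code. A minimal sketch follows; the WRITETIME date field is only an example, borrowed from recipe 4.

XContentBuilder mapping = XContentFactory.jsonBuilder()
        .startObject()
            .startObject(indexType)
                .startObject("properties")
                    .startObject("WRITETIME").field("type", "date").endObject()
                .endObject()
            .endObject()
        .endObject();
client.admin().indices().preparePutMapping(indexName)
        .setType(indexType).setSource(mapping)
        .execute().actionGet();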

6. Nested aggregations to compute a sum, similar to select k1, k2, sum(v) from t group by k1, k2
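
The snippet below uses a SearchRequestBuilder srb that is not shown in the original; a minimal setup might be (indexname and indextype as in recipe 1):

SearchRequestBuilder srb = client.prepareSearch(indexname).setTypes(indextype)
        .setQuery(QueryBuilders.matchAllQuery())
        .setSize(0); // only aggregation results are needed, no hits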

// aggregations for group by k1, k2 and sum(v); size(0) returns all buckets
TermsBuilder tb = AggregationBuilders.terms("k1BUILDER").field("k1").size(0);
tb.subAggregation(AggregationBuilders.terms("k2BUILDER").field("k2").size(0)
        .subAggregation(AggregationBuilders.sum("sum_by_value").field("v")));
srb.addAggregation(tb);

// execute the search
SearchResponse sr = srb.get();

// process the returned aggregation buckets
Terms agg = sr.getAggregations().get("k1BUILDER");
for (Terms.Bucket entry : agg.getBuckets())
{
    String k1_key = entry.getKey();
    Terms sub_agg = entry.getAggregations().get("k2BUILDER");
    for (Terms.Bucket sub_entry : sub_agg.getBuckets())
    {
        String k2_key = sub_entry.getKey();
        Sum sm = sub_entry.getAggregations().get("sum_by_value");
        double sum = sm.getValue(); // sum(v) for this (k1, k2) pair
    }
}

7. Bulk-writing data

BulkProcessor bulkProcessor = BulkProcessor.builder(
    client,
    new BulkProcessor.Listener() {
        @Override
        public void beforeBulk(long executionId, BulkRequest request) {
            // called just before a bulk request is executed
        }

        @Override
        public void afterBulk(long executionId,
                BulkRequest request, BulkResponse response) {
            // called after a bulk request completes; check response.hasFailures() here
        }

        @Override
        public void afterBulk(long executionId,
                BulkRequest request, Throwable failure) {
            // called when a bulk request fails with an exception
        }
    })
    .setBulkActions(bulkCacheSize)                   // flush after this many actions
    .setFlushInterval(TimeValue.timeValueSeconds(5)) // or after 5 seconds
    .setConcurrentRequests(1)                        // allow one bulk request in flight
    .build();

for (int i = 0; i < json_lst.size(); i++)
{
    // use the record's own ID as the document ID to keep the data unique
    bulkProcessor.add(new IndexRequest(indexname, typename, id_lst.get(i)).source(json_lst.get(i)));
}
bulkProcessor.flush();
bulkProcessor.close();
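Note that close() triggers a final flush but does not wait for in-flight bulk requests to finish; if the caller needs to block until everything has been written, awaitClose can be used instead (it throws InterruptedException and needs java.util.concurrent.TimeUnit).

bulkProcessor.awaitClose(60, TimeUnit.SECONDS); // wait up to 60s for pending bulk requests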


