作者 xudada

Merge remote-tracking branch 'origin/kafka' into kafka

... ... @@ -48,14 +48,12 @@ public class KafkaInitialConfiguration {
/**
 * Builds a {@code KafkaConsumer} with String keys and values against the
 * configured broker list.
 *
 * <p>Auto-commit is enabled (1s interval) and the consumer starts from the
 * earliest available offset when no committed offset exists for the group.
 *
 * @return a new consumer joined to the {@code systemGroup} consumer group
 */
public KafkaConsumer<String, String> consumer() {
    Properties props = new Properties();
    // Broker list is resolved from the service's server table.
    props.put("bootstrap.servers", ServerListForMap());
    props.put("group.id", "systemGroup");
    props.put("enable.auto.commit", "true");
    props.put("auto.offset.reset", "earliest");
    props.put("auto.commit.interval.ms", "1000");
    props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    return new KafkaConsumer<>(props);
}
}
... ...
... ... @@ -7,6 +7,7 @@ import com.sunyo.wlpt.message.bus.service.response.ResultJson;
import com.sunyo.wlpt.message.bus.service.service.BusQueueService;
import com.sunyo.wlpt.message.bus.service.service.KafkaService;
import com.sunyo.wlpt.message.bus.service.service.UserInfoService;
import org.apache.commons.lang.StringUtils;
import org.apache.kafka.clients.admin.AdminClient;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
... ... @@ -201,4 +202,12 @@ public class BusQueueController {
return new ResultJson<List<ConsumerGroupOffsets>>("200","success",result);
}
/**
 * Clears all messages from the topic named in the request body.
 *
 * @param busQueue request payload; only {@code queueName} (the topic name) is read
 * @return 200/"success" when deletion succeeded, 400/"failed" when it did not,
 *         or 400 with a Chinese "missing topic info" message when no topic
 *         name was supplied
 */
@PostMapping("clean")
public ResultJson clean(@RequestBody BusQueue busQueue) {
    if (StringUtils.isNotBlank(busQueue.getQueueName())) {
        boolean result = kafkaService.delTopicPartitionMessage(busQueue.getQueueName());
        // Fixed typo in failure status text: "faild" -> "failed".
        return result ? new ResultJson("200", "success") : new ResultJson("400", "failed");
    }
    return new ResultJson("400", "缺少topic信息");
}
}
... ...
... ... @@ -8,11 +8,26 @@ import java.util.concurrent.ExecutionException;
public interface KafkaService {
    /**
     * Creates a new topic.
     *
     * @param topicName    name of the topic to create
     * @param partitionNum number of partitions for the new topic
     * @return {@code true} if the topic was created successfully
     */
    boolean addTopic(String topicName, int partitionNum);

    /**
     * Refreshes the shared AdminClient instance.
     */
    void updateAdminclient();

    /**
     * Edits the partition configuration of an existing topic. (by xyh)
     *
     * @param record queue record carrying the topic and new partition settings
     * @return {@code true} if the edit succeeded
     * @throws ExecutionException   if the underlying admin operation fails
     * @throws InterruptedException if the calling thread is interrupted while waiting
     */
    boolean ediPartition(BusQueue record) throws ExecutionException, InterruptedException;

    /**
     * Reports per-topic consumption status for the monitored consumer groups.
     *
     * @return list of consumer-group offset snapshots
     */
    List<ConsumerGroupOffsets> queueMonitor();

    /**
     * Deletes all messages currently stored in the given topic.
     *
     * @param topicName topic whose messages should be removed
     * @return {@code true} on success
     */
    boolean delTopicPartitionMessage(String topicName);
}
... ...
... ... @@ -5,16 +5,17 @@ import com.sunyo.wlpt.message.bus.service.domain.BusQueue;
import com.sunyo.wlpt.message.bus.service.domain.BusServer;
import com.sunyo.wlpt.message.bus.service.mapper.BusServerMapper;
import com.sunyo.wlpt.message.bus.service.mapper.ConsumerGroupMapper;
import com.sunyo.wlpt.message.bus.service.model.ConsumerGroup;
import com.sunyo.wlpt.message.bus.service.model.ConsumerGroupOffsets;
import com.sunyo.wlpt.message.bus.service.service.KafkaService;
import kafka.tools.ConsoleConsumer;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.*;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.TopicPartitionReplica;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaAdmin;
import org.springframework.stereotype.Service;
... ... @@ -188,4 +189,47 @@ public class KafkaServiceImp implements KafkaService {
return KAFKA_SERVERS;
}
@Override
public boolean delTopicPartitionMessage(String topic) {
    // End offset of every partition of the topic; deleting records "before"
    // the end offset effectively empties the partition.
    Map<Integer, Long> partitionEndOffsets = getPartitionsForTopic(topic);
    Map<TopicPartition, RecordsToDelete> recordsToDeleteMap = new HashMap<>(16);
    for (Map.Entry<Integer, Long> entry : partitionEndOffsets.entrySet()) {
        recordsToDeleteMap.put(
                new TopicPartition(topic, entry.getKey()),
                RecordsToDelete.beforeOffset(entry.getValue()));
    }
    DeleteRecordsResult deleteRecordsResult = KAFKA_ADMIN_CLIENT.deleteRecords(recordsToDeleteMap);
    // Log the post-deletion low watermark of each partition.
    deleteRecordsResult.lowWatermarks().forEach((topicPartition, future) -> {
        try {
            log.info("删除信息:TOPIC-{},partition-{},lastoffset-{},删除结果-{}",
                    topicPartition.topic(),
                    topicPartition.partition(),
                    future.get().lowWatermark(),
                    future.isDone());
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers can observe the interruption.
            Thread.currentThread().interrupt();
            log.error(e.toString());
        } catch (ExecutionException e) {
            log.error(e.toString());
        }
    });
    // NOTE(review): returns true even when individual partition deletions
    // failed (errors are only logged) — confirm this best-effort contract.
    return true;
}
/**
 * Returns the current end offset of every partition of the given topic.
 *
 * <p>The shared consumer is assigned to all partitions and seeked to the end
 * once, instead of re-assigning inside the loop as before, which repeated the
 * assign/seek work for every partition.
 *
 * @param topic topic to inspect
 * @return map of partition number to its end offset
 */
private Map<Integer, Long> getPartitionsForTopic(String topic) {
    Collection<PartitionInfo> partitionInfos = KAFKA_CONSUMER.partitionsFor(topic);
    List<TopicPartition> partitions = new ArrayList<>(partitionInfos.size());
    for (PartitionInfo info : partitionInfos) {
        partitions.add(new TopicPartition(topic, info.partition()));
    }
    // Assign and seek once for the whole partition set.
    KAFKA_CONSUMER.assign(partitions);
    KAFKA_CONSUMER.seekToEnd(partitions);
    Map<Integer, Long> endOffsets = new HashMap<>(16);
    for (TopicPartition partition : partitions) {
        endOffsets.put(partition.partition(), KAFKA_CONSUMER.position(partition));
    }
    return endOffsets;
}
}
... ...