作者 朱兆平

增加清除TOPIC消息

... ... @@ -48,7 +48,7 @@ public class KafkaInitialConfiguration {
public KafkaConsumer<String, String> consumer(){
Properties props = new Properties();
props.put("bootstrap.servers", ServerListForMap());
props.put("group.id", "test");
props.put("group.id", "systemGroup");
props.put("enable.auto.commit", "true");
props.put("auto.offset.reset", "earliest");
props.put("auto.commit.interval.ms", "1000");
... ...
... ... @@ -7,6 +7,7 @@ import com.sunyo.wlpt.message.bus.service.response.ResultJson;
import com.sunyo.wlpt.message.bus.service.service.BusQueueService;
import com.sunyo.wlpt.message.bus.service.service.KafkaService;
import com.sunyo.wlpt.message.bus.service.service.UserInfoService;
import org.apache.commons.lang.StringUtils;
import org.apache.kafka.clients.admin.AdminClient;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
... ... @@ -199,4 +200,12 @@ public class BusQueueController {
return new ResultJson<List<ConsumerGroupOffsets>>("200","success",result);
}
/**
 * Purges all messages from the topic named in the request body.
 *
 * @param busQueue request payload; only {@code queueName} (the Kafka topic name) is used
 * @return 200/success when the purge succeeded, 400 with an error message otherwise
 */
@PostMapping("clean")
public ResultJson clean(@RequestBody BusQueue busQueue){
    // Guard against a missing body as well as a blank topic name.
    if (busQueue == null || StringUtils.isBlank(busQueue.getQueueName())){
        return new ResultJson("400","缺少topic信息");
    }
    boolean result = kafkaService.delTopicPartitionMessage(busQueue.getQueueName());
    // Fixed typo in the failure message: "faild" -> "failed".
    return result ? new ResultJson("200","success") : new ResultJson("400","failed");
}
}
... ...
... ... @@ -6,9 +6,24 @@ import java.util.List;
/**
 * Kafka administration operations: topic creation, consumption monitoring,
 * and purging of topic messages.
 */
public interface KafkaService {
    /**
     * Creates a new topic.
     *
     * @param topicName    name of the topic to create
     * @param partitionNum number of partitions for the new topic
     * @return {@code true} if the topic was created successfully
     */
    boolean addTopic(String topicName, int partitionNum);

    /**
     * Refreshes the cached AdminClient instance (e.g. after the broker
     * server list changes).
     */
    void updateAdminclient();

    /**
     * Reports the consumption status (offsets/lag) of each consumer group,
     * for monitoring purposes.
     *
     * @return per-group, per-partition offset information
     */
    List<ConsumerGroupOffsets> queueMonitor();

    /**
     * Purges all messages currently stored in the given topic.
     *
     * @param topicName topic whose messages should be deleted
     * @return {@code true} if the delete request completed successfully
     */
    boolean delTopicPartitionMessage(String topicName);
}
... ...
... ... @@ -4,16 +4,17 @@ package com.sunyo.wlpt.message.bus.service.service.kafka;
import com.sunyo.wlpt.message.bus.service.domain.BusServer;
import com.sunyo.wlpt.message.bus.service.mapper.BusServerMapper;
import com.sunyo.wlpt.message.bus.service.mapper.ConsumerGroupMapper;
import com.sunyo.wlpt.message.bus.service.model.ConsumerGroup;
import com.sunyo.wlpt.message.bus.service.model.ConsumerGroupOffsets;
import com.sunyo.wlpt.message.bus.service.service.KafkaService;
import kafka.tools.ConsoleConsumer;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.*;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.TopicPartitionReplica;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaAdmin;
import org.springframework.stereotype.Service;
... ... @@ -176,4 +177,47 @@ public class KafkaServiceImp implements KafkaService {
return KAFKA_SERVERS;
}
/**
 * Purges all messages from every partition of the given topic by issuing an
 * AdminClient deleteRecords request up to each partition's current end offset.
 *
 * @param topic name of the topic to empty
 * @return {@code true} only if the delete completed for every partition;
 *         {@code false} if any partition's delete failed or the thread was interrupted
 */
@Override
public boolean delTopicPartitionMessage(String topic) {
    // Current end offset of every partition of the topic.
    Map<Integer, Long> partitionEndOffsets = getPartitionsForTopic(topic);
    Map<TopicPartition, RecordsToDelete> recordsToDeleteMap = new HashMap<>(16);
    for (Map.Entry<Integer, Long> entry : partitionEndOffsets.entrySet()) {
        // Deleting everything before the end offset empties the partition.
        recordsToDeleteMap.put(new TopicPartition(topic, entry.getKey()),
                RecordsToDelete.beforeOffset(entry.getValue()));
    }
    DeleteRecordsResult deleteRecordsResult = KAFKA_ADMIN_CLIENT.deleteRecords(recordsToDeleteMap);
    boolean allSucceeded = true;
    for (Map.Entry<TopicPartition, KafkaFuture<DeletedRecords>> entry
            : deleteRecordsResult.lowWatermarks().entrySet()) {
        try {
            // get() blocks until the broker acknowledges the delete for this partition.
            log.info("删除信息:TOPIC-{},partition-{},lastoffset-{},删除结果-{}",
                    entry.getKey().topic(),
                    entry.getKey().partition(),
                    entry.getValue().get().lowWatermark(),
                    entry.getValue().isDone());
        } catch (InterruptedException ex) {
            // Restore the interrupt flag instead of swallowing it.
            Thread.currentThread().interrupt();
            log.error(ex.toString());
            allSucceeded = false;
        } catch (ExecutionException ex) {
            log.error(ex.toString());
            allSucceeded = false;
        }
    }
    // Original always returned true; now reflects whether every partition succeeded.
    return allSucceeded;
}
/**
 * Returns the current end offset of every partition of the given topic.
 *
 * @param topic topic to inspect
 * @return map of partition id -&gt; end offset (the position after the last record)
 */
private Map<Integer, Long> getPartitionsForTopic(String topic) {
    List<PartitionInfo> partitionInfos = KAFKA_CONSUMER.partitionsFor(topic);
    List<TopicPartition> partitions = new ArrayList<>(partitionInfos.size());
    for (PartitionInfo info : partitionInfos) {
        partitions.add(new TopicPartition(topic, info.partition()));
    }
    // Assign and seek once for the full partition set instead of re-assigning
    // inside the loop on every iteration (as the original did).
    KAFKA_CONSUMER.assign(partitions);
    KAFKA_CONSUMER.seekToEnd(partitions);
    Map<Integer, Long> endOffsets = new HashMap<>(16);
    for (TopicPartition partition : partitions) {
        endOffsets.put(partition.partition(), KAFKA_CONSUMER.position(partition));
    }
    return endOffsets;
}
}
... ...