Showing 9 changed files with 283 additions and 9 deletions.
Message-bus service configuration (YAML):

@@ -161,6 +161,13 @@ message-bus:
   password: 111111
   # heartbeat interval; default 10 seconds, in milliseconds
   heartbit-interval: 10000
+  consumer-group-id: HYYWGroup
+kafka:
+  bootstrap-servers: 192.168.1.73:32771
+  consumer:
+    properties:
+      security:
+        protocol: PLAINTEXT://192.168.1.73:32771
 info:
   version: 1.0
   description: "Message bus - message forwarding service. [Forwards messages from the big-data team onto the bus]"
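A note on the new `kafka` block: of these keys, only `kafka.bootstrap-servers` is actually read by the new code (via `@Value` in `KafkaConsumerConfig` and `KafkaReadProcessor`). Kafka's `security.protocol` setting takes a protocol name (`PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`), not a `host:port` URL, so if plaintext is intended the conventional form would be the sketch below rather than what the commit contains:

    kafka:
      bootstrap-servers: 192.168.1.73:32771
      consumer:
        properties:
          security:
            protocol: PLAINTEXT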
pom.xml:

@@ -6,7 +6,7 @@
     <packaging>jar</packaging>
     <groupId>com.tianbo</groupId>
     <artifactId>messagebus-trans-message</artifactId>
-    <version>1.0-feign</version>
+    <version>1.0-feign-kafka</version>
     <description>Message forwarding service</description>
 
     <parent>
@@ -33,6 +33,12 @@
         <groupId>org.springframework.cloud</groupId>
         <artifactId>spring-cloud-starter-netflix-eureka-client</artifactId>
     </dependency>
+    <!-- kafka start -->
+    <dependency>
+        <groupId>org.springframework.kafka</groupId>
+        <artifactId>spring-kafka</artifactId>
+    </dependency>
+    <!-- kafka end -->
     <!--lombok-->
     <dependency>
         <groupId>org.projectlombok</groupId>
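The new `spring-kafka` dependency is declared without a `<version>`, so it resolves only if the parent POM from the previous hunk manages it. If it does not, the version has to be pinned explicitly; the number below is purely illustrative:

    <dependency>
        <groupId>org.springframework.kafka</groupId>
        <artifactId>spring-kafka</artifactId>
        <!-- illustrative version, an assumption; normally inherited from the parent -->
        <version>2.2.8.RELEASE</version>
    </dependency>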
MessageTransApplication.java:

@@ -29,7 +29,7 @@ public class MessageTransApplication {
     @Bean
     public TaskScheduler taskScheduler() {
         ThreadPoolTaskScheduler taskScheduler = new ThreadPoolTaskScheduler();
-        taskScheduler.setPoolSize(10);
+        taskScheduler.setPoolSize(3);
         return taskScheduler;
     }
 
KafkaConsumerConfig.java (new file):

+package com.tianbo.messagebus.config;
+
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.kafka.annotation.EnableKafka;
+import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
+import org.springframework.kafka.config.KafkaListenerContainerFactory;
+import org.springframework.kafka.core.ConsumerFactory;
+import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
+import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
+
+import java.util.HashMap;
+import java.util.Map;
+
+@Configuration
+@EnableKafka
+public class KafkaConsumerConfig {
+
+    @Value("${kafka.bootstrap-servers}")
+    private String servers;
+
+    @Bean
+    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
+        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
+        factory.setConsumerFactory(consumerFactory());
+        factory.setConcurrency(10);
+        factory.setBatchListener(true);
+        factory.getContainerProperties().setPollTimeout(1500);
+        // offsets are committed manually: auto-commit is disabled in consumerConfigs()
+        return factory;
+    }
+
+    public ConsumerFactory<String, String> consumerFactory() {
+        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
+    }
+
+    public Map<String, Object> consumerConfigs() {
+        Map<String, Object> propsMap = new HashMap<>();
+        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
+        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
+        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 10000);
+        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
+        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
+        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+        propsMap.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, 1048576 * 5);
+        propsMap.put(ConsumerConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG, 60000);
+        propsMap.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 50);
+        propsMap.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 400);
+        propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 20);
+        propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 10 * 60 * 1000);
+        return propsMap;
+    }
+}
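Nothing in this commit actually registers a `@KafkaListener` against this factory: the polling service further down drives `KafkaConsumer` directly and only reuses `consumerConfigs()`. If the factory were used, a minimal batch listener would look like the sketch below; the topic and group names are placeholders, not part of this commit:

    import java.util.List;

    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.springframework.kafka.annotation.KafkaListener;
    import org.springframework.stereotype.Component;

    @Component
    public class ForwardingListener {

        // The factory above enables batch listening, so the container hands the
        // method up to max.poll.records (20) records per call.
        @KafkaListener(topics = "HYYW", groupId = "HYYWGroup",
                containerFactory = "kafkaListenerContainerFactory")
        public void onBatch(List<ConsumerRecord<String, String>> records) {
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(record.value());  // placeholder processing
            }
        }
    }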
ConsumersCache.java (new file):

+package com.tianbo.messagebus.kafka;
+
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Cache of Kafka consumers, keyed by consumer name.
+ */
+@Slf4j
+public class ConsumersCache {
+    public static Map<String, KafkaConsumer<String, String>> consumerMap;
+
+    public static Map<String, KafkaConsumer<String, String>> getConsumerMap() {
+        if (consumerMap != null){
+            return consumerMap;
+        }
+        log.trace("initializing consumer cache");
+        consumerMap = new HashMap<String, KafkaConsumer<String, String>>();
+        return consumerMap;
+    }
+}
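The lazily initialized `HashMap` is not thread-safe: two threads hitting `getConsumerMap()` at the same time could each create a map and lose cached consumers. Since `KafkaConsumer` itself must only ever be used from a single thread, the cache appears safe here only because one scheduler thread calls it. A defensive variant, as a sketch rather than part of this commit:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    import org.apache.kafka.clients.consumer.KafkaConsumer;

    public final class ConsumersCache {
        // eager, thread-safe initialization; no lazy null check needed
        private static final Map<String, KafkaConsumer<String, String>> CONSUMERS =
                new ConcurrentHashMap<>();

        private ConsumersCache() { }

        public static Map<String, KafkaConsumer<String, String>> getConsumerMap() {
            return CONSUMERS;
        }
    }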
KafkaSendApi.java:

@@ -8,6 +8,7 @@ import org.springframework.web.bind.annotation.*;
 import java.util.List;
 
 @FeignClient(name = "kafka-server-producer",
+        url = "http://127.0.0.1:8080/",
         fallback = KafkaSendFallback.class )
 public interface KafkaSendApi {
 
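Setting `url` on a `@FeignClient` makes Feign call that address directly, bypassing Eureka discovery and client-side load balancing for this client; hard-coding `127.0.0.1:8080` also bakes a deployment detail into the source. The `url` attribute accepts property placeholders, so a more flexible variant (the property name here is an assumption) would be:

    import org.springframework.cloud.openfeign.FeignClient;

    // In older Spring Cloud trains the annotation lives in
    // org.springframework.cloud.netflix.feign instead.
    @FeignClient(name = "kafka-server-producer",
            url = "${kafka-producer.url:http://127.0.0.1:8080/}",
            fallback = KafkaSendFallback.class)
    public interface KafkaSendApi {
        // ...methods unchanged
    }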
KafkaSendFallback.java:

@@ -6,7 +6,6 @@ import com.tianbo.messagebus.model.MSGS;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.stereotype.Service;
 
-import java.util.List;
 
 @Slf4j
 @Service("myKafkaSend")
@@ -14,7 +13,6 @@ public class KafkaSendFallback implements KafkaSendApi {
 
     @Override
     public ResultJson send(MSGS msgs) {
-        log.info("message send failed");
-        return new ResultJson<>("400", "message send failed");
+        return new ResultJson<>("10400", "message send failed");
     }
 }
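Changing the fallback's code from "400" to "10400" makes a fallback response distinguishable from an ordinary HTTP 400 returned by the producer service. Note that the fallback bean only takes effect when Feign's circuit-breaker integration is switched on, which for Spring Cloud Netflix releases of this era (an assumption about the project's version) means:

    feign:
      hystrix:
        enabled: true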
KafkaReadProcessor.java (new file):

+package com.tianbo.messagebus.service;
+
+import com.alibaba.fastjson.JSON;
+import com.alibaba.fastjson.JSONObject;
+import com.tianbo.messagebus.config.KafkaConsumerConfig;
+import com.tianbo.messagebus.controller.response.ResultJson;
+import com.tianbo.messagebus.kafka.ConsumersCache;
+import com.tianbo.messagebus.model.HEADER;
+import com.tianbo.messagebus.model.MSG;
+import com.tianbo.messagebus.model.MSGS;
+import com.tianbo.messagebus.myinterface.KafkaSendApi;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.lang.StringUtils;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.scheduling.annotation.Scheduled;
+import org.springframework.stereotype.Service;
+
+import java.time.Duration;
+import java.util.Arrays;
+import java.util.Map;
+
+@Service
+@Slf4j
+public class KafkaReadProcessor {
+
+    /**
+     * Account name, which doubles as the topic name.
+     */
+    @Value("${message-bus.auth.username}")
+    private String userName;
+
+    @Value("${message-bus.consumer-group-id}")
+    private String groupName;
+
+    @Autowired
+    KafkaSendApi kafkaSendApi;
+
+    @Value("${kafka.bootstrap-servers}")
+    private String servers;
+
+    /**
+     * Number of resend attempts after a failed send.
+     */
+    private static final int RETRY_TIMES = 10;
+
+    @Scheduled(fixedRate = 6000)
+    public void msgProcess(){
+        try{
+            if (StringUtils.isNotEmpty(userName) && StringUtils.isNotEmpty(groupName)){
+                log.info("1. [START] group [{}] reading topic [{}] ->", groupName, userName);
+                // consumerConfigs() is called on a hand-created config object, so its
+                // @Value-injected servers field is null; bootstrap servers are re-set below
+                Map<String, Object> map = new KafkaConsumerConfig().consumerConfigs();
+                map.put(ConsumerConfig.GROUP_ID_CONFIG, groupName);
+                map.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
+                log.info("----2. consumer group: {}----", groupName);
+
+                // create (and cache) one consumer per partition, three in total
+                for (int i = 1; i <= 3; i++) {
+                    KafkaConsumer<String, String> consumer;
+                    String consumerName = userName + "-" + i;
+                    if (ConsumersCache.getConsumerMap().containsKey(consumerName)){
+                        consumer = ConsumersCache.consumerMap.get(consumerName);
+                        log.info("[loop-start]3. consumer {} found in cache [{}].", consumerName, consumer);
+                    } else {
+                        map.put(ConsumerConfig.CLIENT_ID_CONFIG, consumerName);
+                        consumer = new KafkaConsumer<String, String>(map);
+                        ConsumersCache.consumerMap.put(consumerName, consumer);
+                        log.info("3. consumer {} not in cache, created a new one", consumerName);
+                    }
+
+                    consumer.subscribe(Arrays.asList(userName));
+                    ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(3));
+                    log.info("----4. consumer {} fetched {} records this round----", consumerName, records.count());
+
+                    if (!records.isEmpty()){
+                        for (ConsumerRecord<String, String> record : records) {
+                            String msg = record.value();
+                            log.info("[loop-start]5. processing message {}", msg);
+                            MSGS msgs = transMsg(msg);
+
+                            boolean sendResult = sendmsg(msgs);
+                            if (!sendResult){
+                                log.error("!!!!!! forwarding message --->{}<--- failed, retrying !!!!!!", msg);
+                                // TODO: back the message up as well, or is resending enough?
+                                reTrySend(msgs);
+                            }
+                        }
+                        consumer.commitSync();
+                        log.info("5. consumer {}-{} committed offsets", consumerName, groupName);
+                    } else {
+                        log.info("----[END]5. no new data on this consumer's topic, returning----");
+                    }
+                }
+            }
+        } catch (Exception e){
+            e.printStackTrace();
+        }
+    }
+
+    public MSGS transMsg(String msg){
+        JSONObject rootJson = JSON.parseObject(msg);
+        JSONObject msgJson = rootJson.getJSONObject("MSG");
+        JSONObject body = msgJson.getJSONObject("BODY");
+
+        HEADER msgHeader = msgJson.getObject("HEADER", HEADER.class);
+        msgHeader.setSNDR(userName);
+
+        MSG transMsg = new MSG();
+        String transBody = body.toJSONString();
+        transMsg.setHEADER(msgHeader);
+        transMsg.setBODY(transBody);
+
+        MSGS msgs = new MSGS();
+        msgs.setMSG(transMsg);
+        return msgs;
+    }
+
+    public boolean sendmsg(MSGS msgs){
+        ResultJson response = kafkaSendApi.send(msgs);
+
+        if ("200".equals(response.getCode())){
+            log.info("………………6. message sent successfully {}………………", response.toString());
+            return true;
+        }
+        log.info("400 - message send failed -> {}", response.toString());
+        return false;
+    }
+
+    /**
+     * Resend a message via Feign.
+     */
+    public void reTrySend(MSGS msgs){
+        log.error("*** entering resend ***");
+        for (int i = 0; i < RETRY_TIMES; i++) {
+            boolean sendResult = sendmsg(msgs);
+            if (sendResult){
+                log.error("*** resend succeeded ***");
+                // return, not break: the failure log below should only fire
+                // when every attempt has failed
+                return;
+            }
+        }
+        log.error("*** resend failed after >>>{}<<< attempts ***", RETRY_TIMES);
+    }
+}
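The three consumers above join the same group via `subscribe()`, so which consumer ends up with which partition is decided by the group coordinator during rebalancing; the `-1/-2/-3` naming is only a convention. A consumer can instead be pinned to a specific partition with `assign()`, as in the sketch below (not what this commit does; with `assign()` there is no rebalancing, and offsets are still committed under the configured group):

    import java.time.Duration;
    import java.util.Collections;
    import java.util.Map;

    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.TopicPartition;

    public final class AssignedPoll {

        // Polls exactly one partition of a topic; partition indexes are 0-based.
        static ConsumerRecords<String, String> pollPartition(
                Map<String, Object> consumerProps, String topic, int partition) {
            try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProps)) {
                consumer.assign(Collections.singletonList(new TopicPartition(topic, partition)));
                return consumer.poll(Duration.ofSeconds(3));
            }
        }
    }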
MessageBusProcessor.java:

@@ -81,7 +81,7 @@ public class MessageBusProcessor {
     /**
      * Number of resend attempts after a failed send.
      */
-    private static final int RETRY_TIMES= 100;
+    private static final int RETRY_TIMES= 10;
 
     /**
      * HTTP request framework.
      */
@@ -399,12 +399,14 @@ public class MessageBusProcessor {
     /**
      * Fetch messages from the service directly via Feign.
      */
-    @Scheduled(fixedRate = 1000)
+//    @Scheduled(fixedRate = 6000)
     public void getDataFromFeigin(){
 
+        try{
+
         log.info("1 - starting fetch task");
         ResultJson listResultJson = kafkaReciveApi.recive("HYYW");
-        List<String> dataList = new ArrayList<>();
+        List dataList = new ArrayList<>();
         if(listResultJson.getData() instanceof List){
             dataList = (List) listResultJson.getData();
         }
@@ -412,7 +414,7 @@ public class MessageBusProcessor {
         if ("200".equals(listResultJson.getCode()) && listResultJson.getData()!=null && dataList.size()>0){
             log.info("3 - processing fetched data");
             for (int i = 0; i < dataList.size(); i++) {
-                String msg = dataList.get(i);
+                String msg = ((List<String>) dataList).get(i);
                 log.info("4 - looping over message [{}] --->{}<---", i, msg);
                 JSONObject rootJson = JSON.parseObject(msg);
                 JSONObject msgJson = rootJson.getJSONObject("MSG");
@@ -435,6 +437,12 @@ public class MessageBusProcessor {
             }
         }
 
+        }catch (Exception e){
+            log.error("000 - error while fetching messages {}", e.toString());
+            e.printStackTrace();
+        }
+
     }
 
     /**
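The move to a raw `List` (with the cast reappearing at the call site) sidesteps generics rather than solving them, since `ResultJson#getData()` yields an untyped payload. A cast-free variant, as a sketch:

    import java.util.ArrayList;
    import java.util.List;

    public final class PayloadLists {

        // Converts an untyped payload such as ResultJson#getData() into a
        // List<String> without an unchecked cast; non-list input yields an
        // empty list, and each element is expected to be a JSON string.
        static List<String> toStringList(Object data) {
            List<String> out = new ArrayList<>();
            if (data instanceof List) {
                for (Object item : (List<?>) data) {
                    out.add(String.valueOf(item));
                }
            }
            return out;
        }
    }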