Integrate CloudWatch Logs with CloudHub Mule

In this blog I will explain how to enable AWS CloudWatch logging for your Mule CloudHub application. AWS provides the CloudWatch Logs service so that you can manage your logs more effectively, and it is cheaper than tools like Splunk. Because CloudHub automatically rolls over logs larger than 100 MB, we need a mechanism to manage logs more effectively. For this we created the custom appender below, which sends the logs to CloudWatch.

package com.javaroots.appenders;

import static java.util.Comparator.comparing;
import static java.util.stream.Collectors.toList;

import java.io.Serializable;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Formatter;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.Filter;
import org.apache.logging.log4j.core.Layout;
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.appender.AbstractAppender;
import org.apache.logging.log4j.core.config.plugins.Plugin;
import org.apache.logging.log4j.core.config.plugins.PluginAttribute;
import org.apache.logging.log4j.core.config.plugins.PluginElement;
import org.apache.logging.log4j.core.config.plugins.PluginFactory;
import org.apache.logging.log4j.status.StatusLogger;

import com.amazonaws.regions.Regions;
import com.amazonaws.services.logs.AWSLogs;
import com.amazonaws.services.logs.model.CreateLogGroupRequest;
import com.amazonaws.services.logs.model.CreateLogStreamRequest;
import com.amazonaws.services.logs.model.CreateLogStreamResult;
import com.amazonaws.services.logs.model.DataAlreadyAcceptedException;
import com.amazonaws.services.logs.model.DescribeLogGroupsRequest;
import com.amazonaws.services.logs.model.DescribeLogStreamsRequest;
import com.amazonaws.services.logs.model.InputLogEvent;
import com.amazonaws.services.logs.model.InvalidSequenceTokenException;
import com.amazonaws.services.logs.model.LogGroup;
import com.amazonaws.services.logs.model.LogStream;
import com.amazonaws.services.logs.model.PutLogEventsRequest;
import com.amazonaws.services.logs.model.PutLogEventsResult;

@Plugin(name = "CLOUDW", category = "Core", elementType = "appender", printObject = true)
public class CloudwatchAppender extends AbstractAppender {

    private static final long serialVersionUID = 12321345L;

    private static Logger logger2 = LogManager.getLogger(CloudwatchAppender.class);

    private final Boolean DEBUG_MODE = System.getProperty("log4j.debug") != null;

    /**
     * Used to make sure that on close() our daemon thread isn't also trying to sendMessage()s
     */
    private Object sendMessagesLock = new Object();

    /**
     * The queue used to buffer log entries
     */
    private LinkedBlockingQueue<LogEvent> loggingEventsQueue;

    /**
     * the AWS Cloudwatch Logs API client
     */
    private AWSLogs awsLogsClient;

    private AtomicReference<String> lastSequenceToken = new AtomicReference<>();

    /**
     * The AWS Cloudwatch Log group name
     */
    private String logGroupName;

    /**
     * The AWS Cloudwatch Log stream name
     */
    private String logStreamName;

    /**
     * The queue / buffer size
     */
    private int queueLength = 1024;

    /**
     * The maximum number of log entries to send in one go to the AWS Cloudwatch Log service
     */
    private int messagesBatchSize = 128;

    private AtomicBoolean cloudwatchAppenderInitialised = new AtomicBoolean(false);

    private CloudwatchAppender(final String name,
                               final Layout<? extends Serializable> layout,
                               final Filter filter,
                               final boolean ignoreExceptions,
                               String logGroupName,
                               String logStreamName,
                               Integer queueLength,
                               Integer messagesBatchSize) {
        super(name, filter, layout, ignoreExceptions);
        this.logGroupName = logGroupName;
        this.logStreamName = logStreamName;
        this.queueLength = queueLength;
        this.messagesBatchSize = messagesBatchSize;
        this.activateOptions();
    }

    @Override
    public void append(LogEvent event) {
        if (cloudwatchAppenderInitialised.get()) {
            loggingEventsQueue.offer(event);
        } else {
            // just do nothing
        }
    }

    public void activateOptions() {
        if (isBlank(logGroupName) || isBlank(logStreamName)) {
            logger2.error("Could not initialise CloudwatchAppender because either or both LogGroupName(" + logGroupName + ") and LogStreamName(" + logStreamName + ") are null or empty");
            this.stop();
        } else {
            // below lines work with aws version 1.9.40 for local build
            // this.awsLogsClient = new AWSLogsClient();
            // awsLogsClient.setRegion(Region.getRegion(Regions.AP_SOUTHEAST_2));
            this.awsLogsClient = com.amazonaws.services.logs.AWSLogsClientBuilder.standard()
                    .withRegion(Regions.AP_SOUTHEAST_2)
                    .build();
            loggingEventsQueue = new LinkedBlockingQueue<>(queueLength);
            try {
                initializeCloudwatchResources();
                initCloudwatchDaemon();
                cloudwatchAppenderInitialised.set(true);
            } catch (Exception e) {
                logger2.error("Could not initialise Cloudwatch Logs for LogGroupName: " + logGroupName + " and LogStreamName: " + logStreamName, e);
                if (DEBUG_MODE) {
                    System.err.println("Could not initialise Cloudwatch Logs for LogGroupName: " + logGroupName + " and LogStreamName: " + logStreamName);
                    e.printStackTrace();
                }
            }
        }
    }

    private void initCloudwatchDaemon() {
        Thread t = new Thread(() -> {
            while (true) {
                try {
                    if (loggingEventsQueue.size() > 0) {
                        sendMessages();
                    }
                    Thread.sleep(20L);
                } catch (InterruptedException e) {
                    if (DEBUG_MODE) {
                        e.printStackTrace();
                    }
                }
            }
        });
        t.setName("CloudwatchThread");
        t.setDaemon(true);
        t.start();
    }

    private void sendMessages() {
        synchronized (sendMessagesLock) {
            LogEvent polledLoggingEvent;
            final Layout<? extends Serializable> layout = getLayout();
            List<LogEvent> loggingEvents = new ArrayList<>();
            try {
                while ((polledLoggingEvent = loggingEventsQueue.poll()) != null
                        && loggingEvents.size() <= messagesBatchSize) {
                    loggingEvents.add(polledLoggingEvent);
                }
                List<InputLogEvent> inputLogEvents = loggingEvents.stream()
                        .map(loggingEvent -> new InputLogEvent()
                                .withTimestamp(loggingEvent.getTimeMillis())
                                .withMessage(layout == null
                                        ? loggingEvent.getMessage().getFormattedMessage()
                                        : new String(layout.toByteArray(loggingEvent), StandardCharsets.UTF_8)))
                        .sorted(comparing(InputLogEvent::getTimestamp))
                        .collect(toList());
                if (!inputLogEvents.isEmpty()) {
                    PutLogEventsRequest putLogEventsRequest = new PutLogEventsRequest(logGroupName, logStreamName, inputLogEvents);
                    try {
                        putLogEventsRequest.setSequenceToken(lastSequenceToken.get());
                        PutLogEventsResult result = awsLogsClient.putLogEvents(putLogEventsRequest);
                        lastSequenceToken.set(result.getNextSequenceToken());
                    } catch (DataAlreadyAcceptedException dataAlreadyAcceptedExcepted) {
                        putLogEventsRequest.setSequenceToken(dataAlreadyAcceptedExcepted.getExpectedSequenceToken());
                        PutLogEventsResult result = awsLogsClient.putLogEvents(putLogEventsRequest);
                        lastSequenceToken.set(result.getNextSequenceToken());
                        if (DEBUG_MODE) {
                            dataAlreadyAcceptedExcepted.printStackTrace();
                        }
                    } catch (InvalidSequenceTokenException invalidSequenceTokenException) {
                        putLogEventsRequest.setSequenceToken(invalidSequenceTokenException.getExpectedSequenceToken());
                        PutLogEventsResult result = awsLogsClient.putLogEvents(putLogEventsRequest);
                        lastSequenceToken.set(result.getNextSequenceToken());
                        if (DEBUG_MODE) {
                            invalidSequenceTokenException.printStackTrace();
                        }
                    }
                }
            } catch (Exception e) {
                if (DEBUG_MODE) {
                    logger2.error(" error inserting cloudwatch:", e);
                    e.printStackTrace();
                }
            }
        }
    }

    private void initializeCloudwatchResources() {
        DescribeLogGroupsRequest describeLogGroupsRequest = new DescribeLogGroupsRequest();
        describeLogGroupsRequest.setLogGroupNamePrefix(logGroupName);
        Optional<LogGroup> logGroupOptional = awsLogsClient.describeLogGroups(describeLogGroupsRequest)
                .getLogGroups()
                .stream()
                .filter(logGroup -> logGroup.getLogGroupName().equals(logGroupName))
                .findFirst();
        if (!logGroupOptional.isPresent()) {
            CreateLogGroupRequest createLogGroupRequest = new CreateLogGroupRequest().withLogGroupName(logGroupName);
            awsLogsClient.createLogGroup(createLogGroupRequest);
        }
        DescribeLogStreamsRequest describeLogStreamsRequest = new DescribeLogStreamsRequest()
                .withLogGroupName(logGroupName)
                .withLogStreamNamePrefix(logStreamName);
        Optional<LogStream> logStreamOptional = awsLogsClient.describeLogStreams(describeLogStreamsRequest)
                .getLogStreams()
                .stream()
                .filter(logStream -> logStream.getLogStreamName().equals(logStreamName))
                .findFirst();
        if (!logStreamOptional.isPresent()) {
            CreateLogStreamRequest createLogStreamRequest = new CreateLogStreamRequest()
                    .withLogGroupName(logGroupName)
                    .withLogStreamName(logStreamName);
            CreateLogStreamResult o = awsLogsClient.createLogStream(createLogStreamRequest);
        }
    }

    private boolean isBlank(String string) {
        return null == string || string.trim().length() == 0;
    }

    protected String getSimpleStacktraceAsString(final Throwable thrown) {
        final StringBuilder stackTraceBuilder = new StringBuilder();
        for (StackTraceElement stackTraceElement : thrown.getStackTrace()) {
            new Formatter(stackTraceBuilder).format("%s.%s(%s:%d)%n",
                    stackTraceElement.getClassName(),
                    stackTraceElement.getMethodName(),
                    stackTraceElement.getFileName(),
                    stackTraceElement.getLineNumber());
        }
        return stackTraceBuilder.toString();
    }

    @Override
    public void start() {
        super.start();
    }

    @Override
    public void stop() {
        super.stop();
        while (loggingEventsQueue != null && !loggingEventsQueue.isEmpty()) {
            this.sendMessages();
        }
    }

    @Override
    public String toString() {
        return CloudwatchAppender.class.getSimpleName() + "{"
                + "name=" + getName() + " loggroupName=" + logGroupName
                + " logstreamName=" + logStreamName;
    }

    @PluginFactory
    @SuppressWarnings("unused")
    public static CloudwatchAppender createCloudWatchAppender(
            @PluginAttribute(value = "queueLength") Integer queueLength,
            @PluginElement("Layout") Layout<? extends Serializable> layout,
            @PluginAttribute(value = "logGroupName") String logGroupName,
            @PluginAttribute(value = "logStreamName") String logStreamName,
            @PluginAttribute(value = "name") String name,
            @PluginAttribute(value = "ignoreExceptions", defaultBoolean = false) Boolean ignoreExceptions,
            @PluginAttribute(value = "messagesBatchSize") Integer messagesBatchSize) {
        return new CloudwatchAppender(name, layout, null, ignoreExceptions,
                logGroupName, logStreamName, queueLength, messagesBatchSize);
    }
}
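For a quick local sanity check before deploying to CloudHub, a small class like the one below can push a few messages through the appender. This is only an illustrative sketch and is not part of the original appender code: the class name is made up, it assumes the log4j2.xml shown later in this post is on the classpath, and it assumes AWS credentials are resolvable through the SDK's default provider chain (for example via environment variables).

package com.javaroots.appenders;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

// Hypothetical smoke test for the CloudWatch appender; not part of the original article's code.
public class CloudwatchAppenderSmokeTest {

    private static final Logger LOGGER = LogManager.getLogger(CloudwatchAppenderSmokeTest.class);

    public static void main(String[] args) throws InterruptedException {
        for (int i = 0; i < 10; i++) {
            LOGGER.info("CloudWatch appender smoke test message {}", i);
        }
        // The appender flushes from a daemon thread, so give it a moment
        // to drain its queue before the JVM exits.
        Thread.sleep(2000L);
    }
}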

We add the following dependencies to the pom.xml file.

<dependency>
    <groupId>com.amazonaws</groupId>
    <artifactId>aws-java-sdk-logs</artifactId>
    <!-- for local 3.8.5 we need to use this version cloudhub 3.8.5 has jackson 2.6.6 -->
    <!-- <version>1.9.40</version> -->
    <version>1.11.105</version>
    <exclusions>
        <exclusion>  <!-- declare the exclusion here -->
            <groupId>org.apache.logging.log4j</groupId>
            <artifactId>log4j-1.2-api</artifactId>
        </exclusion>
        <exclusion>  <!-- declare the exclusion here -->
            <groupId>com.fasterxml.jackson.core</groupId>
            <artifactId>jackson-core</artifactId>
        </exclusion>
        <exclusion>  <!-- declare the exclusion here -->
            <groupId>com.fasterxml.jackson.core</groupId>
            <artifactId>jackson-databind</artifactId>
        </exclusion>
    </exclusions>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.logging.log4j/log4j-api -->
<dependency>
    <groupId>org.apache.logging.log4j</groupId>
    <artifactId>log4j-api</artifactId>
    <version>2.5</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.logging.log4j/log4j-core -->
<dependency>
    <groupId>org.apache.logging.log4j</groupId>
    <artifactId>log4j-core</artifactId>
    <version>2.5</version>
</dependency>
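Because the appender builds its AWSLogs client from the default credential provider chain, it can be worth verifying connectivity and IAM permissions with a small standalone check before wiring the appender into Log4j. The sketch below is illustrative only (the class name is hypothetical) and assumes the same region, AP_SOUTHEAST_2, that the appender hard-codes; the appender itself typically needs the logs:CreateLogGroup, logs:CreateLogStream, logs:DescribeLogGroups, logs:DescribeLogStreams and logs:PutLogEvents permissions.

package com.javaroots.appenders;

import com.amazonaws.regions.Regions;
import com.amazonaws.services.logs.AWSLogs;
import com.amazonaws.services.logs.AWSLogsClientBuilder;
import com.amazonaws.services.logs.model.DescribeLogGroupsRequest;

// Hypothetical one-off connectivity check; not part of the original article's code.
public class CloudwatchConnectivityCheck {

    public static void main(String[] args) {
        // Same client construction as the appender uses.
        AWSLogs client = AWSLogsClientBuilder.standard()
                .withRegion(Regions.AP_SOUTHEAST_2)
                .build();

        // If credentials or permissions are wrong, this call fails fast with a clear exception.
        client.describeLogGroups(new DescribeLogGroupsRequest().withLimit(5))
                .getLogGroups()
                .forEach(group -> System.out.println("Visible log group: " + group.getLogGroupName()));
    }
}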

Now we need to modify our log4j2.xml: add the custom CloudWatch appender, and keep the CloudHub log appender as well so that the logs still show up in CloudHub. Note that the packages attribute on the Configuration element must include the package that contains the custom appender, so that Log4j can discover the @Plugin class.

<?xml version="1.0" encoding="utf-8"?>
<Configuration status="trace" packages="au.edu.vu.appenders,com.mulesoft.ch.logging.appender">

    <!-- These are some of the loggers you can enable. There are several more you can find in
         the documentation. Besides this log4j configuration, you can also use Java VM environment
         variables to enable other logs like network (-Djavax.net.debug=ssl or all) and
         Garbage Collector (-XX:+PrintGC). These will be appended to the console, so you will
         see them in the mule_ee.log file. -->

    <Appenders>
        <CLOUDW name="CloudW" logGroupName="test-log-stream" logStreamName="test44"
                messagesBatchSize="${sys:cloudwatch.msg.batch.size}"
                queueLength="${sys:cloudwatch.queue.length}">
            <PatternLayout pattern="%d [%t] %-5p %c - %m%n"/>
        </CLOUDW>

        <Log4J2CloudhubLogAppender name="CLOUDHUB"
                addressProvider="com.mulesoft.ch.logging.DefaultAggregatorAddressProvider"
                applicationContext="com.mulesoft.ch.logging.DefaultApplicationContext"
                appendRetryIntervalMs="${sys:logging.appendRetryInterval}"
                appendMaxAttempts="${sys:logging.appendMaxAttempts}"
                batchSendIntervalMs="${sys:logging.batchSendInterval}"
                batchMaxRecords="${sys:logging.batchMaxRecords}"
                memBufferMaxSize="${sys:logging.memBufferMaxSize}"
                journalMaxWriteBatchSize="${sys:logging.journalMaxBatchSize}"
                journalMaxFileSize="${sys:logging.journalMaxFileSize}"
                clientMaxPacketSize="${sys:logging.clientMaxPacketSize}"
                clientConnectTimeoutMs="${sys:logging.clientConnectTimeout}"
                clientSocketTimeoutMs="${sys:logging.clientSocketTimeout}"
                serverAddressPollIntervalMs="${sys:logging.serverAddressPollInterval}"
                serverHeartbeatSendIntervalMs="${sys:logging.serverHeartbeatSendIntervalMs}"
                statisticsPrintIntervalMs="${sys:logging.statisticsPrintIntervalMs}">
            <PatternLayout pattern="[%d{MM-dd HH:mm:ss}] %-5p %c{1} [%t] CUSTOM: %m%n"/>
        </Log4J2CloudhubLogAppender>
    </Appenders>

    <Loggers>
        <!-- Http Logger shows wire traffic on DEBUG -->
        <AsyncLogger name="org.mule.module.http.internal.HttpMessageLogger" level="WARN"/>
        <!-- JDBC Logger shows queries and parameters values on DEBUG -->
        <AsyncLogger name="com.mulesoft.mule.transport.jdbc" level="WARN"/>
        <!-- CXF is used heavily by Mule for web services -->
        <AsyncLogger name="org.apache.cxf" level="WARN"/>
        <!-- Apache Commons tend to make a lot of noise which can clutter the log -->
        <AsyncLogger name="org.apache" level="WARN"/>
        <!-- Reduce startup noise -->
        <AsyncLogger name="org.springframework.beans.factory" level="WARN"/>
        <!-- Mule classes -->
        <AsyncLogger name="org.mule" level="INFO"/>
        <AsyncLogger name="com.mulesoft" level="INFO"/>
        <!-- Reduce DM verbosity -->
        <AsyncLogger name="org.jetel" level="WARN"/>
        <AsyncLogger name="Tracking" level="WARN"/>

        <AsyncRoot level="INFO">
            <AppenderRef ref="CLOUDHUB" level="INFO"/>
            <AppenderRef ref="CloudW" level="INFO"/>
        </AsyncRoot>
    </Loggers>
</Configuration>
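The CLOUDW appender above reads its queueLength and messagesBatchSize values from the ${sys:cloudwatch.queue.length} and ${sys:cloudwatch.msg.batch.size} system properties, so these must be defined before Log4j is configured; on CloudHub they can be supplied as application properties / JVM arguments in Runtime Manager. For a local run, a hypothetical bootstrap like the sketch below (not part of the original article) can set them programmatically before the first logger is created.

package com.javaroots.appenders;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

// Hypothetical local bootstrap; not part of the original article's code.
public class LocalLoggingBootstrap {

    public static void main(String[] args) {
        // Must run before the first LogManager.getLogger(...) call triggers Log4j configuration.
        System.setProperty("cloudwatch.queue.length", "1024");
        System.setProperty("cloudwatch.msg.batch.size", "128");

        Logger logger = LogManager.getLogger(LocalLoggingBootstrap.class);
        logger.info("CloudWatch appender configured from system properties");
    }
}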

Finally, we need to disable the CloudHub logs in the CloudHub Runtime Manager.

This works with CloudHub Mule runtime version 3.8.4. There are some issues with the CloudHub 3.8.5 runtime: the appender initializes correctly and sends logs, but some events and messages go missing.

Translated from: https://www.javacodegeeks.com/2017/10/integrate-cloudwatch-logs-cloudhub-mule.html
