diff --git a/CHANGELOG.md b/CHANGELOG.md index 196d875621..1d2920d99a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,39 @@ Noteworthy changes to the agent are documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## Version 8.14.0 +## New features and improvements + +* The Java agent supports disabling AI Monitoring at the account/organization level [1972](https://github.com/newrelic/newrelic-java-agent/pull/1972) +* HikariCP instrumentation now captures additional metrics [1976](https://github.com/newrelic/newrelic-java-agent/pull/1976) +* Adds new instrumentation module for `kafka-clients-metrics-3.7.0` [2001](https://github.com/newrelic/newrelic-java-agent/pull/2001) +* Adds new instrumentation module for `jedis-5.0.0` [1969](https://github.com/newrelic/newrelic-java-agent/pull/1969) +* Adds new instrumentation module for `vertx-sqlclient-4.4.2` [2004](https://github.com/newrelic/newrelic-java-agent/pull/2004) +* The `newrelic-scala-api` for Scala 3 will now be published to Maven [1995](https://github.com/newrelic/newrelic-java-agent/pull/1995) +* New AWS MQ attributes will be added to spans [1977](https://github.com/newrelic/newrelic-java-agent/pull/1977) +* Clarify Javadoc comments for `@Trace` API [2009](https://github.com/newrelic/newrelic-java-agent/pull/2009) + +## Fixes + +* Fixes a `netty-reactor` issue that was causing high memory usage [1978](https://github.com/newrelic/newrelic-java-agent/pull/1978) +* Netty instrumentation will start transactions for HTTP/2 requests [1994](https://github.com/newrelic/newrelic-java-agent/pull/1994) + +## Deprecations + +The following instrumentation modules are deprecated and will be removed in the next major release: + +- `aws-wrap-0.7.0` +- `java.completable-future-jdk8` +- `play-2.3` +- `spring-3.0.0` +- `netty-3.4` +- `Struts v1` + +## IAST + +- CSEC Version bump to 1.4.1 [2010](https://github.com/newrelic/newrelic-java-agent/pull/2010) +- Changelog: https://github.com/newrelic/csec-java-agent/releases/tag/1.4.1 + ## Version 8.13.0 ## New features and improvements @@ -28,14 +61,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 * Add security-related class excludes during normal class transformer creation [1918](https://github.com/newrelic/newrelic-java-agent/pull/1918) * Add null checks to vertx 4.5.1 instrumentation [1927](https://github.com/newrelic/newrelic-java-agent/pull/1927) - - ## IAST * CSEC Version bump to 1.4.0 [1956](https://github.com/newrelic/newrelic-java-agent/pull/1956) * [Changelog](https://github.com/newrelic/csec-java-agent/releases/tag/1.4.0) - ## Deprecations - The browser footer injection APIs have been deprecated and will be removed in a future agent release. The header injection API now adds both the header and footer scripts. 
[1679](https://github.com/newrelic/newrelic-java-agent/pull/1679)
diff --git a/agent-bridge-datastore/src/main/java/com/newrelic/agent/bridge/datastore/R2dbcOperation.java b/agent-bridge-datastore/src/main/java/com/newrelic/agent/bridge/datastore/R2dbcOperation.java
index 693d6acb87..809c9e5df9 100644
--- a/agent-bridge-datastore/src/main/java/com/newrelic/agent/bridge/datastore/R2dbcOperation.java
+++ b/agent-bridge-datastore/src/main/java/com/newrelic/agent/bridge/datastore/R2dbcOperation.java
@@ -13,7 +13,7 @@ import java.util.regex.Pattern;
 public class R2dbcOperation {
-
+    public static final OperationAndTableName UNKNOWN_OPERATION_AND_TABLE_NAME = new OperationAndTableName("unknown", "unknown");
     static final Pattern VALID_METRIC_NAME_MATCHER = Pattern.compile("[a-zA-Z0-9.$_@]+");
     static final int PATTERN_SWITCHES = Pattern.CASE_INSENSITIVE | Pattern.DOTALL;
     static final Pattern COMMENT_PATTERN = Pattern.compile("/\\*.*?\\*/", Pattern.DOTALL);
diff --git a/agent-bridge/src/main/java/com/newrelic/agent/bridge/CollectionFactory.java b/agent-bridge/src/main/java/com/newrelic/agent/bridge/CollectionFactory.java
index 378905c034..33d604e00b 100644
--- a/agent-bridge/src/main/java/com/newrelic/agent/bridge/CollectionFactory.java
+++ b/agent-bridge/src/main/java/com/newrelic/agent/bridge/CollectionFactory.java
@@ -49,4 +49,16 @@ public interface CollectionFactory {
      * @param <V> the type of value stored/returned
      */
     <K, V> Function<K, V> memorize(Function<K, V> loader, int maxSize);
+
+    /**
+     * Create a time based eviction cache in which an entry's age is determined on a last-access basis.
+     *
+     * @param <K> key type
+     * @param <V> cached type
+     * @param ageInSeconds how old, in seconds, a cache entry must be to be evicted after last access
+     * @param initialCapacity the initial capacity of the cache
+     * @param loader the function to calculate the value for a key, used if the key is not cached
+     * @return a time based concurrent cache
+     */
+    <K, V> Function<K, V> createAccessTimeBasedCache(long ageInSeconds, int initialCapacity, Function<K, V> loader);
 }
diff --git a/agent-bridge/src/main/java/com/newrelic/agent/bridge/DefaultCollectionFactory.java b/agent-bridge/src/main/java/com/newrelic/agent/bridge/DefaultCollectionFactory.java
index f42329d028..97e1521eda 100644
--- a/agent-bridge/src/main/java/com/newrelic/agent/bridge/DefaultCollectionFactory.java
+++ b/agent-bridge/src/main/java/com/newrelic/agent/bridge/DefaultCollectionFactory.java
@@ -43,4 +43,12 @@ public <K, V> Function<K, V> memorize(Function<K, V> loader, int maxSize) {
             return loader.apply(k1);
         });
     }
+
+    /**
+     * Note: In this implementation, this method will return the loader function as is.
+     */
+    @Override
+    public <K, V> Function<K, V> createAccessTimeBasedCache(long ageInSeconds, int initialCapacity, Function<K, V> loader) {
+        return loader;
+    }
 }
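For context, a hypothetical sketch of what a concrete `createAccessTimeBasedCache` implementation could look like on the agent side, assuming a Caffeine-style cache library (the production implementation is not part of this diff, and `DefaultCollectionFactory` above deliberately falls back to the uncached loader):

```java
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;

import java.util.concurrent.TimeUnit;
import java.util.function.Function;

// Hypothetical implementation of the createAccessTimeBasedCache contract,
// assuming a Caffeine dependency; entry age is reset on every read, matching
// the "last-access" wording of the interface javadoc.
public class AccessTimeBasedCacheSketch {
    public <K, V> Function<K, V> createAccessTimeBasedCache(long ageInSeconds, int initialCapacity, Function<K, V> loader) {
        LoadingCache<K, V> cache = Caffeine.newBuilder()
                .initialCapacity(initialCapacity)
                .expireAfterAccess(ageInSeconds, TimeUnit.SECONDS)
                .build(loader::apply);
        return cache::get; // evicted keys are transparently recomputed by the loader
    }
}
```

Because the default bridge implementation simply returns the loader unchanged, callers must treat the caching behavior as a best-effort optimization rather than a guarantee.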
diff --git a/dev-tools/live-templates b/dev-tools/live-templates
new file mode 100644
index 0000000000..931f35b3d8
--- /dev/null
+++ b/dev-tools/live-templates
@@ -0,0 +1,110 @@
+ + + + + + + + + + + + + +
\ No newline at end of file
diff --git a/gradle.properties b/gradle.properties
index cb45415d71..a043ea78d9 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -1,6 +1,6 @@
 # The agent version.
-agentVersion=8.14.0
-securityAgentVersion=1.4.0
+agentVersion=8.15.0
+securityAgentVersion=1.4.1
 newrelicDebug=false
 org.gradle.jvmargs=-Xmx2048m
diff --git a/instrumentation/aws-java-sdk-kinesis-1.11.106/build.gradle b/instrumentation/aws-java-sdk-kinesis-1.11.106/build.gradle
new file mode 100644
index 0000000000..c3aa9ff565
--- /dev/null
+++ b/instrumentation/aws-java-sdk-kinesis-1.11.106/build.gradle
@@ -0,0 +1,12 @@
+dependencies {
+    implementation(project(":agent-bridge"))
+    implementation("com.amazonaws:aws-java-sdk-kinesis:1.11.106")
+}
+
+jar {
+    manifest { attributes 'Implementation-Title': 'com.newrelic.instrumentation.aws-java-sdk-kinesis-1.11.106' }
+}
+
+verifyInstrumentation {
+    passesOnly 'com.amazonaws:aws-java-sdk-kinesis:[1.11.106,)'
+}
\ No newline at end of file
diff --git a/instrumentation/aws-java-sdk-kinesis-1.11.106/src/main/java/com/agent/instrumentation/awsjavasdk2/services/kinesis/KinesisUtil.java b/instrumentation/aws-java-sdk-kinesis-1.11.106/src/main/java/com/agent/instrumentation/awsjavasdk2/services/kinesis/KinesisUtil.java
new file mode 100644
index 0000000000..88e4a8d97f
--- /dev/null
+++ b/instrumentation/aws-java-sdk-kinesis-1.11.106/src/main/java/com/agent/instrumentation/awsjavasdk2/services/kinesis/KinesisUtil.java
@@ -0,0 +1,63 @@
+package com.agent.instrumentation.awsjavasdk2.services.kinesis;
+
+import com.amazonaws.AmazonWebServiceRequest;
+import com.amazonaws.handlers.AsyncHandler_Instrumentation;
+import com.newrelic.agent.bridge.AgentBridge;
+import com.newrelic.api.agent.CloudParameters;
+import com.newrelic.api.agent.NewRelic;
+import com.newrelic.api.agent.Token;
+import com.newrelic.api.agent.TracedMethod;
+
+import java.util.Map;
+
+public class KinesisUtil {
+
+    public static final String PLATFORM = "aws_kinesis_data_streams";
+    public static final String TRACE_CATEGORY = "Kinesis";
+
+    public static final Map<AmazonWebServiceRequest, Token> requestTokenMap = AgentBridge.collectionFactory.createConcurrentWeakKeyedMap();
+    private KinesisUtil() {}
+
+    public static void setTokenForRequest(AmazonWebServiceRequest request) {
+        if (AgentBridge.getAgent().getTransaction(false) != null) {
+            if (request != null) {
+                Token token = NewRelic.getAgent().getTransaction().getToken();
+                requestTokenMap.put(request, token);
+            }
+        }
+    }
+
+    public static void setTraceInformation(String kinesisOperation, AmazonWebServiceRequest request) {
+        Token token = KinesisUtil.getToken(request);
+        if (token != null) {
+            token.linkAndExpire();
+        }
+        KinesisUtil.cleanToken(request);
+        TracedMethod tracedMethod = NewRelic.getAgent().getTransaction().getTracedMethod();
+        KinesisUtil.setTraceDetails(kinesisOperation, tracedMethod);
+    }
+
+    public static Token getToken(AmazonWebServiceRequest request) {
+        if (request != null) {
+            return requestTokenMap.get(request);
+        }
+        return null;
+    }
+
+    public static void cleanToken(AmazonWebServiceRequest request) {
+        if (request != null) {
+            requestTokenMap.remove(request);
+        }
+    }
+
+    public static void setTraceDetails(String kinesisOperation, TracedMethod tracedMethod) {
+        tracedMethod.setMetricName(TRACE_CATEGORY, kinesisOperation);
+        tracedMethod.reportAsExternal(createCloudParams());
+    }
+
+    public static CloudParameters createCloudParams() {
+        // Todo: add arn to cloud parameters
+        return CloudParameters.provider(PLATFORM).build();
+    }
+
+}
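The `Todo: add arn to cloud parameters` note above leaves the spans without a cloud resource id for now. A hedged sketch of what that follow-up could look like, assuming the stream ARN can be resolved and that the `CloudParameters` builder exposes a `resourceId` setter:

```java
import com.newrelic.api.agent.CloudParameters;

// Hypothetical follow-up for the TODO above: attach the stream ARN as the
// cloud resource id so the span can be linked to the Kinesis stream entity.
// How the ARN (account id + region + stream name) is resolved is assumed here.
public class KinesisCloudParametersSketch {
    public static CloudParameters createCloudParams(String streamArn) {
        return CloudParameters.provider("aws_kinesis_data_streams")
                .resourceId(streamArn) // e.g. arn:aws:kinesis:us-east-1:123456789012:stream/my-stream
                .build();
    }
}
```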
diff --git a/instrumentation/aws-java-sdk-kinesis-1.11.106/src/main/java/com/amazonaws/handlers/AsyncHandler_Instrumentation.java b/instrumentation/aws-java-sdk-kinesis-1.11.106/src/main/java/com/amazonaws/handlers/AsyncHandler_Instrumentation.java
new file mode 100644
index 0000000000..6a5e814917
--- /dev/null
+++ b/instrumentation/aws-java-sdk-kinesis-1.11.106/src/main/java/com/amazonaws/handlers/AsyncHandler_Instrumentation.java
@@ -0,0 +1,41 @@
+/*
+ *
+ *  * Copyright 2020 New Relic Corporation. All rights reserved.
+ *  * SPDX-License-Identifier: Apache-2.0
+ *
+ */
+
+package com.amazonaws.handlers;
+
+import com.amazonaws.AmazonWebServiceRequest;
+import com.newrelic.api.agent.Token;
+import com.newrelic.api.agent.Trace;
+import com.newrelic.api.agent.weaver.MatchType;
+import com.newrelic.api.agent.weaver.NewField;
+import com.newrelic.api.agent.weaver.Weave;
+import com.newrelic.api.agent.weaver.Weaver;
+
+@Weave(originalName = "com.amazonaws.handlers.AsyncHandler", type = MatchType.Interface)
+public class AsyncHandler_Instrumentation<REQUEST extends AmazonWebServiceRequest, RESULT> {
+
+    @NewField
+    public Token token;
+
+    @Trace(async = true)
+    public void onError(Exception exception) {
+        if (token != null) {
+            token.linkAndExpire();
+            token = null;
+        }
+        Weaver.callOriginal();
+    }
+
+    @Trace(async = true)
+    public void onSuccess(REQUEST request, RESULT result) {
+        if (token != null) {
+            token.linkAndExpire();
+            token = null;
+        }
+        Weaver.callOriginal();
+    }
+}
\ No newline at end of file
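The weave above is the callback half of a token handoff. As a minimal standalone sketch of the same pattern (class and method names here are illustrative, not part of the diff): a token minted on the request thread is linked and expired on whichever thread runs the callback, tying both sides to one transaction:

```java
import com.newrelic.api.agent.NewRelic;
import com.newrelic.api.agent.Token;
import com.newrelic.api.agent.Trace;

import java.util.concurrent.Executor;

// Illustrative token handoff: the request thread creates a token inside its
// transaction; the callback thread links (and expires) it so the async work
// is stitched into the same transaction, as the weaves in this module do.
public class TokenHandoffSketch {
    @Trace(dispatcher = true)
    public void submit(Executor executor) {
        Token token = NewRelic.getAgent().getTransaction().getToken();
        executor.execute(() -> onComplete(token));
    }

    @Trace(async = true)
    void onComplete(Token token) {
        token.linkAndExpire(); // joins this traced method to the originating transaction
    }
}
```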
diff --git a/instrumentation/aws-java-sdk-kinesis-1.11.106/src/main/java/com/amazonaws/services/kinesis/AmazonKinesisAsyncClient_Instrumentation.java b/instrumentation/aws-java-sdk-kinesis-1.11.106/src/main/java/com/amazonaws/services/kinesis/AmazonKinesisAsyncClient_Instrumentation.java
new file mode 100644
index 0000000000..eee81f6fb1
--- /dev/null
+++ b/instrumentation/aws-java-sdk-kinesis-1.11.106/src/main/java/com/amazonaws/services/kinesis/AmazonKinesisAsyncClient_Instrumentation.java
@@ -0,0 +1,194 @@
+package com.amazonaws.services.kinesis;
+
+import com.agent.instrumentation.awsjavasdk2.services.kinesis.KinesisUtil;
+import com.amazonaws.AmazonWebServiceRequest;
+import com.amazonaws.handlers.AsyncHandler_Instrumentation;
+import com.amazonaws.services.kinesis.model.AddTagsToStreamRequest;
+import com.amazonaws.services.kinesis.model.AddTagsToStreamResult;
+import com.amazonaws.services.kinesis.model.CreateStreamRequest;
+import com.amazonaws.services.kinesis.model.CreateStreamResult;
+import com.amazonaws.services.kinesis.model.DecreaseStreamRetentionPeriodRequest;
+import com.amazonaws.services.kinesis.model.DecreaseStreamRetentionPeriodResult;
+import com.amazonaws.services.kinesis.model.DeleteStreamRequest;
+import com.amazonaws.services.kinesis.model.DeleteStreamResult;
+import com.amazonaws.services.kinesis.model.DescribeLimitsRequest;
+import com.amazonaws.services.kinesis.model.DescribeLimitsResult;
+import com.amazonaws.services.kinesis.model.DescribeStreamRequest;
+import com.amazonaws.services.kinesis.model.DescribeStreamResult;
+import com.amazonaws.services.kinesis.model.DisableEnhancedMonitoringRequest;
+import com.amazonaws.services.kinesis.model.DisableEnhancedMonitoringResult;
+import com.amazonaws.services.kinesis.model.EnableEnhancedMonitoringRequest;
+import com.amazonaws.services.kinesis.model.EnableEnhancedMonitoringResult;
+import com.amazonaws.services.kinesis.model.GetRecordsRequest;
+import com.amazonaws.services.kinesis.model.GetRecordsResult;
+import com.amazonaws.services.kinesis.model.GetShardIteratorRequest;
+import com.amazonaws.services.kinesis.model.GetShardIteratorResult;
+import com.amazonaws.services.kinesis.model.IncreaseStreamRetentionPeriodRequest;
+import com.amazonaws.services.kinesis.model.IncreaseStreamRetentionPeriodResult;
+import com.amazonaws.services.kinesis.model.ListStreamsRequest;
+import com.amazonaws.services.kinesis.model.ListStreamsResult;
+import com.amazonaws.services.kinesis.model.ListTagsForStreamRequest;
+import com.amazonaws.services.kinesis.model.ListTagsForStreamResult;
+import com.amazonaws.services.kinesis.model.MergeShardsRequest;
+import com.amazonaws.services.kinesis.model.MergeShardsResult;
+import com.amazonaws.services.kinesis.model.PutRecordRequest;
+import com.amazonaws.services.kinesis.model.PutRecordResult;
+import com.amazonaws.services.kinesis.model.PutRecordsRequest;
+import com.amazonaws.services.kinesis.model.PutRecordsResult;
+import com.amazonaws.services.kinesis.model.RemoveTagsFromStreamRequest;
+import com.amazonaws.services.kinesis.model.RemoveTagsFromStreamResult;
+import com.amazonaws.services.kinesis.model.SplitShardRequest;
+import com.amazonaws.services.kinesis.model.SplitShardResult;
+import com.amazonaws.services.kinesis.model.UpdateShardCountRequest;
+import com.amazonaws.services.kinesis.model.UpdateShardCountResult;
+import com.newrelic.agent.bridge.AgentBridge;
+import com.newrelic.api.agent.NewRelic;
+import com.newrelic.api.agent.Trace;
+import com.newrelic.api.agent.weaver.MatchType;
+import com.newrelic.api.agent.weaver.Weave;
+import com.newrelic.api.agent.weaver.Weaver;
+
+import java.util.concurrent.Future;
+
+@Weave(originalName = "com.amazonaws.services.kinesis.AmazonKinesisAsyncClient", type = MatchType.ExactClass)
+public class AmazonKinesisAsyncClient_Instrumentation {
+
+    @Trace
+    public Future<AddTagsToStreamResult> addTagsToStreamAsync(AddTagsToStreamRequest request,
+            AsyncHandler_Instrumentation<AddTagsToStreamRequest, AddTagsToStreamResult> asyncHandler) {
+        setToken(asyncHandler, request);
+        return Weaver.callOriginal();
+    }
+
+    @Trace
+    public Future<CreateStreamResult> createStreamAsync(CreateStreamRequest request,
+            AsyncHandler_Instrumentation<CreateStreamRequest, CreateStreamResult> asyncHandler) {
+        setToken(asyncHandler, request);
+        return Weaver.callOriginal();
+    }
+
+    @Trace
+    public Future<DecreaseStreamRetentionPeriodResult> decreaseStreamRetentionPeriodAsync(
+            DecreaseStreamRetentionPeriodRequest request,
+            AsyncHandler_Instrumentation<DecreaseStreamRetentionPeriodRequest, DecreaseStreamRetentionPeriodResult> asyncHandler) {
+        setToken(asyncHandler, request);
+        return Weaver.callOriginal();
+    }
+
+    @Trace
+    public Future<DeleteStreamResult> deleteStreamAsync(DeleteStreamRequest request,
+            AsyncHandler_Instrumentation<DeleteStreamRequest, DeleteStreamResult> asyncHandler) {
+        setToken(asyncHandler, request);
+        return Weaver.callOriginal();
+    }
+
+    @Trace
+    public Future<DescribeLimitsResult> describeLimitsAsync(DescribeLimitsRequest request,
+            AsyncHandler_Instrumentation<DescribeLimitsRequest, DescribeLimitsResult> asyncHandler) {
+        setToken(asyncHandler, request);
+        return Weaver.callOriginal();
+    }
+
+    @Trace
+    public Future<DescribeStreamResult> describeStreamAsync(DescribeStreamRequest request,
+            AsyncHandler_Instrumentation<DescribeStreamRequest, DescribeStreamResult> asyncHandler) {
+        setToken(asyncHandler, request);
+        return Weaver.callOriginal();
+    }
+
+    @Trace
+    public Future<DisableEnhancedMonitoringResult> disableEnhancedMonitoringAsync(DisableEnhancedMonitoringRequest request,
+            AsyncHandler_Instrumentation<DisableEnhancedMonitoringRequest, DisableEnhancedMonitoringResult> asyncHandler) {
+        setToken(asyncHandler, request);
+        return Weaver.callOriginal();
+    }
+
+    @Trace
+    public Future<EnableEnhancedMonitoringResult> enableEnhancedMonitoringAsync(EnableEnhancedMonitoringRequest request,
+            AsyncHandler_Instrumentation<EnableEnhancedMonitoringRequest, EnableEnhancedMonitoringResult> asyncHandler) {
+        setToken(asyncHandler, request);
+        return Weaver.callOriginal();
+    }
+
+    @Trace
+    public Future<GetRecordsResult> getRecordsAsync(GetRecordsRequest request,
+            AsyncHandler_Instrumentation<GetRecordsRequest, GetRecordsResult> asyncHandler) {
+        setToken(asyncHandler, request);
+        return Weaver.callOriginal();
+    }
+
+    @Trace
+    public Future<GetShardIteratorResult> getShardIteratorAsync(GetShardIteratorRequest request,
+            AsyncHandler_Instrumentation<GetShardIteratorRequest, GetShardIteratorResult> asyncHandler) {
+        setToken(asyncHandler, request);
+        return Weaver.callOriginal();
+    }
+
+    @Trace
+    public Future<IncreaseStreamRetentionPeriodResult> increaseStreamRetentionPeriodAsync(
+            IncreaseStreamRetentionPeriodRequest request,
+            AsyncHandler_Instrumentation<IncreaseStreamRetentionPeriodRequest, IncreaseStreamRetentionPeriodResult> asyncHandler) {
+        setToken(asyncHandler, request);
+        return Weaver.callOriginal();
+    }
+
+    @Trace
+    public Future<ListStreamsResult> listStreamsAsync(ListStreamsRequest request,
+            AsyncHandler_Instrumentation<ListStreamsRequest, ListStreamsResult> asyncHandler) {
+        setToken(asyncHandler, request);
+        return Weaver.callOriginal();
+    }
+
+    @Trace
+    public Future<ListTagsForStreamResult> listTagsForStreamAsync(ListTagsForStreamRequest request,
+            AsyncHandler_Instrumentation<ListTagsForStreamRequest, ListTagsForStreamResult> asyncHandler) {
+        setToken(asyncHandler, request);
+        return Weaver.callOriginal();
+    }
+
+    @Trace
+    public Future<MergeShardsResult> mergeShardsAsync(MergeShardsRequest request,
+            AsyncHandler_Instrumentation<MergeShardsRequest, MergeShardsResult> asyncHandler) {
+        setToken(asyncHandler, request);
+        return Weaver.callOriginal();
+    }
+
+    @Trace
+    public Future<PutRecordResult> putRecordAsync(PutRecordRequest request,
+            AsyncHandler_Instrumentation<PutRecordRequest, PutRecordResult> asyncHandler) {
+        setToken(asyncHandler, request);
+        return Weaver.callOriginal();
+    }
+
+    @Trace
+    public Future<PutRecordsResult> putRecordsAsync(PutRecordsRequest request,
+            AsyncHandler_Instrumentation<PutRecordsRequest, PutRecordsResult> asyncHandler) {
+        setToken(asyncHandler, request);
+        return Weaver.callOriginal();
+    }
+
+    @Trace
+    public Future<RemoveTagsFromStreamResult> removeTagsFromStreamAsync(RemoveTagsFromStreamRequest request,
+            AsyncHandler_Instrumentation<RemoveTagsFromStreamRequest, RemoveTagsFromStreamResult> asyncHandler) {
+        setToken(asyncHandler, request);
+        return Weaver.callOriginal();
+    }
+
+    @Trace
+    public Future<SplitShardResult> splitShardAsync(SplitShardRequest request,
+            AsyncHandler_Instrumentation<SplitShardRequest, SplitShardResult> asyncHandler) {
+        setToken(asyncHandler, request);
+        return Weaver.callOriginal();
+    }
+
+    @Trace
+    public Future<UpdateShardCountResult> updateShardCountAsync(UpdateShardCountRequest request,
+            AsyncHandler_Instrumentation<UpdateShardCountRequest, UpdateShardCountResult> asyncHandler) {
+        setToken(asyncHandler, request);
+        return Weaver.callOriginal();
+    }
+
+    private void setToken(AsyncHandler_Instrumentation asyncHandler, AmazonWebServiceRequest request) {
+        if (AgentBridge.getAgent().getTransaction(false) != null) {
+            if (asyncHandler != null) {
+                asyncHandler.token = NewRelic.getAgent().getTransaction().getToken();
+            }
+            KinesisUtil.setTokenForRequest(request);
+        }
+    }
+
+}
diff --git a/instrumentation/aws-java-sdk-kinesis-1.11.106/src/main/java/com/amazonaws/services/kinesis/AmazonKinesisClient_Instrumentation.java b/instrumentation/aws-java-sdk-kinesis-1.11.106/src/main/java/com/amazonaws/services/kinesis/AmazonKinesisClient_Instrumentation.java
new file mode 100644
index 0000000000..cf70acb0d3
--- /dev/null
+++ b/instrumentation/aws-java-sdk-kinesis-1.11.106/src/main/java/com/amazonaws/services/kinesis/AmazonKinesisClient_Instrumentation.java
@@ -0,0 +1,166 @@
+package com.amazonaws.services.kinesis;
+
+import com.agent.instrumentation.awsjavasdk2.services.kinesis.KinesisUtil;
+import com.amazonaws.services.kinesis.model.AddTagsToStreamRequest;
+import com.amazonaws.services.kinesis.model.AddTagsToStreamResult;
+import com.amazonaws.services.kinesis.model.CreateStreamRequest;
+import com.amazonaws.services.kinesis.model.CreateStreamResult;
+import com.amazonaws.services.kinesis.model.DecreaseStreamRetentionPeriodRequest;
+import com.amazonaws.services.kinesis.model.DecreaseStreamRetentionPeriodResult;
+import com.amazonaws.services.kinesis.model.DeleteStreamRequest;
+import com.amazonaws.services.kinesis.model.DeleteStreamResult;
+import com.amazonaws.services.kinesis.model.DescribeLimitsRequest;
+import
com.amazonaws.services.kinesis.model.DescribeLimitsResult; +import com.amazonaws.services.kinesis.model.DescribeStreamRequest; +import com.amazonaws.services.kinesis.model.DescribeStreamResult; +import com.amazonaws.services.kinesis.model.DisableEnhancedMonitoringRequest; +import com.amazonaws.services.kinesis.model.DisableEnhancedMonitoringResult; +import com.amazonaws.services.kinesis.model.EnableEnhancedMonitoringRequest; +import com.amazonaws.services.kinesis.model.EnableEnhancedMonitoringResult; +import com.amazonaws.services.kinesis.model.GetRecordsRequest; +import com.amazonaws.services.kinesis.model.GetRecordsResult; +import com.amazonaws.services.kinesis.model.GetShardIteratorRequest; +import com.amazonaws.services.kinesis.model.GetShardIteratorResult; +import com.amazonaws.services.kinesis.model.IncreaseStreamRetentionPeriodRequest; +import com.amazonaws.services.kinesis.model.IncreaseStreamRetentionPeriodResult; +import com.amazonaws.services.kinesis.model.ListStreamsRequest; +import com.amazonaws.services.kinesis.model.ListStreamsResult; +import com.amazonaws.services.kinesis.model.ListTagsForStreamRequest; +import com.amazonaws.services.kinesis.model.ListTagsForStreamResult; +import com.amazonaws.services.kinesis.model.MergeShardsRequest; +import com.amazonaws.services.kinesis.model.MergeShardsResult; +import com.amazonaws.services.kinesis.model.PutRecordRequest; +import com.amazonaws.services.kinesis.model.PutRecordResult; +import com.amazonaws.services.kinesis.model.PutRecordsRequest; +import com.amazonaws.services.kinesis.model.PutRecordsResult; +import com.amazonaws.services.kinesis.model.RemoveTagsFromStreamRequest; +import com.amazonaws.services.kinesis.model.RemoveTagsFromStreamResult; +import com.amazonaws.services.kinesis.model.SplitShardRequest; +import com.amazonaws.services.kinesis.model.SplitShardResult; +import com.amazonaws.services.kinesis.model.UpdateShardCountRequest; +import com.amazonaws.services.kinesis.model.UpdateShardCountResult; +import com.newrelic.api.agent.NewRelic; +import com.newrelic.api.agent.Token; +import com.newrelic.api.agent.Trace; +import com.newrelic.api.agent.weaver.MatchType; +import com.newrelic.api.agent.weaver.Weave; +import com.newrelic.api.agent.weaver.Weaver; + +@Weave(originalName = "com.amazonaws.services.kinesis.AmazonKinesisClient", type = MatchType.ExactClass) +public class AmazonKinesisClient_Instrumentation { + + @Trace(async = true, leaf = true) + final AddTagsToStreamResult executeAddTagsToStream(AddTagsToStreamRequest request) { + KinesisUtil.setTraceInformation("addTagsToStream", request); + return Weaver.callOriginal(); + } + + @Trace(async = true, leaf = true) + final CreateStreamResult executeCreateStream(CreateStreamRequest request) { + KinesisUtil.setTraceInformation("createStream", request); + return Weaver.callOriginal(); + } + + @Trace(async = true, leaf = true) + final DecreaseStreamRetentionPeriodResult executeDecreaseStreamRetentionPeriod(DecreaseStreamRetentionPeriodRequest request) { + KinesisUtil.setTraceInformation("decreaseStreamRetentionPeriod", request); + return Weaver.callOriginal(); + } + + @Trace(async = true, leaf = true) + final DeleteStreamResult executeDeleteStream(DeleteStreamRequest request) { + KinesisUtil.setTraceInformation("deleteStream", request); + return Weaver.callOriginal(); + } + + @Trace(async = true, leaf = true) + final DescribeLimitsResult executeDescribeLimits(DescribeLimitsRequest request) { + KinesisUtil.setTraceInformation("describeLimits", request); + return 
Weaver.callOriginal(); + } + + @Trace(async = true, leaf = true) + final DescribeStreamResult executeDescribeStream(DescribeStreamRequest request) { + KinesisUtil.setTraceInformation("describeStream", request); + return Weaver.callOriginal(); + } + + @Trace(async = true, leaf = true) + final DisableEnhancedMonitoringResult executeDisableEnhancedMonitoring(DisableEnhancedMonitoringRequest request) { + KinesisUtil.setTraceInformation("disableEnhancedMonitoring", request); + return Weaver.callOriginal(); + } + + @Trace(async = true, leaf = true) + final EnableEnhancedMonitoringResult executeEnableEnhancedMonitoring(EnableEnhancedMonitoringRequest request) { + KinesisUtil.setTraceInformation("enableEnhancedMonitoring", request); + return Weaver.callOriginal(); + } + + @Trace(async = true, leaf = true) + final GetRecordsResult executeGetRecords(GetRecordsRequest request) { + KinesisUtil.setTraceInformation("getRecords", request); + return Weaver.callOriginal(); + } + + @Trace(async = true, leaf = true) + final GetShardIteratorResult executeGetShardIterator(GetShardIteratorRequest request) { + KinesisUtil.setTraceInformation("getShardIterator", request); + return Weaver.callOriginal(); + } + + @Trace(async = true, leaf = true) + final IncreaseStreamRetentionPeriodResult executeIncreaseStreamRetentionPeriod(IncreaseStreamRetentionPeriodRequest request) { + KinesisUtil.setTraceInformation("increaseStreamRetentionPeriod", request); + return Weaver.callOriginal(); + } + + @Trace(async = true, leaf = true) + final ListStreamsResult executeListStreams(ListStreamsRequest request) { + KinesisUtil.setTraceInformation("listStreams", request); + return Weaver.callOriginal(); + } + + @Trace(async = true, leaf = true) + final ListTagsForStreamResult executeListTagsForStream(ListTagsForStreamRequest request) { + KinesisUtil.setTraceInformation("listTagsForStream", request); + return Weaver.callOriginal(); + } + + @Trace(async = true, leaf = true) + final MergeShardsResult executeMergeShards(MergeShardsRequest request) { + KinesisUtil.setTraceInformation("mergeShards", request); + return Weaver.callOriginal(); + } + + @Trace(async = true, leaf = true) + final PutRecordResult executePutRecord(PutRecordRequest request) { + KinesisUtil.setTraceInformation("putRecord", request); + return Weaver.callOriginal(); + } + + @Trace(async = true, leaf = true) + final PutRecordsResult executePutRecords(PutRecordsRequest request) { + KinesisUtil.setTraceInformation("putRecords", request); + return Weaver.callOriginal(); + } + + @Trace(async = true, leaf = true) + final RemoveTagsFromStreamResult executeRemoveTagsFromStream(RemoveTagsFromStreamRequest request) { + KinesisUtil.setTraceInformation("removeTagsFromStream", request); + return Weaver.callOriginal(); + } + + @Trace(async = true, leaf = true) + final SplitShardResult executeSplitShard(SplitShardRequest request) { + KinesisUtil.setTraceInformation("splitShard", request); + return Weaver.callOriginal(); + } + + @Trace(async = true, leaf = true) + final UpdateShardCountResult executeUpdateShardCount(UpdateShardCountRequest request) { + KinesisUtil.setTraceInformation("updateShardCount", request); + return Weaver.callOriginal(); + } + +} diff --git a/instrumentation/aws-java-sdk-kinesis-1.11.106/src/test/java/com/amazonaws/services/kinesis/AmazonKinesisAPITest.java b/instrumentation/aws-java-sdk-kinesis-1.11.106/src/test/java/com/amazonaws/services/kinesis/AmazonKinesisAPITest.java new file mode 100644 index 0000000000..f564ff645d --- /dev/null +++ 
b/instrumentation/aws-java-sdk-kinesis-1.11.106/src/test/java/com/amazonaws/services/kinesis/AmazonKinesisAPITest.java @@ -0,0 +1,300 @@ +package com.amazonaws.services.kinesis; + +import com.amazonaws.AmazonWebServiceRequest; +import com.amazonaws.auth.AWSCredentials; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.client.builder.AwsClientBuilder; +import com.amazonaws.handlers.AsyncHandler; +import com.amazonaws.services.kinesis.model.AddTagsToStreamRequest; +import com.amazonaws.services.kinesis.model.CreateStreamRequest; +import com.amazonaws.services.kinesis.model.DecreaseStreamRetentionPeriodRequest; +import com.amazonaws.services.kinesis.model.DeleteStreamRequest; +import com.amazonaws.services.kinesis.model.DescribeLimitsRequest; +import com.amazonaws.services.kinesis.model.DescribeStreamRequest; +import com.amazonaws.services.kinesis.model.DisableEnhancedMonitoringRequest; +import com.amazonaws.services.kinesis.model.EnableEnhancedMonitoringRequest; +import com.amazonaws.services.kinesis.model.GetRecordsRequest; +import com.amazonaws.services.kinesis.model.GetShardIteratorRequest; +import com.amazonaws.services.kinesis.model.IncreaseStreamRetentionPeriodRequest; +import com.amazonaws.services.kinesis.model.ListStreamsRequest; +import com.amazonaws.services.kinesis.model.ListTagsForStreamRequest; +import com.amazonaws.services.kinesis.model.MergeShardsRequest; +import com.amazonaws.services.kinesis.model.PutRecordRequest; +import com.amazonaws.services.kinesis.model.PutRecordsRequest; +import com.amazonaws.services.kinesis.model.RemoveTagsFromStreamRequest; +import com.amazonaws.services.kinesis.model.SplitShardRequest; +import com.amazonaws.services.kinesis.model.UpdateShardCountRequest; +import com.newrelic.agent.introspec.InstrumentationTestConfig; +import com.newrelic.agent.introspec.InstrumentationTestRunner; +import com.newrelic.agent.introspec.Introspector; +import com.newrelic.agent.introspec.SpanEvent; +import com.newrelic.agent.introspec.TraceSegment; +import com.newrelic.agent.introspec.TransactionTrace; +import com.newrelic.agent.introspec.internal.HttpServerRule; +import com.newrelic.api.agent.Trace; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.runner.RunWith; + +import java.net.URISyntaxException; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.Future; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +@RunWith(InstrumentationTestRunner.class) +@InstrumentationTestConfig(includePrefixes = {"com.amazonaws"}, configName = "dt_enabled.yml") +public class AmazonKinesisAPITest { + + @Rule + public HttpServerRule server = new HttpServerRule(); + private AmazonKinesis kinesisClient; + private AmazonKinesisAsync kinesisAsyncClient; + + @Before + public void setup() throws URISyntaxException { + String serverUriStr = server.getEndPoint().toString(); + AwsClientBuilder.EndpointConfiguration endpoint = new AwsClientBuilder.EndpointConfiguration(serverUriStr, "us-east-1"); + kinesisClient = AmazonKinesisClientBuilder.standard() + .withCredentials(new CredProvider()) + .withEndpointConfiguration(endpoint) + .build(); + kinesisAsyncClient = AmazonKinesisAsyncClientBuilder.standard() + .withCredentials(new CredProvider()) + 
.withEndpointConfiguration(endpoint) + .build(); + } + + // HttpServerRule is flaky so only 1 test is run + + @Test + public void testAddTagsToStream() { + txn(() -> kinesisClient.addTagsToStream(new AddTagsToStreamRequest())); + txnAsync(() -> kinesisAsyncClient.addTagsToStreamAsync(new AddTagsToStreamRequest())); + assertKinesisTrace("addTagsToStream", false); + } + +// @Test +// public void testCreateStream() { +// txn(() -> kinesisClient.createStream(new CreateStreamRequest())); +// txnAsync(() -> kinesisAsyncClient.createStreamAsync(new CreateStreamRequest())); +// assertKinesisTrace("createStream", false); +// } +// +// @Test +// public void testDecreaseStreamRetentionPeriod() { +// txn(() -> kinesisClient.decreaseStreamRetentionPeriod(new DecreaseStreamRetentionPeriodRequest())); +// txnAsync(() -> kinesisAsyncClient.decreaseStreamRetentionPeriodAsync(new DecreaseStreamRetentionPeriodRequest())); +// assertKinesisTrace("decreaseStreamRetentionPeriod", false); +// } +// +// @Test +// public void testDeleteStream() { +// txn(() -> kinesisClient.deleteStream(new DeleteStreamRequest())); +// txnAsync(() -> kinesisAsyncClient.deleteStreamAsync(new DeleteStreamRequest())); +// assertKinesisTrace("deleteStream", false); +// } +// +// @Test +// public void testDescribeLimits() { +// txn(() -> kinesisClient.describeLimits(new DescribeLimitsRequest())); +// txnAsync(() -> kinesisAsyncClient.describeLimitsAsync(new DescribeLimitsRequest())); +// assertKinesisTrace("describeLimits", false); +// } +// +// @Test +// public void testDescribeStream() { +// txn(() -> kinesisClient.describeStream(new DescribeStreamRequest())); +// txnAsync(() -> kinesisAsyncClient.describeStreamAsync(new DescribeStreamRequest())); +// assertKinesisTrace("describeStream", false); +// } +// +// @Test +// public void testDisableEnhancedMonitoring() { +// txn(() -> kinesisClient.disableEnhancedMonitoring(new DisableEnhancedMonitoringRequest())); +// txnAsync(() -> kinesisAsyncClient.disableEnhancedMonitoringAsync(new DisableEnhancedMonitoringRequest())); +// assertKinesisTrace("disableEnhancedMonitoring", false); +// } +// +// @Test +// public void testEnableEnhancedMonitoring() { +// txn(() -> kinesisClient.enableEnhancedMonitoring(new EnableEnhancedMonitoringRequest())); +// txnAsync(() -> kinesisAsyncClient.enableEnhancedMonitoringAsync(new EnableEnhancedMonitoringRequest())); +// assertKinesisTrace("enableEnhancedMonitoring", false); +// } +// +// @Test +// public void testGetRecords() { +// txn(() -> kinesisClient.getRecords(new GetRecordsRequest())); +// txnAsync(() -> kinesisAsyncClient.getRecordsAsync(new GetRecordsRequest())); +// assertKinesisTrace("getRecords", false); +// } +// +// @Test +// public void testGetShardIterator() { +// txn(() -> kinesisClient.getShardIterator(new GetShardIteratorRequest())); +// txnAsync(() -> kinesisAsyncClient.getShardIteratorAsync(new GetShardIteratorRequest())); +// assertKinesisTrace("getShardIterator", false); +// } +// +// @Test +// public void testIncreaseStreamRetentionPeriod() { +// txn(() -> kinesisClient.increaseStreamRetentionPeriod(new IncreaseStreamRetentionPeriodRequest())); +// txnAsync(() -> kinesisAsyncClient.increaseStreamRetentionPeriodAsync(new IncreaseStreamRetentionPeriodRequest())); +// assertKinesisTrace("increaseStreamRetentionPeriod", false); +// } +// +// @Test +// public void testListStreams() { +// txn(() -> kinesisClient.listStreams(new ListStreamsRequest())); +// txnAsync(() -> kinesisAsyncClient.listStreamsAsync(new ListStreamsRequest())); +// 
assertKinesisTrace("listStreams", false);
+// }
+//
+// @Test
+// public void testListTagsForStream() {
+// txn(() -> kinesisClient.listTagsForStream(new ListTagsForStreamRequest()));
+// txnAsync(() -> kinesisAsyncClient.listTagsForStreamAsync(new ListTagsForStreamRequest()));
+// assertKinesisTrace("listTagsForStream", false);
+// }
+//
+// @Test
+// public void testMergeShards() {
+// txn(() -> kinesisClient.mergeShards(new MergeShardsRequest()));
+// txnAsync(() -> kinesisAsyncClient.mergeShardsAsync(new MergeShardsRequest()));
+// assertKinesisTrace("mergeShards", false);
+// }
+//
+// @Test
+// public void testPutRecord() {
+// txn(() -> kinesisClient.putRecord(new PutRecordRequest()));
+// txnAsync(() -> kinesisAsyncClient.putRecordAsync(new PutRecordRequest()));
+// assertKinesisTrace("putRecord", false);
+// }
+//
+// @Test
+// public void testPutRecords() {
+// txn(() -> kinesisClient.putRecords(new PutRecordsRequest()));
+// txnAsync(() -> kinesisAsyncClient.putRecordsAsync(new PutRecordsRequest()));
+// assertKinesisTrace("putRecords", false);
+// }
+//
+// @Test
+// public void testRemoveTagsFromStream() {
+// txn(() -> kinesisClient.removeTagsFromStream(new RemoveTagsFromStreamRequest()));
+// txnAsync(() -> kinesisAsyncClient.removeTagsFromStreamAsync(new RemoveTagsFromStreamRequest()));
+// assertKinesisTrace("removeTagsFromStream", false);
+// }
+//
+// @Test
+// public void testSplitShard() {
+// txn(() -> kinesisClient.splitShard(new SplitShardRequest()));
+// txnAsync(() -> kinesisAsyncClient.splitShardAsync(new SplitShardRequest()));
+// assertKinesisTrace("splitShard", false);
+// }
+//
+// @Test
+// public void testUpdateShardCount() {
+// txn(() -> kinesisClient.updateShardCount(new UpdateShardCountRequest()));
+// txnAsync(() -> kinesisAsyncClient.updateShardCountAsync(new UpdateShardCountRequest()));
+// assertKinesisTrace("updateShardCount", false);
+// }
+
+    @Trace(dispatcher = true)
+    public void txn(Runnable runnable) {
+        try {
+            Thread.sleep(200);
+            runnable.run();
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    @Trace(dispatcher = true)
+    public void txnAsync(Supplier<Future<?>> function) {
+        try {
+            Thread.sleep(200);
+            function.get().get();
+        } catch (Exception ignored) {
+        }
+    }
+
+    @Trace(dispatcher = true)
+    public void txnAsyncWithHandler(Function<AsyncHandler, Future<?>> function, AsyncHandler handler) {
+        try {
+            function.apply(handler).get();
+        } catch (Exception ignored) {
+        }
+    }
+
+    private void assertKinesisTrace(String kinesisOperation, boolean assertSpan) {
+        Introspector introspector = InstrumentationTestRunner.getIntrospector();
+        final String traceName = "Kinesis/" + kinesisOperation;
+        if (assertSpan) {
+            // Span events fail to be generated when enough transactions are done in succession
+            List<SpanEvent> kinesisSpans = introspector.getSpanEvents().stream()
+                    .filter(span -> traceName.equals(span.getName()))
+                    .collect(Collectors.toList());
+            assertEquals(2, kinesisSpans.size());
+            for (SpanEvent kinesisSpan : kinesisSpans) {
+                assertEquals("aws_kinesis_data_streams", kinesisSpan.getAgentAttributes().get("cloud.platform"));
+            }
+        }
+        assertTxn(kinesisOperation, introspector);
+        assertTxnAsync(kinesisOperation, "OtherTransaction/Custom/com.amazonaws.services.kinesis.AmazonKinesisAPITest/txnAsync", introspector);
+    }
+
+    private void assertTxn(String kinesisOperation, Introspector introspector) {
+        String transactionName = "OtherTransaction/Custom/com.amazonaws.services.kinesis.AmazonKinesisAPITest/txn";
+        final String traceName = "Kinesis/" + kinesisOperation;
+        Collection<TransactionTrace> transactionTraces = introspector.getTransactionTracesForTransaction(transactionName);
+        TransactionTrace transactionTrace = transactionTraces.iterator().next();
+        List<TraceSegment> children = transactionTrace.getInitialTraceSegment().getChildren();
+        assertEquals(1, children.size());
+        TraceSegment trace = children.get(0);
+        assertEquals(traceName, trace.getName());
+        assertEquals("aws_kinesis_data_streams", trace.getTracerAttributes().get("cloud.platform"));
+    }
+
+    private void assertTxnAsync(String kinesisOperation, String transactionName, Introspector introspector) {
+        final String asyncClientTraceName = "Java/com.amazonaws.services.kinesis.AmazonKinesisAsyncClient/" + kinesisOperation + "Async";
+        final String extTraceName = "Kinesis/" + kinesisOperation;
+        Collection<TransactionTrace> transactionTraces = introspector.getTransactionTracesForTransaction(transactionName);
+        TransactionTrace transactionTrace = transactionTraces.iterator().next();
+
+        List<TraceSegment> rootChildren = transactionTrace.getInitialTraceSegment().getChildren();
+        assertEquals(1, rootChildren.size());
+        TraceSegment asyncClientTrace = rootChildren.get(0);
+        assertEquals(asyncClientTraceName, asyncClientTrace.getName());
+
+        List<TraceSegment> asyncFunctionTraceChildren = asyncClientTrace.getChildren();
+        assertEquals(1, asyncFunctionTraceChildren.size());
+        TraceSegment extTrace = asyncFunctionTraceChildren.get(0);
+        assertEquals(extTraceName, extTrace.getName());
+        assertEquals("aws_kinesis_data_streams", extTrace.getTracerAttributes().get("cloud.platform"));
+    }
+
+    private static class CredProvider implements AWSCredentialsProvider {
+        @Override
+        public AWSCredentials getCredentials() {
+            AWSCredentials credentials = mock(AWSCredentials.class);
+            when(credentials.getAWSAccessKeyId()).thenReturn("accessKeyId");
+            when(credentials.getAWSSecretKey()).thenReturn("secretAccessKey");
+            return credentials;
+        }
+
+        @Override
+        public void refresh() {
+
+        }
+    }
+
+}
diff --git a/instrumentation/aws-java-sdk-kinesis-1.11.106/src/test/resources/dt_enabled.yml b/instrumentation/aws-java-sdk-kinesis-1.11.106/src/test/resources/dt_enabled.yml
new file mode 100644
index 0000000000..53b0968002
--- /dev/null
+++ b/instrumentation/aws-java-sdk-kinesis-1.11.106/src/test/resources/dt_enabled.yml
@@ -0,0 +1,5 @@
+common: &default_settings
+  distributed_tracing:
+    enabled: true
+  span_events:
+    enabled: true
\ No newline at end of file
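The v2 module that follows takes a different approach from the token-based v1 instrumentation above: because the SDK v2 async client returns `CompletableFuture`s, each operation is wrapped in a `Segment` that is ended when the future completes. A minimal sketch of that pattern (the helper class and method names are illustrative):

```java
import com.newrelic.api.agent.NewRelic;
import com.newrelic.api.agent.Segment;

import java.util.concurrent.CompletableFuture;

// Illustrative segment-per-future pattern: the segment measures the full
// async duration and is ended from whichever thread completes the future.
public class SegmentPerFutureSketch {
    public static <T> CompletableFuture<T> timed(String operation, CompletableFuture<T> future) {
        Segment segment = NewRelic.getAgent().getTransaction().startSegment("Kinesis", operation);
        return future.whenComplete((result, error) -> segment.end());
    }
}
```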
diff --git a/instrumentation/aws-java-sdk-kinesis-2.0.6/build.gradle b/instrumentation/aws-java-sdk-kinesis-2.0.6/build.gradle
new file mode 100644
index 0000000000..bd30d61ce9
--- /dev/null
+++ b/instrumentation/aws-java-sdk-kinesis-2.0.6/build.gradle
@@ -0,0 +1,14 @@
+dependencies {
+    implementation(project(":agent-bridge"))
+    implementation("software.amazon.awssdk:kinesis:2.20.45")
+}
+
+jar {
+    manifest { attributes 'Implementation-Title': 'com.newrelic.instrumentation.aws-java-sdk-kinesis-2.0.6' }
+}
+
+verifyInstrumentation {
+    passes 'software.amazon.awssdk:kinesis:[2.1.0,)'
+    exclude 'software.amazon.awssdk:kinesis:2.17.200' // this version failed the test, but the next one works again.
+    excludeRegex '.*-preview-[0-9a-f]+'
+}
\ No newline at end of file
diff --git a/instrumentation/aws-java-sdk-kinesis-2.0.6/src/main/java/com/agent/instrumentation/awsjavasdk2/services/kinesis/KinesisUtil.java b/instrumentation/aws-java-sdk-kinesis-2.0.6/src/main/java/com/agent/instrumentation/awsjavasdk2/services/kinesis/KinesisUtil.java
new file mode 100644
index 0000000000..ce69329bee
--- /dev/null
+++ b/instrumentation/aws-java-sdk-kinesis-2.0.6/src/main/java/com/agent/instrumentation/awsjavasdk2/services/kinesis/KinesisUtil.java
@@ -0,0 +1,30 @@
+package com.agent.instrumentation.awsjavasdk2.services.kinesis;
+
+import com.newrelic.api.agent.CloudParameters;
+import com.newrelic.api.agent.NewRelic;
+import com.newrelic.api.agent.Segment;
+import com.newrelic.api.agent.TracedMethod;
+
+public class KinesisUtil {
+
+    public static final String PLATFORM = "aws_kinesis_data_streams";
+    public static final String TRACE_CATEGORY = "Kinesis";
+    private KinesisUtil() {}
+
+    public static Segment beginSegment(String kinesisOperation) {
+        Segment segment = NewRelic.getAgent().getTransaction().startSegment(TRACE_CATEGORY, kinesisOperation);
+        segment.reportAsExternal(createCloudParams());
+        return segment;
+    }
+
+    public static void setTraceDetails(String kinesisOperation) {
+        TracedMethod tracedMethod = NewRelic.getAgent().getTracedMethod();
+        tracedMethod.setMetricName(TRACE_CATEGORY, kinesisOperation);
+        tracedMethod.reportAsExternal(createCloudParams());
+    }
+
+    public static CloudParameters createCloudParams() {
+        // Todo: add arn to cloud parameters
+        return CloudParameters.provider(PLATFORM).build();
+    }
+
+}
diff --git a/instrumentation/aws-java-sdk-kinesis-2.0.6/src/main/java/com/agent/instrumentation/awsjavasdk2/services/kinesis/SegmentHandler.java b/instrumentation/aws-java-sdk-kinesis-2.0.6/src/main/java/com/agent/instrumentation/awsjavasdk2/services/kinesis/SegmentHandler.java
new file mode 100644
index 0000000000..d996af803a
--- /dev/null
+++ b/instrumentation/aws-java-sdk-kinesis-2.0.6/src/main/java/com/agent/instrumentation/awsjavasdk2/services/kinesis/SegmentHandler.java
@@ -0,0 +1,32 @@
+package com.agent.instrumentation.awsjavasdk2.services.kinesis;
+
+import com.newrelic.agent.bridge.AgentBridge;
+import com.newrelic.api.agent.Segment;
+
+import java.util.concurrent.CompletableFuture;
+
+public class SegmentHandler<R> {
+    private final CompletableFuture<R> completableFuture;
+    private final Segment segment;
+    private final String implementationTitle;
+
+    public SegmentHandler(CompletableFuture<R> completableFuture, Segment segment, String implementationTitle) {
+        this.completableFuture = completableFuture;
+        this.segment = segment;
+        this.implementationTitle = implementationTitle;
+    }
+
+    public CompletableFuture<R> newSegmentCompletionStage() {
+        if (completableFuture == null) {
+            return null;
+        }
+        return completableFuture.whenComplete((r, t) -> {
+            try {
+                segment.reportAsExternal(KinesisUtil.createCloudParams());
+                segment.end();
+            } catch (Throwable t1) {
+                AgentBridge.instrumentation.noticeInstrumentationError(t1, implementationTitle);
+            }
+        });
+    }
+}
\ No newline at end of file
diff --git a/instrumentation/aws-java-sdk-kinesis-2.0.6/src/main/java/software/amazon/awssdk/core/client/handler/AsyncClientHandler_Instrumentation.java b/instrumentation/aws-java-sdk-kinesis-2.0.6/src/main/java/software/amazon/awssdk/core/client/handler/AsyncClientHandler_Instrumentation.java
new file mode 100644
index 0000000000..17f8e249b5
--- /dev/null
+++ b/instrumentation/aws-java-sdk-kinesis-2.0.6/src/main/java/software/amazon/awssdk/core/client/handler/AsyncClientHandler_Instrumentation.java
@@ -0,0 +1,28 @@
+package software.amazon.awssdk.core.client.handler;
+
+import com.newrelic.api.agent.Trace;
+import com.newrelic.api.agent.weaver.MatchType;
+import com.newrelic.api.agent.weaver.Weave;
+import com.newrelic.api.agent.weaver.Weaver;
+import software.amazon.awssdk.core.SdkRequest;
+import software.amazon.awssdk.core.SdkResponse;
+import software.amazon.awssdk.core.async.AsyncResponseTransformer;
+
+import java.util.concurrent.CompletableFuture;
+
+@Weave(originalName = "software.amazon.awssdk.core.client.handler.AsyncClientHandler", type = MatchType.Interface)
+public class AsyncClientHandler_Instrumentation {
+    // This prevents further traces from forming when using the async client
+    @Trace(leaf = true, excludeFromTransactionTrace = true)
+    public <InputT extends SdkRequest, OutputT extends SdkResponse> CompletableFuture<OutputT> execute(
+            ClientExecutionParams<InputT, OutputT> executionParams) {
+        return Weaver.callOriginal();
+    }
+
+    @Trace(leaf = true, excludeFromTransactionTrace = true)
+    public <InputT extends SdkRequest, OutputT extends SdkResponse, ReturnT> CompletableFuture<ReturnT> execute(
+            ClientExecutionParams<InputT, OutputT> executionParams,
+            AsyncResponseTransformer<OutputT, ReturnT> asyncResponseTransformer) {
+        return Weaver.callOriginal();
+    }
+}
software.amazon.awssdk.services.kinesis.model.DescribeStreamSummaryRequest; +import software.amazon.awssdk.services.kinesis.model.DisableEnhancedMonitoringRequest; +import software.amazon.awssdk.services.kinesis.model.DisableEnhancedMonitoringResponse; +import software.amazon.awssdk.services.kinesis.model.EnableEnhancedMonitoringRequest; +import software.amazon.awssdk.services.kinesis.model.EnableEnhancedMonitoringResponse; +import software.amazon.awssdk.services.kinesis.model.GetRecordsRequest; +import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; +import software.amazon.awssdk.services.kinesis.model.GetShardIteratorRequest; +import software.amazon.awssdk.services.kinesis.model.GetShardIteratorResponse; +import software.amazon.awssdk.services.kinesis.model.IncreaseStreamRetentionPeriodRequest; +import software.amazon.awssdk.services.kinesis.model.IncreaseStreamRetentionPeriodResponse; +import software.amazon.awssdk.services.kinesis.model.ListShardsRequest; +import software.amazon.awssdk.services.kinesis.model.ListShardsResponse; +import software.amazon.awssdk.services.kinesis.model.ListStreamConsumersRequest; +import software.amazon.awssdk.services.kinesis.model.ListStreamConsumersResponse; +import software.amazon.awssdk.services.kinesis.model.ListStreamsRequest; +import software.amazon.awssdk.services.kinesis.model.ListStreamsResponse; +import software.amazon.awssdk.services.kinesis.model.ListTagsForStreamRequest; +import software.amazon.awssdk.services.kinesis.model.ListTagsForStreamResponse; +import software.amazon.awssdk.services.kinesis.model.MergeShardsRequest; +import software.amazon.awssdk.services.kinesis.model.MergeShardsResponse; +import software.amazon.awssdk.services.kinesis.model.PutRecordRequest; +import software.amazon.awssdk.services.kinesis.model.PutRecordResponse; +import software.amazon.awssdk.services.kinesis.model.PutRecordsRequest; +import software.amazon.awssdk.services.kinesis.model.PutRecordsResponse; +import software.amazon.awssdk.services.kinesis.model.RegisterStreamConsumerRequest; +import software.amazon.awssdk.services.kinesis.model.RegisterStreamConsumerResponse; +import software.amazon.awssdk.services.kinesis.model.RemoveTagsFromStreamRequest; +import software.amazon.awssdk.services.kinesis.model.RemoveTagsFromStreamResponse; +import software.amazon.awssdk.services.kinesis.model.SplitShardRequest; +import software.amazon.awssdk.services.kinesis.model.SplitShardResponse; +import software.amazon.awssdk.services.kinesis.model.StartStreamEncryptionRequest; +import software.amazon.awssdk.services.kinesis.model.StartStreamEncryptionResponse; +import software.amazon.awssdk.services.kinesis.model.StopStreamEncryptionRequest; +import software.amazon.awssdk.services.kinesis.model.StopStreamEncryptionResponse; +import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest; +import software.amazon.awssdk.services.kinesis.model.SubscribeToShardResponseHandler; +import software.amazon.awssdk.services.kinesis.model.UpdateShardCountRequest; +import software.amazon.awssdk.services.kinesis.model.UpdateShardCountResponse; + +import java.util.concurrent.CompletableFuture; + +@Weave(originalName = "software.amazon.awssdk.services.kinesis.DefaultKinesisAsyncClient", type = MatchType.ExactClass) +class DefaultKinesisAsyncClient_Instrumentation { + + public CompletableFuture addTagsToStream(AddTagsToStreamRequest addTagsToStreamRequest) { + Segment segment = KinesisUtil.beginSegment("addTagsToStream"); + CompletableFuture response = 
Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + + public CompletableFuture createStream(CreateStreamRequest createStreamRequest) { + Segment segment = KinesisUtil.beginSegment("createStream"); + CompletableFuture response = Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + + public CompletableFuture decreaseStreamRetentionPeriod( + DecreaseStreamRetentionPeriodRequest decreaseStreamRetentionPeriodRequest) { + Segment segment = KinesisUtil.beginSegment("decreaseStreamRetentionPeriod"); + CompletableFuture response = Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + + public CompletableFuture deleteStream(DeleteStreamRequest deleteStreamRequest) { + Segment segment = KinesisUtil.beginSegment("deleteStream"); + CompletableFuture response = Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + + public CompletableFuture deregisterStreamConsumer(DeregisterStreamConsumerRequest deregisterStreamConsumerRequest) { + Segment segment = KinesisUtil.beginSegment("deregisterStreamConsumer"); + CompletableFuture response = Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + + public CompletableFuture describeLimits(DescribeLimitsRequest describeLimitsRequest) { + Segment segment = KinesisUtil.beginSegment("describeLimits"); + CompletableFuture response = Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + + public CompletableFuture describeStream(DescribeStreamRequest describeStreamRequest) { + Segment segment = KinesisUtil.beginSegment("describeStream"); + CompletableFuture response = Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + + public CompletableFuture describeStreamConsumer(DescribeStreamConsumerRequest describeStreamConsumerRequest) { + Segment segment = KinesisUtil.beginSegment("describeStreamConsumer"); + CompletableFuture response = Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + + public CompletableFuture describeStreamSummary(DescribeStreamSummaryRequest describeStreamSummaryRequest) { + Segment segment = KinesisUtil.beginSegment( "describeStreamSummary"); + CompletableFuture response = Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + + public CompletableFuture disableEnhancedMonitoring(DisableEnhancedMonitoringRequest disableEnhancedMonitoringRequest) { + Segment segment = KinesisUtil.beginSegment("disableEnhancedMonitoring"); + CompletableFuture response = Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + public CompletableFuture enableEnhancedMonitoring(EnableEnhancedMonitoringRequest enableEnhancedMonitoringRequest) { + Segment segment = KinesisUtil.beginSegment("enableEnhancedMonitoring"); + CompletableFuture response = Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, 
Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + + public CompletableFuture getRecords(GetRecordsRequest getRecordsRequest) { + Segment segment = KinesisUtil.beginSegment("getRecords"); + CompletableFuture response = Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + + public CompletableFuture getShardIterator(GetShardIteratorRequest getShardIteratorRequest) { + Segment segment = KinesisUtil.beginSegment("getShardIterator"); + CompletableFuture response = Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + + public CompletableFuture increaseStreamRetentionPeriod( + IncreaseStreamRetentionPeriodRequest increaseStreamRetentionPeriodRequest) { + Segment segment = KinesisUtil.beginSegment("increaseStreamRetentionPeriod"); + CompletableFuture response = Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + + public CompletableFuture listShards(ListShardsRequest listShardsRequest) { + Segment segment = KinesisUtil.beginSegment("listShards"); + CompletableFuture response = Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + + public CompletableFuture listStreamConsumers(ListStreamConsumersRequest listStreamConsumersRequest) { + Segment segment = KinesisUtil.beginSegment("listStreamConsumers"); + CompletableFuture response = Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + + public CompletableFuture listStreams(ListStreamsRequest listStreamsRequest) { + Segment segment = KinesisUtil.beginSegment("listStreams"); + CompletableFuture response = Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + + public CompletableFuture listTagsForStream(ListTagsForStreamRequest listTagsForStreamRequest) { + Segment segment = KinesisUtil.beginSegment("listTagsForStream"); + CompletableFuture response = Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + + public CompletableFuture mergeShards(MergeShardsRequest mergeShardsRequest) { + Segment segment = KinesisUtil.beginSegment("mergeShards"); + CompletableFuture response = Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + + public CompletableFuture putRecord(PutRecordRequest putRecordRequest) { + Segment segment = KinesisUtil.beginSegment("putRecord"); + CompletableFuture response = Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + + public CompletableFuture putRecords(PutRecordsRequest putRecordsRequest) { + Segment segment = KinesisUtil.beginSegment("putRecords"); + CompletableFuture response = Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + + public CompletableFuture registerStreamConsumer(RegisterStreamConsumerRequest registerStreamConsumerRequest) { + Segment segment = KinesisUtil.beginSegment("registerStreamConsumer"); + CompletableFuture response = 
Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + + public CompletableFuture removeTagsFromStream(RemoveTagsFromStreamRequest removeTagsFromStreamRequest) { + Segment segment = KinesisUtil.beginSegment("removeTagsFromStream"); + CompletableFuture response = Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + + public CompletableFuture splitShard(SplitShardRequest splitShardRequest) { + Segment segment = KinesisUtil.beginSegment("splitShard"); + CompletableFuture response = Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + + public CompletableFuture startStreamEncryption(StartStreamEncryptionRequest startStreamEncryptionRequest) { + Segment segment = KinesisUtil.beginSegment("startStreamEncryption"); + CompletableFuture response = Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + + public CompletableFuture stopStreamEncryption(StopStreamEncryptionRequest stopStreamEncryptionRequest) { + Segment segment = KinesisUtil.beginSegment("stopStreamEncryption"); + CompletableFuture response = Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + + public CompletableFuture subscribeToShard(SubscribeToShardRequest subscribeToShardRequest, SubscribeToShardResponseHandler asyncResponseHandler) { + Segment segment = KinesisUtil.beginSegment("subscribeToShard"); + CompletableFuture response = Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + + public CompletableFuture updateShardCount(UpdateShardCountRequest updateShardCountRequest) { + Segment segment = KinesisUtil.beginSegment("updateShardCount"); + CompletableFuture response = Weaver.callOriginal(); + return new SegmentHandler<>(response, segment, Weaver.getImplementationTitle()).newSegmentCompletionStage(); + } + +}
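Every async method above follows the same pattern: begin a segment, let the SDK run via `Weaver.callOriginal()`, and hand the returned future to a `SegmentHandler`. The `SegmentHandler` class is added elsewhere in this PR and is not visible in this hunk; the sketch below is an assumption of its general shape (constructor arguments inferred from the call sites, completion handling via `whenComplete`), not the PR's actual code:

```java
package com.agent.instrumentation.awsjavasdk2.services.kinesis;

import com.newrelic.api.agent.NewRelic;
import com.newrelic.api.agent.Segment;

import java.util.concurrent.CompletableFuture;
import java.util.logging.Level;

// Hypothetical sketch: ends the segment when the SDK's CompletableFuture completes,
// so the segment's duration covers the full async round trip.
public class SegmentHandler<T> {
    private final CompletableFuture<T> completableFuture;
    private final Segment segment;
    private final String implementationTitle;

    public SegmentHandler(CompletableFuture<T> completableFuture, Segment segment, String implementationTitle) {
        this.completableFuture = completableFuture;
        this.segment = segment;
        this.implementationTitle = implementationTitle;
    }

    public CompletableFuture<T> newSegmentCompletionStage() {
        if (completableFuture == null) {
            // Nothing to attach to; end the segment immediately rather than leak it.
            segment.end();
            return null;
        }
        return completableFuture.whenComplete((response, throwable) -> {
            try {
                segment.end();
            } catch (Throwable t) {
                NewRelic.getAgent().getLogger().log(Level.FINEST, t,
                        "Unable to end segment for {0}", implementationTitle);
            }
        });
    }
}
```

Ending the segment from the completion callback, rather than when the instrumented method returns, is what makes the async timings meaningful: the instrumented method itself returns almost immediately.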
diff --git a/instrumentation/aws-java-sdk-kinesis-2.0.6/src/main/java/software/amazon/awssdk/services/kinesis/DefaultKinesisClient_Instrumentation.java b/instrumentation/aws-java-sdk-kinesis-2.0.6/src/main/java/software/amazon/awssdk/services/kinesis/DefaultKinesisClient_Instrumentation.java new file mode 100644 index 0000000000..04ae89282a --- /dev/null +++ b/instrumentation/aws-java-sdk-kinesis-2.0.6/src/main/java/software/amazon/awssdk/services/kinesis/DefaultKinesisClient_Instrumentation.java @@ -0,0 +1,227 @@ +package software.amazon.awssdk.services.kinesis; + +import com.agent.instrumentation.awsjavasdk2.services.kinesis.KinesisUtil; +import com.newrelic.api.agent.Trace; +import com.newrelic.api.agent.weaver.MatchType; +import com.newrelic.api.agent.weaver.Weave; +import com.newrelic.api.agent.weaver.Weaver; +import software.amazon.awssdk.services.kinesis.model.AddTagsToStreamRequest; +import software.amazon.awssdk.services.kinesis.model.AddTagsToStreamResponse; +import software.amazon.awssdk.services.kinesis.model.CreateStreamRequest; +import software.amazon.awssdk.services.kinesis.model.CreateStreamResponse; +import software.amazon.awssdk.services.kinesis.model.DecreaseStreamRetentionPeriodRequest; +import software.amazon.awssdk.services.kinesis.model.DecreaseStreamRetentionPeriodResponse; +import software.amazon.awssdk.services.kinesis.model.DeleteStreamRequest; +import software.amazon.awssdk.services.kinesis.model.DeleteStreamResponse; +import software.amazon.awssdk.services.kinesis.model.DeregisterStreamConsumerRequest; +import software.amazon.awssdk.services.kinesis.model.DeregisterStreamConsumerResponse; +import software.amazon.awssdk.services.kinesis.model.DescribeLimitsRequest; +import software.amazon.awssdk.services.kinesis.model.DescribeLimitsResponse; +import software.amazon.awssdk.services.kinesis.model.DescribeStreamConsumerRequest; +import software.amazon.awssdk.services.kinesis.model.DescribeStreamConsumerResponse; +import software.amazon.awssdk.services.kinesis.model.DescribeStreamRequest; +import software.amazon.awssdk.services.kinesis.model.DescribeStreamResponse; +import software.amazon.awssdk.services.kinesis.model.DescribeStreamSummaryRequest; +import software.amazon.awssdk.services.kinesis.model.DescribeStreamSummaryResponse; +import software.amazon.awssdk.services.kinesis.model.DisableEnhancedMonitoringRequest; +import software.amazon.awssdk.services.kinesis.model.DisableEnhancedMonitoringResponse; +import software.amazon.awssdk.services.kinesis.model.EnableEnhancedMonitoringRequest; +import software.amazon.awssdk.services.kinesis.model.EnableEnhancedMonitoringResponse; +import software.amazon.awssdk.services.kinesis.model.GetRecordsRequest; +import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; +import software.amazon.awssdk.services.kinesis.model.GetShardIteratorRequest; +import software.amazon.awssdk.services.kinesis.model.GetShardIteratorResponse; +import software.amazon.awssdk.services.kinesis.model.IncreaseStreamRetentionPeriodRequest; +import software.amazon.awssdk.services.kinesis.model.IncreaseStreamRetentionPeriodResponse; +import software.amazon.awssdk.services.kinesis.model.ListShardsRequest; +import software.amazon.awssdk.services.kinesis.model.ListShardsResponse; +import software.amazon.awssdk.services.kinesis.model.ListStreamConsumersRequest; +import software.amazon.awssdk.services.kinesis.model.ListStreamConsumersResponse; +import software.amazon.awssdk.services.kinesis.model.ListStreamsRequest; +import software.amazon.awssdk.services.kinesis.model.ListStreamsResponse; +import software.amazon.awssdk.services.kinesis.model.ListTagsForStreamRequest; +import software.amazon.awssdk.services.kinesis.model.ListTagsForStreamResponse; +import software.amazon.awssdk.services.kinesis.model.MergeShardsRequest; +import software.amazon.awssdk.services.kinesis.model.MergeShardsResponse; +import software.amazon.awssdk.services.kinesis.model.PutRecordRequest; +import software.amazon.awssdk.services.kinesis.model.PutRecordResponse; +import software.amazon.awssdk.services.kinesis.model.PutRecordsRequest; +import software.amazon.awssdk.services.kinesis.model.PutRecordsResponse; +import software.amazon.awssdk.services.kinesis.model.RegisterStreamConsumerRequest; +import software.amazon.awssdk.services.kinesis.model.RegisterStreamConsumerResponse; +import software.amazon.awssdk.services.kinesis.model.RemoveTagsFromStreamRequest; +import software.amazon.awssdk.services.kinesis.model.RemoveTagsFromStreamResponse; +import software.amazon.awssdk.services.kinesis.model.SplitShardRequest; +import software.amazon.awssdk.services.kinesis.model.SplitShardResponse; +import software.amazon.awssdk.services.kinesis.model.StartStreamEncryptionRequest; +import software.amazon.awssdk.services.kinesis.model.StartStreamEncryptionResponse; +import 
software.amazon.awssdk.services.kinesis.model.StopStreamEncryptionRequest; +import software.amazon.awssdk.services.kinesis.model.StopStreamEncryptionResponse; +import software.amazon.awssdk.services.kinesis.model.UpdateShardCountRequest; +import software.amazon.awssdk.services.kinesis.model.UpdateShardCountResponse; + +@Weave(originalName = "software.amazon.awssdk.services.kinesis.DefaultKinesisClient", type = MatchType.ExactClass) +class DefaultKinesisClient_Instrumentation { + @Trace(leaf=true) + public AddTagsToStreamResponse addTagsToStream(AddTagsToStreamRequest addTagsToStreamRequest) { + KinesisUtil.setTraceDetails("addTagsToStream"); + return Weaver.callOriginal(); + } + + @Trace(leaf=true) + public CreateStreamResponse createStream(CreateStreamRequest createStreamRequest) { + KinesisUtil.setTraceDetails("createStream"); + return Weaver.callOriginal(); + } + + @Trace(leaf=true) + public DecreaseStreamRetentionPeriodResponse decreaseStreamRetentionPeriod(DecreaseStreamRetentionPeriodRequest decreaseStreamRetentionPeriodRequest) { + KinesisUtil.setTraceDetails("decreaseStreamRetentionPeriod"); + return Weaver.callOriginal(); + } + + @Trace(leaf=true) + public DeleteStreamResponse deleteStream(DeleteStreamRequest deleteStreamRequest) { + KinesisUtil.setTraceDetails("deleteStream"); + return Weaver.callOriginal(); + } + + @Trace(leaf=true) + public DeregisterStreamConsumerResponse deregisterStreamConsumer(DeregisterStreamConsumerRequest deregisterStreamConsumerRequest) { + KinesisUtil.setTraceDetails("deregisterStreamConsumer"); + return Weaver.callOriginal(); + } + + @Trace(leaf=true) + public DescribeLimitsResponse describeLimits(DescribeLimitsRequest describeLimitsRequest) { + KinesisUtil.setTraceDetails("describeLimits"); + return Weaver.callOriginal(); + } + + @Trace(leaf=true) + public DescribeStreamResponse describeStream(DescribeStreamRequest describeStreamRequest) { + KinesisUtil.setTraceDetails("describeStream"); + return Weaver.callOriginal(); + } + + @Trace(leaf=true) + public DescribeStreamConsumerResponse describeStreamConsumer(DescribeStreamConsumerRequest describeStreamConsumerRequest) { + KinesisUtil.setTraceDetails("describeStreamConsumer"); + return Weaver.callOriginal(); + } + + @Trace(leaf=true) + public DescribeStreamSummaryResponse describeStreamSummary(DescribeStreamSummaryRequest describeStreamSummaryRequest) { + KinesisUtil.setTraceDetails("describeStreamSummary"); + return Weaver.callOriginal(); + } + + @Trace(leaf=true) + public DisableEnhancedMonitoringResponse disableEnhancedMonitoring(DisableEnhancedMonitoringRequest disableEnhancedMonitoringRequest) { + KinesisUtil.setTraceDetails("disableEnhancedMonitoring"); + return Weaver.callOriginal(); + } + + @Trace(leaf=true) + public EnableEnhancedMonitoringResponse enableEnhancedMonitoring(EnableEnhancedMonitoringRequest enableEnhancedMonitoringRequest) { + KinesisUtil.setTraceDetails("enableEnhancedMonitoring"); + return Weaver.callOriginal(); + } + + @Trace(leaf=true) + public GetRecordsResponse getRecords(GetRecordsRequest getRecordsRequest) { + KinesisUtil.setTraceDetails("getRecords"); + return Weaver.callOriginal(); + } + + @Trace(leaf=true) + public GetShardIteratorResponse getShardIterator(GetShardIteratorRequest getShardIteratorRequest) { + KinesisUtil.setTraceDetails("getShardIterator"); + return Weaver.callOriginal(); + } + + @Trace(leaf=true) + public IncreaseStreamRetentionPeriodResponse increaseStreamRetentionPeriod(IncreaseStreamRetentionPeriodRequest increaseStreamRetentionPeriodRequest) { + 
KinesisUtil.setTraceDetails("increaseStreamRetentionPeriod"); + return Weaver.callOriginal(); + } + + @Trace(leaf=true) + public ListShardsResponse listShards(ListShardsRequest listShardsRequest) { + KinesisUtil.setTraceDetails("listShards"); + return Weaver.callOriginal(); + } + + @Trace(leaf=true) + public ListStreamConsumersResponse listStreamConsumers(ListStreamConsumersRequest listStreamConsumersRequest) { + KinesisUtil.setTraceDetails("listStreamConsumers"); + return Weaver.callOriginal(); + } + + @Trace(leaf=true) + public ListStreamsResponse listStreams(ListStreamsRequest listStreamsRequest) { + KinesisUtil.setTraceDetails("listStreams"); + return Weaver.callOriginal(); + } + + @Trace(leaf=true) + public ListTagsForStreamResponse listTagsForStream(ListTagsForStreamRequest listTagsForStreamRequest) { + KinesisUtil.setTraceDetails("listTagsForStream"); + return Weaver.callOriginal(); + } + + @Trace(leaf=true) + public MergeShardsResponse mergeShards(MergeShardsRequest mergeShardsRequest) { + KinesisUtil.setTraceDetails("mergeShards"); + return Weaver.callOriginal(); + } + + @Trace(leaf=true) + public PutRecordResponse putRecord(PutRecordRequest putRecordRequest) { + KinesisUtil.setTraceDetails("putRecord"); + return Weaver.callOriginal(); + } + + @Trace(leaf=true) + public PutRecordsResponse putRecords(PutRecordsRequest putRecordsRequest) { + KinesisUtil.setTraceDetails("putRecords"); + return Weaver.callOriginal(); + } + @Trace(leaf=true) + public RegisterStreamConsumerResponse registerStreamConsumer(RegisterStreamConsumerRequest registerStreamConsumerRequest) { + KinesisUtil.setTraceDetails("registerStreamConsumer"); + return Weaver.callOriginal(); + } + + + @Trace(leaf=true) + public RemoveTagsFromStreamResponse removeTagsFromStream(RemoveTagsFromStreamRequest removeTagsFromStreamRequest) { + KinesisUtil.setTraceDetails("removeTagsFromStream"); + return Weaver.callOriginal(); + } + + @Trace(leaf=true) + public SplitShardResponse splitShard(SplitShardRequest splitShardRequest) { + KinesisUtil.setTraceDetails("splitShard"); + return Weaver.callOriginal(); + } + + @Trace(leaf=true) + public StartStreamEncryptionResponse startStreamEncryption(StartStreamEncryptionRequest startStreamEncryptionRequest) { + KinesisUtil.setTraceDetails("startStreamEncryption"); + return Weaver.callOriginal(); + } + + @Trace(leaf=true) + public StopStreamEncryptionResponse stopStreamEncryption(StopStreamEncryptionRequest stopStreamEncryptionRequest) { + KinesisUtil.setTraceDetails("stopStreamEncryption"); + return Weaver.callOriginal(); + } + + @Trace(leaf=true) + public UpdateShardCountResponse updateShardCount(UpdateShardCountRequest updateShardCountRequest) { + KinesisUtil.setTraceDetails("updateShardCount"); + return Weaver.callOriginal(); + } + +} diff --git a/instrumentation/aws-java-sdk-kinesis-2.0.6/src/test/java/software/amazon/awssdk/services/kinesis/DefaultKinesisAsyncClientTest.java b/instrumentation/aws-java-sdk-kinesis-2.0.6/src/test/java/software/amazon/awssdk/services/kinesis/DefaultKinesisAsyncClientTest.java new file mode 100644 index 0000000000..e64542b05f --- /dev/null +++ b/instrumentation/aws-java-sdk-kinesis-2.0.6/src/test/java/software/amazon/awssdk/services/kinesis/DefaultKinesisAsyncClientTest.java @@ -0,0 +1,315 @@ +package software.amazon.awssdk.services.kinesis; + +import com.newrelic.agent.introspec.InstrumentationTestConfig; +import com.newrelic.agent.introspec.InstrumentationTestRunner; +import com.newrelic.agent.introspec.Introspector; +import 
diff --git a/instrumentation/aws-java-sdk-kinesis-2.0.6/src/test/java/software/amazon/awssdk/services/kinesis/DefaultKinesisAsyncClientTest.java b/instrumentation/aws-java-sdk-kinesis-2.0.6/src/test/java/software/amazon/awssdk/services/kinesis/DefaultKinesisAsyncClientTest.java new file mode 100644 index 0000000000..e64542b05f --- /dev/null +++ b/instrumentation/aws-java-sdk-kinesis-2.0.6/src/test/java/software/amazon/awssdk/services/kinesis/DefaultKinesisAsyncClientTest.java @@ -0,0 +1,315 @@ +package software.amazon.awssdk.services.kinesis; + +import com.newrelic.agent.introspec.InstrumentationTestConfig; +import com.newrelic.agent.introspec.InstrumentationTestRunner; +import com.newrelic.agent.introspec.Introspector; +import com.newrelic.agent.introspec.SpanEvent; +import com.newrelic.agent.introspec.TraceSegment; +import com.newrelic.agent.introspec.TransactionTrace; +import com.newrelic.api.agent.Trace; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.core.async.EmptyPublisher; +import software.amazon.awssdk.http.AbortableInputStream; +import software.amazon.awssdk.http.ExecutableHttpRequest; +import software.amazon.awssdk.http.HttpExecuteResponse; +import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.kinesis.model.AddTagsToStreamRequest; +import software.amazon.awssdk.services.kinesis.model.CreateStreamRequest; +import software.amazon.awssdk.services.kinesis.model.DecreaseStreamRetentionPeriodRequest; +import software.amazon.awssdk.services.kinesis.model.DeleteStreamRequest; +import software.amazon.awssdk.services.kinesis.model.DeregisterStreamConsumerRequest; +import software.amazon.awssdk.services.kinesis.model.DescribeLimitsRequest; +import software.amazon.awssdk.services.kinesis.model.DescribeStreamConsumerRequest; +import software.amazon.awssdk.services.kinesis.model.DescribeStreamRequest; +import software.amazon.awssdk.services.kinesis.model.DescribeStreamSummaryRequest; +import software.amazon.awssdk.services.kinesis.model.DisableEnhancedMonitoringRequest; +import software.amazon.awssdk.services.kinesis.model.EnableEnhancedMonitoringRequest; +import software.amazon.awssdk.services.kinesis.model.GetRecordsRequest; +import software.amazon.awssdk.services.kinesis.model.GetShardIteratorRequest; +import software.amazon.awssdk.services.kinesis.model.IncreaseStreamRetentionPeriodRequest; +import software.amazon.awssdk.services.kinesis.model.ListShardsRequest; +import software.amazon.awssdk.services.kinesis.model.ListStreamsRequest; +import software.amazon.awssdk.services.kinesis.model.ListTagsForStreamRequest; +import software.amazon.awssdk.services.kinesis.model.MergeShardsRequest; +import software.amazon.awssdk.services.kinesis.model.PutRecordRequest; +import software.amazon.awssdk.services.kinesis.model.PutRecordsRequest; +import software.amazon.awssdk.services.kinesis.model.RegisterStreamConsumerRequest; +import software.amazon.awssdk.services.kinesis.model.RemoveTagsFromStreamRequest; +import software.amazon.awssdk.services.kinesis.model.SplitShardRequest; +import software.amazon.awssdk.services.kinesis.model.StartStreamEncryptionRequest; +import software.amazon.awssdk.services.kinesis.model.StopStreamEncryptionRequest; +import software.amazon.awssdk.services.kinesis.model.UpdateShardCountRequest; +import software.amazon.awssdk.utils.StringInputStream; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.function.Supplier; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +@RunWith(InstrumentationTestRunner.class) +@InstrumentationTestConfig(includePrefixes = 
{"software.amazon.awssdk"}, configName = "dt_enabled.yml") +public class DefaultKinesisAsyncClientTest { + public KinesisAsyncClient kinesisAsyncClient; + public HttpExecuteResponse response; + + @Before + public void setup() { + AsyncHttpClient mockHttpClient = new AsyncHttpClient(); + response = mockHttpClient.getResponse(); + kinesisAsyncClient = KinesisAsyncClient.builder() + .httpClient(mockHttpClient) + .credentialsProvider(new CredProvider()) + .region(Region.US_EAST_1) + .build(); + } + + @Test + public void testAddTagsToStream() { + txn(() -> kinesisAsyncClient.addTagsToStream(AddTagsToStreamRequest.builder().build())); + assertKinesisTrace("Kinesis/addTagsToStream", true); + } + + @Test + public void testCreateStream() { + txn(() -> kinesisAsyncClient.createStream(CreateStreamRequest.builder().build())); + assertKinesisTrace("Kinesis/createStream", false); + } + + @Test + public void testDecreaseStreamRetentionPeriod() { + txn(() -> kinesisAsyncClient.decreaseStreamRetentionPeriod(DecreaseStreamRetentionPeriodRequest.builder().build())); + assertKinesisTrace("Kinesis/decreaseStreamRetentionPeriod", false); + } + + @Test + public void testDeleteStream() { + txn(() -> kinesisAsyncClient.deleteStream(DeleteStreamRequest.builder().build())); + assertKinesisTrace("Kinesis/deleteStream", false); + } + + @Test + public void testDeregisterStreamConsumer() { + txn(() -> kinesisAsyncClient.deregisterStreamConsumer(DeregisterStreamConsumerRequest.builder().build())); + assertKinesisTrace("Kinesis/deregisterStreamConsumer", false); + } + + @Test + public void testDescribeLimits() { + txn(() -> kinesisAsyncClient.describeLimits(DescribeLimitsRequest.builder().build())); + assertKinesisTrace("Kinesis/describeLimits", false); + } + + @Test + public void testDescribeStream() { + txn(() -> kinesisAsyncClient.describeStream(DescribeStreamRequest.builder().build())); + assertKinesisTrace("Kinesis/describeStream", false); + } + + @Test + public void testDescribeStreamConsumer() { + txn(() -> kinesisAsyncClient.describeStreamConsumer(DescribeStreamConsumerRequest.builder().build())); + assertKinesisTrace("Kinesis/describeStreamConsumer", false); + } + + @Test + public void testDescribeStreamSummary() { + txn(() -> kinesisAsyncClient.describeStreamSummary(DescribeStreamSummaryRequest.builder().build())); + assertKinesisTrace("Kinesis/describeStreamSummary", false); + } + + @Test + public void DisableEnhancedMonitoring() { + txn(() -> kinesisAsyncClient.disableEnhancedMonitoring(DisableEnhancedMonitoringRequest.builder().build())); + assertKinesisTrace("Kinesis/disableEnhancedMonitoring", false); + } + + @Test + public void testEnableEnhancedMonitoring() { + txn(() -> kinesisAsyncClient.enableEnhancedMonitoring(EnableEnhancedMonitoringRequest.builder().build())); + assertKinesisTrace("Kinesis/enableEnhancedMonitoring", false); + } + + @Test + public void testGetRecords() { + txn(() -> kinesisAsyncClient.getRecords(GetRecordsRequest.builder().build())); + assertKinesisTrace("Kinesis/getRecords", false); + } + + @Test + public void testGetShardIterator() { + txn(() -> kinesisAsyncClient.getShardIterator(GetShardIteratorRequest.builder().build())); + assertKinesisTrace("Kinesis/getShardIterator", false); + } + + @Test + public void testIncreaseStreamRetentionPeriod() { + txn(() -> kinesisAsyncClient.increaseStreamRetentionPeriod(IncreaseStreamRetentionPeriodRequest.builder().build())); + assertKinesisTrace("Kinesis/increaseStreamRetentionPeriod", false); + } + + @Test + public void testListShards() { + 
txn(() -> kinesisAsyncClient.listShards(ListShardsRequest.builder().build())); + assertKinesisTrace("Kinesis/listShards", false); + } + + @Test + public void testListStreams() { + txn(() -> kinesisAsyncClient.listStreams(ListStreamsRequest.builder().build())); + assertKinesisTrace("Kinesis/listStreams", false); + } + + @Test + public void testListTagsForStream() { + txn(() -> kinesisAsyncClient.listTagsForStream(ListTagsForStreamRequest.builder().build())); + assertKinesisTrace("Kinesis/listTagsForStream", false); + } + + @Test + public void testMergeShards() { + txn(() -> kinesisAsyncClient.mergeShards(MergeShardsRequest.builder().build())); + assertKinesisTrace("Kinesis/mergeShards", false); + } + + @Test + public void testPutRecord() { + txn(() -> kinesisAsyncClient.putRecord(PutRecordRequest.builder().build())); + assertKinesisTrace("Kinesis/putRecord", false); + } + + @Test + public void testPutRecords() { + txn(() -> kinesisAsyncClient.putRecords(PutRecordsRequest.builder().build())); + assertKinesisTrace("Kinesis/putRecords", false); + } + + @Test + public void testRegisterStreamConsumer() { + txn(() -> kinesisAsyncClient.registerStreamConsumer(RegisterStreamConsumerRequest.builder().build())); + assertKinesisTrace("Kinesis/registerStreamConsumer", false); + } + + @Test + public void testRemoveTagsFromStream() { + txn(() -> kinesisAsyncClient.removeTagsFromStream(RemoveTagsFromStreamRequest.builder().build())); + assertKinesisTrace("Kinesis/removeTagsFromStream", false); + } + + @Test + public void testSplitShard() { + txn(() -> kinesisAsyncClient.splitShard(SplitShardRequest.builder().build())); + assertKinesisTrace("Kinesis/splitShard", false); + } + + @Test + public void testStartStreamEncryption() { + txn(() -> kinesisAsyncClient.startStreamEncryption(StartStreamEncryptionRequest.builder().build())); + assertKinesisTrace("Kinesis/startStreamEncryption", false); + } + + @Test + public void testStopStreamEncryption() { + txn(() -> kinesisAsyncClient.stopStreamEncryption(StopStreamEncryptionRequest.builder().build())); + assertKinesisTrace("Kinesis/stopStreamEncryption", false); + } + + @Test + public void testUpdateShardCount() { + txn(() -> kinesisAsyncClient.updateShardCount(UpdateShardCountRequest.builder().build())); + assertKinesisTrace("Kinesis/updateShardCount", false); + } + + @Trace(dispatcher = true) + private void txn(Supplier<CompletableFuture<?>> supplier) { + supplier.get(); + } + + private void assertKinesisTrace(String traceName, boolean assertSpan) { + Introspector introspector = InstrumentationTestRunner.getIntrospector(); + if (assertSpan) { + // Span events fail to be generated when enough transactions are done in succession + SpanEvent kinesisSpan = introspector.getSpanEvents().stream() + .filter(span -> traceName.equals(span.getName())) + .findFirst().orElse(null); + assertNotNull(kinesisSpan); + assertEquals("aws_kinesis_data_streams", kinesisSpan.getAgentAttributes().get("cloud.platform")); + } + Collection transactionTraces = introspector.getTransactionTracesForTransaction( + "OtherTransaction/Custom/software.amazon.awssdk.services.kinesis.DefaultKinesisAsyncClientTest/txn"); + TransactionTrace transactionTrace = transactionTraces.iterator().next(); + List children = transactionTrace.getInitialTraceSegment().getChildren(); + assertEquals(1, children.size()); + TraceSegment trace = children.get(0); + assertEquals(traceName, trace.getName()); + assertEquals("aws_kinesis_data_streams", trace.getTracerAttributes().get("cloud.platform")); + } + + // This mock SdkAsyncHttpClient 
allows testing the AWS SDK without making actual HTTP requests. + private static class AsyncHttpClient implements SdkAsyncHttpClient { + private ExecutableHttpRequest executableMock; + private HttpExecuteResponse response; + private SdkHttpFullResponse httpResponse; + + public AsyncHttpClient() { + executableMock = mock(ExecutableHttpRequest.class); + response = mock(HttpExecuteResponse.class, Mockito.RETURNS_DEEP_STUBS); + httpResponse = mock(SdkHttpFullResponse.class, Mockito.RETURNS_DEEP_STUBS); + when(response.httpResponse()).thenReturn(httpResponse); + when(httpResponse.toBuilder().content(any()).build()).thenReturn(httpResponse); + when(httpResponse.isSuccessful()).thenReturn(true); + AbortableInputStream inputStream = AbortableInputStream.create(new StringInputStream("Dont panic")); + when(httpResponse.content()).thenReturn(Optional.of(inputStream)); + try { + when(executableMock.call()).thenReturn(response); + + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + public void close() { + } + + @Override + public CompletableFuture execute(AsyncExecuteRequest asyncExecuteRequest) { + asyncExecuteRequest.responseHandler().onStream(new EmptyPublisher<>()); + return new CompletableFuture<>(); + } + + @Override + public String clientName() { + return "MockHttpClient"; + } + + public HttpExecuteResponse getResponse() { + return response; + } + } + + private static class CredProvider implements AwsCredentialsProvider { + @Override + public AwsCredentials resolveCredentials() { + AwsCredentials credentials = mock(AwsCredentials.class); + when(credentials.accessKeyId()).thenReturn("accessKeyId"); + when(credentials.secretAccessKey()).thenReturn("secretAccessKey"); + return credentials; + } + } +} diff --git a/instrumentation/aws-java-sdk-kinesis-2.0.6/src/test/java/software/amazon/awssdk/services/kinesis/DefaultKinesisClientTest.java b/instrumentation/aws-java-sdk-kinesis-2.0.6/src/test/java/software/amazon/awssdk/services/kinesis/DefaultKinesisClientTest.java new file mode 100644 index 0000000000..d70b4274bd --- /dev/null +++ b/instrumentation/aws-java-sdk-kinesis-2.0.6/src/test/java/software/amazon/awssdk/services/kinesis/DefaultKinesisClientTest.java @@ -0,0 +1,311 @@ +package software.amazon.awssdk.services.kinesis; + +import com.newrelic.agent.introspec.InstrumentationTestConfig; +import com.newrelic.agent.introspec.InstrumentationTestRunner; +import com.newrelic.agent.introspec.Introspector; +import com.newrelic.agent.introspec.SpanEvent; +import com.newrelic.agent.introspec.TraceSegment; +import com.newrelic.agent.introspec.TransactionTrace; +import com.newrelic.api.agent.Trace; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.http.AbortableInputStream; +import software.amazon.awssdk.http.ExecutableHttpRequest; +import software.amazon.awssdk.http.HttpExecuteRequest; +import software.amazon.awssdk.http.HttpExecuteResponse; +import software.amazon.awssdk.http.SdkHttpClient; +import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.kinesis.model.AddTagsToStreamRequest; +import software.amazon.awssdk.services.kinesis.model.CreateStreamRequest; +import software.amazon.awssdk.services.kinesis.model.DecreaseStreamRetentionPeriodRequest; +import 
software.amazon.awssdk.services.kinesis.model.DeleteStreamRequest; +import software.amazon.awssdk.services.kinesis.model.DeregisterStreamConsumerRequest; +import software.amazon.awssdk.services.kinesis.model.DescribeLimitsRequest; +import software.amazon.awssdk.services.kinesis.model.DescribeStreamConsumerRequest; +import software.amazon.awssdk.services.kinesis.model.DescribeStreamRequest; +import software.amazon.awssdk.services.kinesis.model.DescribeStreamSummaryRequest; +import software.amazon.awssdk.services.kinesis.model.DisableEnhancedMonitoringRequest; +import software.amazon.awssdk.services.kinesis.model.EnableEnhancedMonitoringRequest; +import software.amazon.awssdk.services.kinesis.model.GetRecordsRequest; +import software.amazon.awssdk.services.kinesis.model.GetShardIteratorRequest; +import software.amazon.awssdk.services.kinesis.model.IncreaseStreamRetentionPeriodRequest; +import software.amazon.awssdk.services.kinesis.model.ListShardsRequest; +import software.amazon.awssdk.services.kinesis.model.ListStreamsRequest; +import software.amazon.awssdk.services.kinesis.model.ListTagsForStreamRequest; +import software.amazon.awssdk.services.kinesis.model.MergeShardsRequest; +import software.amazon.awssdk.services.kinesis.model.PutRecordRequest; +import software.amazon.awssdk.services.kinesis.model.PutRecordsRequest; +import software.amazon.awssdk.services.kinesis.model.RegisterStreamConsumerRequest; +import software.amazon.awssdk.services.kinesis.model.RemoveTagsFromStreamRequest; +import software.amazon.awssdk.services.kinesis.model.SplitShardRequest; +import software.amazon.awssdk.services.kinesis.model.StartStreamEncryptionRequest; +import software.amazon.awssdk.services.kinesis.model.StopStreamEncryptionRequest; +import software.amazon.awssdk.services.kinesis.model.UpdateShardCountRequest; +import software.amazon.awssdk.utils.StringInputStream; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Optional; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +@RunWith(InstrumentationTestRunner.class) +@InstrumentationTestConfig(includePrefixes = {"software.amazon.awssdk"}, configName = "dt_enabled.yml") +public class DefaultKinesisClientTest { + + public KinesisClient kinesisClient; + public HttpExecuteResponse response; + + @Before + public void setup() { + MockHttpClient mockHttpClient = new MockHttpClient(); + response = mockHttpClient.getResponse(); + kinesisClient = KinesisClient.builder() + .httpClient(mockHttpClient) + .credentialsProvider(new CredProvider()) + .region(Region.US_EAST_1) + .build(); + } + + @Test + public void testAddTagsToStream() { + txn(() -> kinesisClient.addTagsToStream(AddTagsToStreamRequest.builder().build())); + assertKinesisTrace("Kinesis/addTagsToStream", true); + } + + @Test + public void testCreateStream() { + txn(() -> kinesisClient.createStream(CreateStreamRequest.builder().build())); + assertKinesisTrace("Kinesis/createStream", false); + } + + @Test + public void testDecreaseStreamRetentionPeriod() { + txn(() -> kinesisClient.decreaseStreamRetentionPeriod(DecreaseStreamRetentionPeriodRequest.builder().build())); + assertKinesisTrace("Kinesis/decreaseStreamRetentionPeriod", false); + } + + @Test + public void testDeleteStream() { + txn(() -> kinesisClient.deleteStream(DeleteStreamRequest.builder().build())); + 
assertKinesisTrace("Kinesis/deleteStream", false); + } + + @Test + public void testDeregisterStreamConsumer() { + txn(() -> kinesisClient.deregisterStreamConsumer(DeregisterStreamConsumerRequest.builder().build())); + assertKinesisTrace("Kinesis/deregisterStreamConsumer", false); + } + + @Test + public void testDescribeLimits() { + txn(() -> kinesisClient.describeLimits(DescribeLimitsRequest.builder().build())); + assertKinesisTrace("Kinesis/describeLimits", false); + } + + @Test + public void testDescribeStream() { + txn(() -> kinesisClient.describeStream(DescribeStreamRequest.builder().build())); + assertKinesisTrace("Kinesis/describeStream", false); + } + + @Test + public void testDescribeStreamConsumer() { + txn(() -> kinesisClient.describeStreamConsumer(DescribeStreamConsumerRequest.builder().build())); + assertKinesisTrace("Kinesis/describeStreamConsumer", false); + } + + @Test + public void testDescribeStreamSummary() { + txn(() -> kinesisClient.describeStreamSummary(DescribeStreamSummaryRequest.builder().build())); + assertKinesisTrace("Kinesis/describeStreamSummary", false); + } + + @Test + public void DisableEnhancedMonitoring() { + txn(() -> kinesisClient.disableEnhancedMonitoring(DisableEnhancedMonitoringRequest.builder().build())); + assertKinesisTrace("Kinesis/disableEnhancedMonitoring", false); + } + + @Test + public void testEnableEnhancedMonitoring() { + txn(() -> kinesisClient.enableEnhancedMonitoring(EnableEnhancedMonitoringRequest.builder().build())); + assertKinesisTrace("Kinesis/enableEnhancedMonitoring", false); + } + + @Test + public void testGetRecords() { + txn(() -> kinesisClient.getRecords(GetRecordsRequest.builder().build())); + assertKinesisTrace("Kinesis/getRecords", false); + } + + @Test + public void testGetShardIterator() { + txn(() -> kinesisClient.getShardIterator(GetShardIteratorRequest.builder().build())); + assertKinesisTrace("Kinesis/getShardIterator", false); + } + + @Test + public void testIncreaseStreamRetentionPeriod() { + txn(() -> kinesisClient.increaseStreamRetentionPeriod(IncreaseStreamRetentionPeriodRequest.builder().build())); + assertKinesisTrace("Kinesis/increaseStreamRetentionPeriod", false); + } + + @Test + public void testListShards() { + txn(() -> kinesisClient.listShards(ListShardsRequest.builder().build())); + assertKinesisTrace("Kinesis/listShards", false); + } + + @Test + public void testListStreams() { + txn(() -> kinesisClient.listStreams(ListStreamsRequest.builder().build())); + assertKinesisTrace("Kinesis/listStreams", false); + } + + @Test + public void testListTagsForStream() { + txn(() -> kinesisClient.listTagsForStream(ListTagsForStreamRequest.builder().build())); + assertKinesisTrace("Kinesis/listTagsForStream", false); + } + + @Test + public void testMergeShards() { + txn(() -> kinesisClient.mergeShards(MergeShardsRequest.builder().build())); + assertKinesisTrace("Kinesis/mergeShards", false); + } + + @Test + public void testPutRecord() { + txn(() -> kinesisClient.putRecord(PutRecordRequest.builder().build())); + assertKinesisTrace("Kinesis/putRecord", false); + } + + @Test + public void testPutRecords() { + txn(() -> kinesisClient.putRecords(PutRecordsRequest.builder().build())); + assertKinesisTrace("Kinesis/putRecords", false); + } + + @Test + public void testRegisterStreamConsumer() { + txn(() -> kinesisClient.registerStreamConsumer(RegisterStreamConsumerRequest.builder().build())); + assertKinesisTrace("Kinesis/registerStreamConsumer", false); + } + + @Test + public void testRemoveTagsFromStream() { + txn(() -> 
kinesisClient.removeTagsFromStream(RemoveTagsFromStreamRequest.builder().build())); + assertKinesisTrace("Kinesis/removeTagsFromStream", false); + } + + @Test + public void testSplitShard() { + txn(() -> kinesisClient.splitShard(SplitShardRequest.builder().build())); + assertKinesisTrace("Kinesis/splitShard", false); + } + + @Test + public void testStartStreamEncryption() { + txn(() -> kinesisClient.startStreamEncryption(StartStreamEncryptionRequest.builder().build())); + assertKinesisTrace("Kinesis/startStreamEncryption", false); + } + + @Test + public void testStopStreamEncryption() { + txn(() -> kinesisClient.stopStreamEncryption(StopStreamEncryptionRequest.builder().build())); + assertKinesisTrace("Kinesis/stopStreamEncryption", false); + } + + @Test + public void testUpdateShardCount() { + txn(() -> kinesisClient.updateShardCount(UpdateShardCountRequest.builder().build())); + assertKinesisTrace("Kinesis/updateShardCount", false); + } + + @Trace(dispatcher = true) + private void txn(Runnable runnable) { + runnable.run(); + } + + private void assertKinesisTrace(String traceName, boolean assertSpan) { + Introspector introspector = InstrumentationTestRunner.getIntrospector(); + if (assertSpan) { + // Span events fail to be generated when enough transactions are done in succession + SpanEvent kinesisSpan = introspector.getSpanEvents().stream() + .filter(span -> traceName.equals(span.getName())) + .findFirst().orElse(null); + assertNotNull(kinesisSpan); + assertEquals("aws_kinesis_data_streams", kinesisSpan.getAgentAttributes().get("cloud.platform")); + } + Collection transactionTraces = introspector.getTransactionTracesForTransaction( + "OtherTransaction/Custom/software.amazon.awssdk.services.kinesis.DefaultKinesisClientTest/txn"); + TransactionTrace transactionTrace = transactionTraces.iterator().next(); + List children = transactionTrace.getInitialTraceSegment().getChildren(); + assertEquals(1, children.size()); + TraceSegment trace = children.get(0); + assertEquals(traceName, trace.getName()); + assertEquals("aws_kinesis_data_streams", trace.getTracerAttributes().get("cloud.platform")); + } + + private static class MockHttpClient implements SdkHttpClient { + private final ExecutableHttpRequest executableMock; + private final HttpExecuteResponse response; + private final SdkHttpFullResponse httpResponse; + + public MockHttpClient() { + executableMock = mock(ExecutableHttpRequest.class); + response = mock(HttpExecuteResponse.class, Mockito.RETURNS_DEEP_STUBS); + httpResponse = mock(SdkHttpFullResponse.class, Mockito.RETURNS_DEEP_STUBS); + when(response.httpResponse()).thenReturn(httpResponse); + when(httpResponse.toBuilder().content(any()).build()).thenReturn(httpResponse); + when(httpResponse.isSuccessful()).thenReturn(true); + AbortableInputStream inputStream = AbortableInputStream.create(new StringInputStream("42")); + when(httpResponse.content()).thenReturn(Optional.of(inputStream)); + try { + when(executableMock.call()).thenReturn(response); + + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + public void close() { + } + + @Override + public ExecutableHttpRequest prepareRequest(HttpExecuteRequest httpExecuteRequest) { + return executableMock; + } + + @Override + public String clientName() { + return "MockHttpClient"; + } + + public HttpExecuteResponse getResponse() { + return response; + } + } + + private static class CredProvider implements AwsCredentialsProvider { + @Override + public AwsCredentials resolveCredentials() { + AwsCredentials 
credentials = mock(AwsCredentials.class); + when(credentials.accessKeyId()).thenReturn("accessKeyId"); + when(credentials.secretAccessKey()).thenReturn("secretAccessKey"); + return credentials; + } + } +} diff --git a/instrumentation/aws-java-sdk-kinesis-2.0.6/src/test/resources/dt_enabled.yml b/instrumentation/aws-java-sdk-kinesis-2.0.6/src/test/resources/dt_enabled.yml new file mode 100644 index 0000000000..53b0968002 --- /dev/null +++ b/instrumentation/aws-java-sdk-kinesis-2.0.6/src/test/resources/dt_enabled.yml @@ -0,0 +1,5 @@ +common: &default_settings + distributed_tracing: + enabled: true + span_events: + enabled: true \ No newline at end of file diff --git a/instrumentation/aws-java-sdk-lambda-1.11.280/build.gradle b/instrumentation/aws-java-sdk-lambda-1.11.280/build.gradle new file mode 100644 index 0000000000..464c45fed6 --- /dev/null +++ b/instrumentation/aws-java-sdk-lambda-1.11.280/build.gradle @@ -0,0 +1,20 @@ +jar { + manifest { attributes 'Implementation-Title': 'com.newrelic.instrumentation.aws-java-sdk-lambda-1.11.280' } +} + +dependencies { + implementation(project(":agent-bridge")) + implementation(project(":agent-bridge-datastore")) + + implementation("com.amazonaws:aws-java-sdk-lambda:1.12.763") +} + +verifyInstrumentation { + // not using passesOnly to decrease the number of artifacts this is tested against + passes 'com.amazonaws:aws-java-sdk-lambda:[1.11.280,)' +} + +site { + title 'AWS Lambda' + type 'Framework' +} diff --git a/instrumentation/aws-java-sdk-lambda-1.11.280/src/main/java/com/agent/instrumentation/awsjavasdk1/services/lambda/FunctionProcessedData.java b/instrumentation/aws-java-sdk-lambda-1.11.280/src/main/java/com/agent/instrumentation/awsjavasdk1/services/lambda/FunctionProcessedData.java new file mode 100644 index 0000000000..a51bdf1f17 --- /dev/null +++ b/instrumentation/aws-java-sdk-lambda-1.11.280/src/main/java/com/agent/instrumentation/awsjavasdk1/services/lambda/FunctionProcessedData.java @@ -0,0 +1,29 @@ +/* + * + * * Copyright 2024 New Relic Corporation. All rights reserved. + * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package com.agent.instrumentation.awsjavasdk1.services.lambda; + +/** + * Function data extracted from the request and config. + */ +class FunctionProcessedData { + private final String functionName; + private final String arn; + + public FunctionProcessedData(String functionName, String arn) { + this.functionName = functionName; + this.arn = arn; + } + + public String getFunctionName() { + return functionName; + } + + public String getArn() { + return arn; + } +} diff --git a/instrumentation/aws-java-sdk-lambda-1.11.280/src/main/java/com/agent/instrumentation/awsjavasdk1/services/lambda/FunctionRawData.java b/instrumentation/aws-java-sdk-lambda-1.11.280/src/main/java/com/agent/instrumentation/awsjavasdk1/services/lambda/FunctionRawData.java new file mode 100644 index 0000000000..604a3a0673 --- /dev/null +++ b/instrumentation/aws-java-sdk-lambda-1.11.280/src/main/java/com/agent/instrumentation/awsjavasdk1/services/lambda/FunctionRawData.java @@ -0,0 +1,55 @@ +/* + * + * * Copyright 2024 New Relic Corporation. All rights reserved. + * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package com.agent.instrumentation.awsjavasdk1.services.lambda; + +import java.util.Objects; + +/** + * Data necessary to calculate the ARN. This class is used as the key for the ARN cache. 
+ */ +public class FunctionRawData { + private final String functionRef; + private final String qualifier; + private final String region; + + public FunctionRawData(String functionRef, String qualifier, String region) { + this.functionRef = functionRef; + this.qualifier = qualifier; + this.region = region; + } + + public String getFunctionRef() { + return functionRef; + } + + public String getQualifier() { + return qualifier; + } + + public String getRegion() { + return region; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof FunctionRawData)) { + return false; + } + FunctionRawData that = (FunctionRawData) o; + return Objects.equals(functionRef, that.functionRef) && Objects.equals(qualifier, that.qualifier) && + Objects.equals(region, that.region); + } + + @Override + public int hashCode() { + return Objects.hash(functionRef, qualifier, region); + } +} diff --git a/instrumentation/aws-java-sdk-lambda-1.11.280/src/main/java/com/agent/instrumentation/awsjavasdk1/services/lambda/LambdaUtil.java b/instrumentation/aws-java-sdk-lambda-1.11.280/src/main/java/com/agent/instrumentation/awsjavasdk1/services/lambda/LambdaUtil.java new file mode 100644 index 0000000000..44cd29c058 --- /dev/null +++ b/instrumentation/aws-java-sdk-lambda-1.11.280/src/main/java/com/agent/instrumentation/awsjavasdk1/services/lambda/LambdaUtil.java @@ -0,0 +1,106 @@ +/* + * + * * Copyright 2024 New Relic Corporation. All rights reserved. + * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package com.agent.instrumentation.awsjavasdk1.services.lambda; + +import com.newrelic.agent.bridge.AgentBridge; +import com.newrelic.api.agent.CloudParameters; + +import java.util.function.Function; + +public class LambdaUtil { + + private static final String PLATFORM = "aws_lambda"; + private static final String NULL_ARN = ""; + private static final String PREFIX = "arn:aws:lambda:"; + private static final Function CACHE = + AgentBridge.collectionFactory.createAccessTimeBasedCache(3600, 8, LambdaUtil::processData); + + public static CloudParameters getCloudParameters(FunctionRawData functionRawData) { + FunctionProcessedData data = CACHE.apply(functionRawData); + String arn = data.getArn(); + CloudParameters.ResourceIdParameter cloudParameters = CloudParameters.provider(PLATFORM); + // the cache will always return the NULL_ARN when it is not possible to calculate the ARN + // so saving a few cycles by using != instead of equals. + if (arn != NULL_ARN) { + cloudParameters.resourceId(arn); + } + + return cloudParameters.build(); + } + + /** + *
<p> + * Calculates the simple function name and ARN given + * the function name, qualifier, and possibly region (provided by config). + * </p> + * <p> + * Aliases are returned as part of the ARN, but versions are removed + * because they would make it harder to link to Lambdas/Alias entities. + * </p> + * <p> + * If qualifiers are provided both in the function ref, and as a qualifier, the one in function ref "wins". + * If they differ, the LambdaClient will throw an exception. + * </p>
+ * + * @return a FunctionProcessedData object with the function name and ARN. + * If any of its values cannot be calculated, it will be the NULL_ARN. + */ + // Visible for testing + static FunctionProcessedData processData(FunctionRawData data) { + String functionRef = data.getFunctionRef(); + + String[] parts = functionRef.split(":"); + + String functionName = NULL_ARN; + String arn = NULL_ARN; + + if (parts.length == 1) { + // function ref is only function name + // does not have the account id, so cannot assemble the ARN. + functionName = functionRef; + } else if (parts.length == 2) { + // function ref is only function name with alias/version + // does not have the account id, so cannot assemble the ARN. + functionName = parts[0]; + } else if (parts.length == 3) { + // partial ARN: {account-id}:function:{function-name} + functionName = parts[2]; + String qualifier = data.getQualifier(); + if (qualifier == null) { + arn = PREFIX + data.getRegion() + ":" + functionRef; + } else { + arn = PREFIX + data.getRegion() + ":" + functionRef + ":" + qualifier; + } + } else if (parts.length == 4) { + // partial ARN with qualifier: {account-id}:function:{function-name}:{qualifier} + functionName = parts[2]; + arn = PREFIX + data.getRegion() + ":" + functionRef; + } else if (parts.length == 7) { + // full ARN: arn:aws:lambda:{region}:{account-id}:function:{function-name} + functionName = parts[6]; + String qualifier = data.getQualifier(); + if (qualifier == null) { + arn = functionRef; + } else { + arn = functionRef + ":" + qualifier; + } + } else if (parts.length == 8) { + // full ARN with qualifier: arn:aws:lambda:{region}:{account-id}:function:{function-name}:{qualifier} + functionName = parts[6]; + arn = functionRef; + } + // reference should be invalid if the number of parts do not match any of the expected cases + + return new FunctionProcessedData(functionName, arn); + } + + + public static String getSimpleFunctionName(FunctionRawData functionRawData) { + return CACHE.apply(functionRawData).getFunctionName(); + } +} diff --git a/instrumentation/aws-java-sdk-lambda-1.11.280/src/main/java/com/amazonaws/services/lambda/AWSLambdaAsyncClient_Instrumentation.java b/instrumentation/aws-java-sdk-lambda-1.11.280/src/main/java/com/amazonaws/services/lambda/AWSLambdaAsyncClient_Instrumentation.java new file mode 100644 index 0000000000..047bec36f6 --- /dev/null +++ b/instrumentation/aws-java-sdk-lambda-1.11.280/src/main/java/com/amazonaws/services/lambda/AWSLambdaAsyncClient_Instrumentation.java @@ -0,0 +1,71 @@ +/* + * + * * Copyright 2024 New Relic Corporation. All rights reserved. 
+ * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package com.amazonaws.services.lambda; + +import com.agent.instrumentation.awsjavasdk1.services.lambda.FunctionRawData; +import com.agent.instrumentation.awsjavasdk1.services.lambda.LambdaUtil; +import com.amazonaws.handlers.AsyncHandler; +import com.amazonaws.services.lambda.model.InvokeRequest; +import com.amazonaws.services.lambda.model.InvokeResult; +import com.newrelic.api.agent.CloudParameters; +import com.newrelic.api.agent.NewRelic; +import com.newrelic.api.agent.Segment; +import com.newrelic.api.agent.weaver.MatchType; +import com.newrelic.api.agent.weaver.Weave; +import com.newrelic.api.agent.weaver.Weaver; + +import java.util.concurrent.Future; + +@Weave(type = MatchType.ExactClass, originalName = "com.amazonaws.services.lambda.AWSLambdaAsyncClient") +public abstract class AWSLambdaAsyncClient_Instrumentation { + + protected abstract String getSigningRegion(); + + public Future invokeAsync(final InvokeRequest request, AsyncHandler asyncHandler) { + FunctionRawData functionRawData = new FunctionRawData(request.getFunctionName(), request.getQualifier(), getSigningRegion()); + CloudParameters cloudParameters = LambdaUtil.getCloudParameters(functionRawData); + String functionName = LambdaUtil.getSimpleFunctionName(functionRawData); + Segment segment = NewRelic.getAgent().getTransaction().startSegment("Lambda", "invoke/" + functionName); + + try { + segment.reportAsExternal(cloudParameters); + asyncHandler = new SegmentEndingAsyncHandler(asyncHandler, segment); + return Weaver.callOriginal(); + } catch (Throwable t) { + segment.end(); + throw t; + } + } + + private static class SegmentEndingAsyncHandler implements AsyncHandler { + private final AsyncHandler originalHandler; + private final Segment segment; + + public SegmentEndingAsyncHandler( + AsyncHandler asyncHandler, Segment segment) { + this.segment = segment; + this.originalHandler = asyncHandler; + } + + @Override + public void onError(Exception exception) { + segment.end(); + if (originalHandler != null) { + originalHandler.onError(exception); + } + } + + @Override + public void onSuccess(InvokeRequest request, InvokeResult invokeResult) { + segment.end(); + if (originalHandler != null) { + originalHandler.onSuccess(request, invokeResult); + } + } + } +} diff --git a/instrumentation/aws-java-sdk-lambda-1.11.280/src/main/java/com/amazonaws/services/lambda/AWSLambdaClient_Instrumentation.java b/instrumentation/aws-java-sdk-lambda-1.11.280/src/main/java/com/amazonaws/services/lambda/AWSLambdaClient_Instrumentation.java new file mode 100644 index 0000000000..23245e49d4 --- /dev/null +++ b/instrumentation/aws-java-sdk-lambda-1.11.280/src/main/java/com/amazonaws/services/lambda/AWSLambdaClient_Instrumentation.java @@ -0,0 +1,39 @@ +/* + * + * * Copyright 2024 New Relic Corporation. All rights reserved. 
+ * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package com.amazonaws.services.lambda; + +import com.agent.instrumentation.awsjavasdk1.services.lambda.FunctionRawData; +import com.agent.instrumentation.awsjavasdk1.services.lambda.LambdaUtil; +import com.amazonaws.services.lambda.model.InvokeRequest; +import com.amazonaws.services.lambda.model.InvokeResult; +import com.newrelic.api.agent.CloudParameters; +import com.newrelic.api.agent.NewRelic; +import com.newrelic.api.agent.Trace; +import com.newrelic.api.agent.TracedMethod; +import com.newrelic.api.agent.weaver.CatchAndLog; +import com.newrelic.api.agent.weaver.MatchType; +import com.newrelic.api.agent.weaver.Weave; +import com.newrelic.api.agent.weaver.Weaver; + +@Weave(type = MatchType.ExactClass, originalName = "com.amazonaws.services.lambda.AWSLambdaClient") +public abstract class AWSLambdaClient_Instrumentation { + + abstract protected String getSigningRegion(); + + @Trace(leaf = true) + public InvokeResult invoke(InvokeRequest invokeRequest) { + FunctionRawData functionRawData = new FunctionRawData(invokeRequest.getFunctionName(), invokeRequest.getQualifier(), getSigningRegion()); + CloudParameters cloudParameters = LambdaUtil.getCloudParameters(functionRawData); + TracedMethod tracedMethod = NewRelic.getAgent().getTracedMethod(); + tracedMethod.reportAsExternal(cloudParameters); + tracedMethod.setMetricName("Lambda", "invoke", LambdaUtil.getSimpleFunctionName(functionRawData)); + return Weaver.callOriginal(); + } + +} + diff --git a/instrumentation/aws-java-sdk-lambda-1.11.280/src/test/java/com/agent/instrumentation/awsjavasdk1/services/lambda/LambdaUtilTest.java b/instrumentation/aws-java-sdk-lambda-1.11.280/src/test/java/com/agent/instrumentation/awsjavasdk1/services/lambda/LambdaUtilTest.java new file mode 100644 index 0000000000..d3a8302f95 --- /dev/null +++ b/instrumentation/aws-java-sdk-lambda-1.11.280/src/test/java/com/agent/instrumentation/awsjavasdk1/services/lambda/LambdaUtilTest.java @@ -0,0 +1,164 @@ +/* + * + * * Copyright 2024 New Relic Corporation. All rights reserved. 
+ * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package com.agent.instrumentation.awsjavasdk1.services.lambda; + +import com.amazonaws.regions.Regions; +import com.amazonaws.services.lambda.model.InvokeRequest; +import com.newrelic.api.agent.CloudParameters; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; + +public class LambdaUtilTest { + + @Test + public void testGetCloudParamFunctionName() { + FunctionRawData functionRawData = new FunctionRawData("my-function", null, getRegion()); + CloudParameters cloudParameters = LambdaUtil.getCloudParameters(functionRawData); + assertNotNull(cloudParameters); + assertEquals("aws_lambda", cloudParameters.getPlatform()); + assertNull(cloudParameters.getResourceId()); + } + + @Test + public void testGetCloudParamPartialArn() { + FunctionRawData functionRawData = new FunctionRawData("123456789012:function:my-function", null, getRegion()); + CloudParameters cloudParameters = LambdaUtil.getCloudParameters(functionRawData); + assertNotNull(cloudParameters); + assertEquals("aws_lambda", cloudParameters.getPlatform()); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function", cloudParameters.getResourceId()); + } + + @Test + public void testGetCloudParamArnQualifier() { + FunctionRawData functionRawData = new FunctionRawData("arn:aws:lambda:us-east-1:123456789012:function:my-function", "alias", getRegion()); + CloudParameters cloudParameters = LambdaUtil.getCloudParameters(functionRawData); + assertNotNull(cloudParameters); + assertEquals("aws_lambda", cloudParameters.getPlatform()); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function:alias", cloudParameters.getResourceId()); + } + + @Test + public void testGetArnFunctionName() { + FunctionProcessedData data = LambdaUtil.processData(new FunctionRawData("my-function", null, getRegion())); + assertEquals("", data.getArn()); + assertEquals("my-function", data.getFunctionName()); + } + + @Test + public void testGetArnFunctionNameWithAlias() { + FunctionProcessedData data = LambdaUtil.processData(new FunctionRawData("my-function:alias", null, getRegion())); + assertEquals("", data.getArn()); + assertEquals("my-function", data.getFunctionName()); + } + + @Test + public void testGetArnFunctionNameWithVersion() { + FunctionProcessedData data = LambdaUtil.processData(new FunctionRawData("my-function:123", null, getRegion())); + assertEquals("", data.getArn()); + assertEquals("my-function", data.getFunctionName()); + } + + @Test + public void testGetArnFunctionNameAndAliasQualifier() { + FunctionProcessedData data = LambdaUtil.processData(new FunctionRawData("my-function", "alias", getRegion())); + assertEquals("", data.getArn()); + assertEquals("my-function", data.getFunctionName()); + } + + @Test + public void testGetArnFunctionNameAndVersionQualifier() { + FunctionProcessedData data = LambdaUtil.processData(new FunctionRawData("my-function", "123", getRegion())); + assertEquals("", data.getArn()); + assertEquals("my-function", data.getFunctionName()); + } + + @Test + public void testGetArnPartialArn() { + FunctionProcessedData data = LambdaUtil.processData(new FunctionRawData("123456789012:function:my-function", null, getRegion())); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function", data.getArn()); + assertEquals("my-function", data.getFunctionName()); + } + + @Test + public void testGetArnPartialArnWithAlias() { + 
FunctionProcessedData data = LambdaUtil.processData(new FunctionRawData("123456789012:function:my-function:alias", null, getRegion())); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function:alias", data.getArn()); + assertEquals("my-function", data.getFunctionName()); + } + + @Test + public void testGetArnPartialArnWithVersion() { + FunctionProcessedData data = LambdaUtil.processData(new FunctionRawData("123456789012:function:my-function:123", null, getRegion())); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function:123", data.getArn()); + assertEquals("my-function", data.getFunctionName()); + } + + @Test + public void testGetArnPartialArnAndAliasQualifier() { + FunctionProcessedData data = LambdaUtil.processData(new FunctionRawData("123456789012:function:my-function", "alias", getRegion())); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function:alias", data.getArn()); + assertEquals("my-function", data.getFunctionName()); + } + + @Test + public void testGetArnPartialArnAndVersionQualifier() { + FunctionProcessedData data = LambdaUtil.processData(new FunctionRawData("123456789012:function:my-function", "123", getRegion())); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function:123", data.getArn()); + assertEquals("my-function", data.getFunctionName()); + } + + @Test + public void testGetArnFullArn() { + FunctionProcessedData data = LambdaUtil.processData(new FunctionRawData("arn:aws:lambda:us-east-1:123456789012:function:my-function", null, getRegion())); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function", data.getArn()); + assertEquals("my-function", data.getFunctionName()); + } + + @Test + public void testGetArnFullArnWithAlias() { + FunctionProcessedData data = LambdaUtil.processData(new FunctionRawData("arn:aws:lambda:us-east-1:123456789012:function:my-function:alias", null, getRegion())); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function:alias", data.getArn()); + assertEquals("my-function", data.getFunctionName()); + } + + @Test + public void testGetArnFullArnWithVersion() { + FunctionProcessedData data = LambdaUtil.processData(new FunctionRawData("arn:aws:lambda:us-east-1:123456789012:function:my-function:123", null, getRegion())); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function:123", data.getArn()); + assertEquals("my-function", data.getFunctionName()); + } + + @Test + public void testGetArnFullArnAndAliasQualifier() { + FunctionProcessedData data = LambdaUtil.processData(new FunctionRawData("arn:aws:lambda:us-east-1:123456789012:function:my-function", "alias", getRegion())); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function:alias", data.getArn()); + assertEquals("my-function", data.getFunctionName()); + } + + @Test + public void testGetArnFullArnAndVersionQualifier() { + FunctionProcessedData data = LambdaUtil.processData(new FunctionRawData("arn:aws:lambda:us-east-1:123456789012:function:my-function", "123", getRegion())); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function:123", data.getArn()); + assertEquals("my-function", data.getFunctionName()); + } + + @Test + public void testGetArnDifferentRegion() { + FunctionProcessedData data = LambdaUtil.processData(new FunctionRawData("arn:aws:lambda:us-west-2:123456789012:function:my-function", null, getRegion())); + assertEquals("arn:aws:lambda:us-west-2:123456789012:function:my-function", data.getArn()); + 
assertEquals("my-function", data.getFunctionName()); + } + + public String getRegion() { + return Regions.US_EAST_1.getName(); + } + +} \ No newline at end of file diff --git a/instrumentation/aws-java-sdk-lambda-1.11.280/src/test/java/com/amazonaws/services/lambda/DefaultLambdaAsyncClient_InstrumentationTest.java b/instrumentation/aws-java-sdk-lambda-1.11.280/src/test/java/com/amazonaws/services/lambda/DefaultLambdaAsyncClient_InstrumentationTest.java new file mode 100644 index 0000000000..8c8ae40b8d --- /dev/null +++ b/instrumentation/aws-java-sdk-lambda-1.11.280/src/test/java/com/amazonaws/services/lambda/DefaultLambdaAsyncClient_InstrumentationTest.java @@ -0,0 +1,102 @@ +/* + * + * * Copyright 2024 New Relic Corporation. All rights reserved. + * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package com.amazonaws.services.lambda; + +import com.amazonaws.auth.AWSCredentials; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.client.builder.AwsClientBuilder; +import com.amazonaws.services.lambda.model.InvokeRequest; +import com.newrelic.agent.introspec.InstrumentationTestConfig; +import com.newrelic.agent.introspec.InstrumentationTestRunner; +import com.newrelic.agent.introspec.Introspector; +import com.newrelic.agent.introspec.SpanEvent; +import com.newrelic.agent.introspec.TraceSegment; +import com.newrelic.agent.introspec.TransactionTrace; +import com.newrelic.agent.introspec.internal.HttpServerRule; +import com.newrelic.api.agent.Trace; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.runner.RunWith; + +import java.net.URISyntaxException; +import java.util.Collection; +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +@RunWith(InstrumentationTestRunner.class) +@InstrumentationTestConfig(includePrefixes = {"com.amazonaws"}, configName = "dt_enabled.yml") +public class DefaultLambdaAsyncClient_InstrumentationTest { + + public AWSLambdaAsync lambdaClient; + + @Rule + public HttpServerRule server = new HttpServerRule(); + + @Before + public void setup() throws URISyntaxException { + AwsClientBuilder.EndpointConfiguration endpoint = new AwsClientBuilder.EndpointConfiguration(server.getEndPoint().toString(), "us-east-1"); + lambdaClient = AWSLambdaAsyncClient.asyncBuilder() + .withEndpointConfiguration(endpoint) + .withCredentials(new CredProvider()) + .build(); + } + + @Test + public void testInvokeArn() { + Introspector introspector = InstrumentationTestRunner.getIntrospector(); + txn(); + SpanEvent lambdaSpan = introspector.getSpanEvents().stream() + .filter(span -> span.getName().equals("Lambda/invoke/my-function")) + .findFirst().orElse(null); + assertNotNull(lambdaSpan); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function", lambdaSpan.getAgentAttributes().get("cloud.resource_id")); + + Collection transactionTraces = introspector.getTransactionTracesForTransaction( + "OtherTransaction/Custom/com.amazonaws.services.lambda.DefaultLambdaAsyncClient_InstrumentationTest/txn"); + assertEquals(1,transactionTraces.size()); + TransactionTrace transactionTrace = transactionTraces.iterator().next(); + List children = transactionTrace.getInitialTraceSegment().getChildren(); + assertEquals(1, children.size()); + TraceSegment trace = children.get(0); + assertEquals("Lambda/invoke/my-function", trace.getName()); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function", 
trace.getTracerAttributes().get("cloud.resource_id")); + } + + @Trace(dispatcher = true) + public void txn() { + InvokeRequest request = new InvokeRequest(); + request.setFunctionName("arn:aws:lambda:us-east-1:123456789012:function:my-function"); + lambdaClient.invoke(request); + } + + private static class CredProvider implements AWSCredentialsProvider { + @Override + public AWSCredentials getCredentials() { + return new AWSCredentials() { + + @Override + public String getAWSAccessKeyId() { + return "accessKeyId"; + } + + @Override + public String getAWSSecretKey() { + return "secretKey"; + } + }; + } + + @Override + public void refresh() { + + } + } +} \ No newline at end of file diff --git a/instrumentation/aws-java-sdk-lambda-1.11.280/src/test/java/com/amazonaws/services/lambda/DefaultLambdaClient_InstrumentationTest.java b/instrumentation/aws-java-sdk-lambda-1.11.280/src/test/java/com/amazonaws/services/lambda/DefaultLambdaClient_InstrumentationTest.java new file mode 100644 index 0000000000..02584fb431 --- /dev/null +++ b/instrumentation/aws-java-sdk-lambda-1.11.280/src/test/java/com/amazonaws/services/lambda/DefaultLambdaClient_InstrumentationTest.java @@ -0,0 +1,96 @@ +/* + * + * * Copyright 2024 New Relic Corporation. All rights reserved. + * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package com.amazonaws.services.lambda; + +import com.amazonaws.auth.AWSCredentials; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.client.builder.AwsClientBuilder; +import com.amazonaws.services.lambda.model.InvokeRequest; +import com.newrelic.agent.introspec.InstrumentationTestConfig; +import com.newrelic.agent.introspec.InstrumentationTestRunner; +import com.newrelic.agent.introspec.Introspector; +import com.newrelic.agent.introspec.SpanEvent; +import com.newrelic.agent.introspec.TraceSegment; +import com.newrelic.agent.introspec.TransactionTrace; +import com.newrelic.agent.introspec.internal.HttpServerRule; +import com.newrelic.api.agent.Trace; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.runner.RunWith; + +import java.net.URISyntaxException; +import java.util.Collection; +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +@RunWith(InstrumentationTestRunner.class) +@InstrumentationTestConfig(includePrefixes = {"com.amazonaws"}, configName = "dt_enabled.yml") +public class DefaultLambdaClient_InstrumentationTest { + + @Rule + public HttpServerRule server = new HttpServerRule(); + private AWSLambda lambdaClient; + + @Before + public void setup() throws URISyntaxException { + AwsClientBuilder.EndpointConfiguration endpoint = new AwsClientBuilder.EndpointConfiguration(server.getEndPoint().toString(), "us-east-1"); + lambdaClient = AWSLambdaClient.builder() + .withCredentials(new CredProvider()) + .withEndpointConfiguration(endpoint) + .build(); + } + + @Test + public void testInvokeArn() { + Introspector introspector = InstrumentationTestRunner.getIntrospector(); + txn(); + SpanEvent lambdaSpan = introspector.getSpanEvents().stream() + .filter(span -> span.getName().equals("Lambda/invoke/my-function")) + .findFirst().orElse(null); + assertNotNull(lambdaSpan); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function", lambdaSpan.getAgentAttributes().get("cloud.resource_id")); + + Collection transactionTraces = introspector.getTransactionTracesForTransaction( 
+ "OtherTransaction/Custom/com.amazonaws.services.lambda.DefaultLambdaClient_InstrumentationTest/txn"); + assertEquals(1,transactionTraces.size()); + TransactionTrace transactionTrace = transactionTraces.iterator().next(); + List children = transactionTrace.getInitialTraceSegment().getChildren(); + assertEquals(1, children.size()); + TraceSegment trace = children.get(0); + assertEquals("Lambda/invoke/my-function", trace.getName()); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function", trace.getTracerAttributes().get("cloud.resource_id")); + } + + @Trace(dispatcher = true) + public void txn() { + InvokeRequest request = new InvokeRequest(); + request.withFunctionName("arn:aws:lambda:us-east-1:123456789012:function:my-function"); + lambdaClient.invoke(request); + } + + + private static class CredProvider implements AWSCredentialsProvider { + @Override + public AWSCredentials getCredentials() { + AWSCredentials credentials = mock(AWSCredentials.class); + when(credentials.getAWSAccessKeyId()).thenReturn("accessKeyId"); + when(credentials.getAWSSecretKey()).thenReturn("secretAccessKey"); + return credentials; + } + + @Override + public void refresh() { + + } + } +} \ No newline at end of file diff --git a/instrumentation/aws-java-sdk-lambda-1.11.280/src/test/resources/dt_enabled.yml b/instrumentation/aws-java-sdk-lambda-1.11.280/src/test/resources/dt_enabled.yml new file mode 100644 index 0000000000..53b0968002 --- /dev/null +++ b/instrumentation/aws-java-sdk-lambda-1.11.280/src/test/resources/dt_enabled.yml @@ -0,0 +1,5 @@ +common: &default_settings + distributed_tracing: + enabled: true + span_events: + enabled: true \ No newline at end of file diff --git a/instrumentation/aws-java-sdk-lambda-2.1/build.gradle b/instrumentation/aws-java-sdk-lambda-2.1/build.gradle new file mode 100644 index 0000000000..11dd84e6ed --- /dev/null +++ b/instrumentation/aws-java-sdk-lambda-2.1/build.gradle @@ -0,0 +1,23 @@ +jar { + manifest { attributes 'Implementation-Title': 'com.newrelic.instrumentation.aws-java-sdk-lambda-2.1' } +} + + + +dependencies { + implementation(project(":agent-bridge")) + implementation(project(":agent-bridge-datastore")) + + implementation("software.amazon.awssdk:lambda:2.10.14") +} + +verifyInstrumentation { + passes 'software.amazon.awssdk:lambda:[2.1.0,)' + excludeRegex ".*preview.*" + exclude "software.amazon.awssdk:lambda:2.17.200" +} + +site { + title 'AWS Lambda' + type 'Framework' +} diff --git a/instrumentation/aws-java-sdk-lambda-2.1/src/main/java/com/agent/instrumentation/awsjavasdk2/services/lambda/FunctionProcessedData.java b/instrumentation/aws-java-sdk-lambda-2.1/src/main/java/com/agent/instrumentation/awsjavasdk2/services/lambda/FunctionProcessedData.java new file mode 100644 index 0000000000..88413afe2d --- /dev/null +++ b/instrumentation/aws-java-sdk-lambda-2.1/src/main/java/com/agent/instrumentation/awsjavasdk2/services/lambda/FunctionProcessedData.java @@ -0,0 +1,29 @@ +/* + * + * * Copyright 2024 New Relic Corporation. All rights reserved. + * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package com.agent.instrumentation.awsjavasdk2.services.lambda; + +/** + * Function data extracted from the request and config. 
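+ * Holds the simple function name and the assembled ARN; the ARN is left empty when it cannot be determined.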
+ */ +class FunctionProcessedData { + private final String functionName; + private final String arn; + + public FunctionProcessedData(String functionName, String arn) { + this.functionName = functionName; + this.arn = arn; + } + + public String getFunctionName() { + return functionName; + } + + public String getArn() { + return arn; + } +} diff --git a/instrumentation/aws-java-sdk-lambda-2.1/src/main/java/com/agent/instrumentation/awsjavasdk2/services/lambda/FunctionRawData.java b/instrumentation/aws-java-sdk-lambda-2.1/src/main/java/com/agent/instrumentation/awsjavasdk2/services/lambda/FunctionRawData.java new file mode 100644 index 0000000000..dcc7be01c5 --- /dev/null +++ b/instrumentation/aws-java-sdk-lambda-2.1/src/main/java/com/agent/instrumentation/awsjavasdk2/services/lambda/FunctionRawData.java @@ -0,0 +1,61 @@ +/* + * + * * Copyright 2024 New Relic Corporation. All rights reserved. + * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package com.agent.instrumentation.awsjavasdk2.services.lambda; + +import software.amazon.awssdk.awscore.client.config.AwsClientOption; +import software.amazon.awssdk.core.client.config.SdkClientConfiguration; + +import java.util.Objects; + +/** + * Data necessary to calculate the ARN. This class is used as the key for the ARN cache. + */ +public class FunctionRawData { + private final String functionRef; + private final String qualifier; + // the code only cares about the region, but the config is stored + // to prevent unnecessary calls to get the region + private final SdkClientConfiguration config; + + public FunctionRawData(String functionRef, String qualifier, SdkClientConfiguration config) { + this.functionRef = functionRef; + this.qualifier = qualifier; + this.config = config; + } + + public String getFunctionRef() { + return functionRef; + } + + public String getQualifier() { + return qualifier; + } + + public String getRegion() { + return config.option(AwsClientOption.AWS_REGION).toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof FunctionRawData)) { + return false; + } + FunctionRawData that = (FunctionRawData) o; + return Objects.equals(functionRef, that.functionRef) && Objects.equals(qualifier, that.qualifier) && + // config uses Object.equals, so should be fast + Objects.equals(config, that.config); + } + + @Override + public int hashCode() { + return Objects.hash(functionRef, qualifier, config); + } +} diff --git a/instrumentation/aws-java-sdk-lambda-2.1/src/main/java/com/agent/instrumentation/awsjavasdk2/services/lambda/LambdaUtil.java b/instrumentation/aws-java-sdk-lambda-2.1/src/main/java/com/agent/instrumentation/awsjavasdk2/services/lambda/LambdaUtil.java new file mode 100644 index 0000000000..2fdd592fc8 --- /dev/null +++ b/instrumentation/aws-java-sdk-lambda-2.1/src/main/java/com/agent/instrumentation/awsjavasdk2/services/lambda/LambdaUtil.java @@ -0,0 +1,105 @@ +/* + * + * * Copyright 2024 New Relic Corporation. All rights reserved. 
+ * * SPDX-License-Identifier: Apache-2.0
+ *
+ */
+
+package com.agent.instrumentation.awsjavasdk2.services.lambda;
+
+import com.newrelic.agent.bridge.AgentBridge;
+import com.newrelic.api.agent.CloudParameters;
+
+import java.util.function.Function;
+
+public class LambdaUtil {
+
+ private static final String PLATFORM = "aws_lambda";
+ private static final String NULL_ARN = "";
+ private static final String PREFIX = "arn:aws:lambda:";
+ private static final Function<FunctionRawData, FunctionProcessedData> CACHE =
+ AgentBridge.collectionFactory.createAccessTimeBasedCache(3600, 8, LambdaUtil::processData);
+
+ public static CloudParameters getCloudParameters(FunctionRawData functionRawData) {
+ FunctionProcessedData functionData = CACHE.apply(functionRawData);
+ String arn = functionData.getArn();
+ CloudParameters.ResourceIdParameter cloudParameters = CloudParameters.provider(PLATFORM);
+ // the cache will always return the NULL_ARN when it is not possible to calculate the ARN,
+ // so we save a few cycles by using != instead of equals.
+ if (arn != NULL_ARN) {
+ cloudParameters.resourceId(arn);
+ }
+
+ return cloudParameters.build();
+ }
+
+ /**
+ * Calculates the simple function name and ARN given
+ * the function name, qualifier, and possibly region (provided by config).
+ *
+ * Aliases are returned as part of the ARN, but versions are removed
+ * because they would make it harder to link to Lambdas/Alias entities.
+ *
+ * If qualifiers are provided both in the function ref, and as a qualifier, the one in function ref "wins".
+ * If they differ, the LambdaClient will throw an exception.
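+ *
+ * Examples (mirroring this PR's unit tests): "my-function" with no qualifier yields the name "my-function" and an
+ * empty ARN (the account id is unknown); "123456789012:function:my-function" in us-east-1 yields
+ * "arn:aws:lambda:us-east-1:123456789012:function:my-function"; a full ARN plus qualifier "alias" yields
+ * "arn:aws:lambda:us-east-1:123456789012:function:my-function:alias".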
+ * + * @return a FunctionProcessedData object with the function name and ARN. + * If any of its values cannot be calculated, it will be the NULL_ARN. + */ + // Visible for testing + static FunctionProcessedData processData(FunctionRawData data) { + String functionRef = data.getFunctionRef(); + + String[] parts = functionRef.split(":"); + + String functionName = NULL_ARN; + String arn = NULL_ARN; + + if (parts.length == 1) { + // function ref is only function name + // does not have the account id, so cannot assemble the ARN. + functionName = functionRef; + } else if (parts.length == 2) { + // function ref is only function name with alias/version + // does not have the account id, so cannot assemble the ARN. + functionName = parts[0]; + } else if (parts.length == 3) { + // partial ARN: {account-id}:function:{function-name} + functionName = parts[2]; + String qualifier = data.getQualifier(); + if (qualifier == null) { + arn = PREFIX + data.getRegion() + ":" + functionRef; + } else { + arn = PREFIX + data.getRegion() + ":" + functionRef + ":" + qualifier; + } + } else if (parts.length == 4) { + // partial ARN with qualifier: {account-id}:function:{function-name}:{qualifier} + functionName = parts[2]; + arn = PREFIX + data.getRegion() + ":" + functionRef; + } else if (parts.length == 7) { + // full ARN: arn:aws:lambda:{region}:{account-id}:function:{function-name} + functionName = parts[6]; + String qualifier = data.getQualifier(); + if (qualifier == null) { + arn = functionRef; + } else { + arn = functionRef + ":" + qualifier; + } + } else if (parts.length == 8) { + // full ARN with qualifier: arn:aws:lambda:{region}:{account-id}:function:{function-name}:{qualifier} + functionName = parts[6]; + arn = functionRef; + } + // reference should be invalid if the number of parts do not match any of the expected cases + + return new FunctionProcessedData(functionName, arn); + } + + public static String getSimpleFunctionName(FunctionRawData functionRawData) { + return CACHE.apply(functionRawData).getFunctionName(); + } +} diff --git a/instrumentation/aws-java-sdk-lambda-2.1/src/main/java/software/amazon/awssdk/services/lambda/DefaultLambdaAsyncClient_Instrumentation.java b/instrumentation/aws-java-sdk-lambda-2.1/src/main/java/software/amazon/awssdk/services/lambda/DefaultLambdaAsyncClient_Instrumentation.java new file mode 100644 index 0000000000..84378b169b --- /dev/null +++ b/instrumentation/aws-java-sdk-lambda-2.1/src/main/java/software/amazon/awssdk/services/lambda/DefaultLambdaAsyncClient_Instrumentation.java @@ -0,0 +1,62 @@ +/* + * + * * Copyright 2024 New Relic Corporation. All rights reserved. 
+ * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package software.amazon.awssdk.services.lambda; + +import com.agent.instrumentation.awsjavasdk2.services.lambda.FunctionRawData; +import com.agent.instrumentation.awsjavasdk2.services.lambda.LambdaUtil; +import com.newrelic.api.agent.CloudParameters; +import com.newrelic.api.agent.NewRelic; +import com.newrelic.api.agent.Segment; +import com.newrelic.api.agent.Trace; +import com.newrelic.api.agent.weaver.MatchType; +import com.newrelic.api.agent.weaver.Weave; +import com.newrelic.api.agent.weaver.Weaver; +import software.amazon.awssdk.core.client.config.SdkClientConfiguration; +import software.amazon.awssdk.services.lambda.model.InvokeRequest; +import software.amazon.awssdk.services.lambda.model.InvokeResponse; + +import java.util.concurrent.CompletableFuture; +import java.util.function.BiConsumer; +import java.util.logging.Level; + +@Weave(type = MatchType.ExactClass, originalName = "software.amazon.awssdk.services.lambda.DefaultLambdaAsyncClient") +final class DefaultLambdaAsyncClient_Instrumentation { + + private final SdkClientConfiguration clientConfiguration = Weaver.callOriginal(); + + public CompletableFuture invoke(InvokeRequest invokeRequest) { + FunctionRawData functionRawData = new FunctionRawData(invokeRequest.functionName(), invokeRequest.qualifier(), clientConfiguration); + CloudParameters cloudParameters = LambdaUtil.getCloudParameters(functionRawData); + String functionName = LambdaUtil.getSimpleFunctionName(functionRawData); + Segment segment = NewRelic.getAgent().getTransaction().startSegment("Lambda", "invoke/" + functionName); + + try { + segment.reportAsExternal(cloudParameters); + CompletableFuture invokeResponseCompletableFuture = Weaver.callOriginal(); + + return invokeResponseCompletableFuture.whenComplete(new SegmentFinisher(segment)); + } catch (Throwable t) { + + segment.end(); + throw t; + } + } + + private static class SegmentFinisher implements BiConsumer { + private final Segment segment; + + public SegmentFinisher(Segment segment) { + this.segment = segment; + } + + @Override + public void accept(InvokeResponse invokeResponse, Throwable throwable) { + segment.end(); + } + } +} diff --git a/instrumentation/aws-java-sdk-lambda-2.1/src/main/java/software/amazon/awssdk/services/lambda/DefaultLambdaClient_Instrumentation.java b/instrumentation/aws-java-sdk-lambda-2.1/src/main/java/software/amazon/awssdk/services/lambda/DefaultLambdaClient_Instrumentation.java new file mode 100644 index 0000000000..44b9c80c34 --- /dev/null +++ b/instrumentation/aws-java-sdk-lambda-2.1/src/main/java/software/amazon/awssdk/services/lambda/DefaultLambdaClient_Instrumentation.java @@ -0,0 +1,38 @@ +/* + * + * * Copyright 2024 New Relic Corporation. All rights reserved. 
+ * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package software.amazon.awssdk.services.lambda; + +import com.agent.instrumentation.awsjavasdk2.services.lambda.FunctionRawData; +import com.agent.instrumentation.awsjavasdk2.services.lambda.LambdaUtil; +import com.newrelic.api.agent.CloudParameters; +import com.newrelic.api.agent.NewRelic; +import com.newrelic.api.agent.Trace; +import com.newrelic.api.agent.TracedMethod; +import com.newrelic.api.agent.weaver.MatchType; +import com.newrelic.api.agent.weaver.Weave; +import com.newrelic.api.agent.weaver.Weaver; +import software.amazon.awssdk.core.client.config.SdkClientConfiguration; +import software.amazon.awssdk.services.lambda.model.InvokeRequest; +import software.amazon.awssdk.services.lambda.model.InvokeResponse; + +@Weave(type = MatchType.ExactClass, originalName = "software.amazon.awssdk.services.lambda.DefaultLambdaClient") +final class DefaultLambdaClient_Instrumentation { + + private final SdkClientConfiguration clientConfiguration = Weaver.callOriginal(); + + @Trace(leaf = true) + public InvokeResponse invoke(InvokeRequest invokeRequest) { + FunctionRawData functionRawData = new FunctionRawData(invokeRequest.functionName(), invokeRequest.qualifier(), clientConfiguration); + CloudParameters cloudParameters = LambdaUtil.getCloudParameters(functionRawData); + TracedMethod tracedMethod = NewRelic.getAgent().getTracedMethod(); + tracedMethod.reportAsExternal(cloudParameters); + tracedMethod.setMetricName("Lambda", "invoke", LambdaUtil.getSimpleFunctionName(functionRawData)); + return Weaver.callOriginal(); + } +} + diff --git a/instrumentation/aws-java-sdk-lambda-2.1/src/test/java/com/agent/instrumentation/awsjavasdk2/services/lambda/LambdaUtilTest.java b/instrumentation/aws-java-sdk-lambda-2.1/src/test/java/com/agent/instrumentation/awsjavasdk2/services/lambda/LambdaUtilTest.java new file mode 100644 index 0000000000..82b767b55e --- /dev/null +++ b/instrumentation/aws-java-sdk-lambda-2.1/src/test/java/com/agent/instrumentation/awsjavasdk2/services/lambda/LambdaUtilTest.java @@ -0,0 +1,168 @@ +/* + * + * * Copyright 2024 New Relic Corporation. All rights reserved. 
+ * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package com.agent.instrumentation.awsjavasdk2.services.lambda; + +import com.newrelic.api.agent.CloudParameters; +import org.junit.Test; +import software.amazon.awssdk.awscore.client.config.AwsClientOption; +import software.amazon.awssdk.core.client.config.SdkClientConfiguration; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.lambda.model.InvokeRequest; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; + +public class LambdaUtilTest { + + @Test + public void testGetCloudParamFunctionName() { + FunctionRawData functionRawData = new FunctionRawData("my-function", null, getConfig()); + CloudParameters cloudParameters = LambdaUtil.getCloudParameters(functionRawData); + assertNotNull(cloudParameters); + assertEquals("aws_lambda", cloudParameters.getPlatform()); + assertNull(cloudParameters.getResourceId()); + } + + @Test + public void testGetCloudParamPartialArn() { + FunctionRawData functionRawData = new FunctionRawData("123456789012:function:my-function", null, getConfig()); + CloudParameters cloudParameters = LambdaUtil.getCloudParameters(functionRawData); + assertNotNull(cloudParameters); + assertEquals("aws_lambda", cloudParameters.getPlatform()); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function", cloudParameters.getResourceId()); + } + + @Test + public void testGetCloudParamArnQualifier() { + FunctionRawData functionRawData = new FunctionRawData("arn:aws:lambda:us-east-1:123456789012:function:my-function", "alias", getConfig()); + CloudParameters cloudParameters = LambdaUtil.getCloudParameters(functionRawData); + assertNotNull(cloudParameters); + assertEquals("aws_lambda", cloudParameters.getPlatform()); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function:alias", cloudParameters.getResourceId()); + } + + @Test + public void testGetArnFunctionName() { + FunctionProcessedData functionProcessedData = LambdaUtil.processData(new FunctionRawData("my-function", null, getConfig())); + assertEquals("", functionProcessedData.getArn()); + assertEquals("my-function", functionProcessedData.getFunctionName()); + } + + @Test + public void testGetArnFunctionNameWithAlias() { + FunctionProcessedData functionProcessedData = LambdaUtil.processData(new FunctionRawData("my-function:alias", null, getConfig())); + assertEquals("", functionProcessedData.getArn()); + assertEquals("my-function", functionProcessedData.getFunctionName()); + } + + @Test + public void testGetArnFunctionNameWithVersion() { + FunctionProcessedData functionProcessedData = LambdaUtil.processData(new FunctionRawData("my-function:123", null, getConfig())); + assertEquals("", functionProcessedData.getArn()); + assertEquals("my-function", functionProcessedData.getFunctionName()); + } + + @Test + public void testGetArnFunctionNameAndAliasQualifier() { + FunctionProcessedData functionProcessedData = LambdaUtil.processData(new FunctionRawData("my-function", "alias", getConfig())); + assertEquals("", functionProcessedData.getArn()); + assertEquals("my-function", functionProcessedData.getFunctionName()); + } + + @Test + public void testGetArnFunctionNameAndVersionQualifier() { + FunctionProcessedData functionProcessedData = LambdaUtil.processData(new FunctionRawData("my-function", "123", getConfig())); + assertEquals("", functionProcessedData.getArn()); + assertEquals("my-function", 
functionProcessedData.getFunctionName()); + } + + @Test + public void testGetArnPartialArn() { + FunctionProcessedData functionProcessedData = LambdaUtil.processData(new FunctionRawData("123456789012:function:my-function", null, getConfig())); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function", functionProcessedData.getArn()); + assertEquals("my-function", functionProcessedData.getFunctionName()); + } + + @Test + public void testGetArnPartialArnWithAlias() { + FunctionProcessedData functionProcessedData = LambdaUtil.processData(new FunctionRawData("123456789012:function:my-function:alias", null, getConfig())); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function:alias", functionProcessedData.getArn()); + assertEquals("my-function", functionProcessedData.getFunctionName()); + } + + @Test + public void testGetArnPartialArnWithVersion() { + FunctionProcessedData functionProcessedData = LambdaUtil.processData(new FunctionRawData("123456789012:function:my-function:123", null, getConfig())); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function:123", functionProcessedData.getArn()); + assertEquals("my-function", functionProcessedData.getFunctionName()); + } + + @Test + public void testGetArnPartialArnAndAliasQualifier() { + FunctionProcessedData functionProcessedData = LambdaUtil.processData(new FunctionRawData("123456789012:function:my-function", "alias", getConfig())); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function:alias", functionProcessedData.getArn()); + assertEquals("my-function", functionProcessedData.getFunctionName()); + } + + @Test + public void testGetArnPartialArnAndVersionQualifier() { + FunctionProcessedData functionProcessedData = LambdaUtil.processData(new FunctionRawData("123456789012:function:my-function", "123", getConfig())); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function:123", functionProcessedData.getArn()); + assertEquals("my-function", functionProcessedData.getFunctionName()); + } + + @Test + public void testGetArnFullArn() { + FunctionProcessedData functionProcessedData = LambdaUtil.processData(new FunctionRawData("arn:aws:lambda:us-east-1:123456789012:function:my-function", null, getConfig())); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function", functionProcessedData.getArn()); + assertEquals("my-function", functionProcessedData.getFunctionName()); + } + + @Test + public void testGetArnFullArnWithAlias() { + FunctionProcessedData functionProcessedData = LambdaUtil.processData(new FunctionRawData("arn:aws:lambda:us-east-1:123456789012:function:my-function:alias", null, getConfig())); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function:alias", functionProcessedData.getArn()); + assertEquals("my-function", functionProcessedData.getFunctionName()); + } + + @Test + public void testGetArnFullArnWithVersion() { + FunctionProcessedData functionProcessedData = LambdaUtil.processData(new FunctionRawData("arn:aws:lambda:us-east-1:123456789012:function:my-function:123", null, getConfig())); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function:123", functionProcessedData.getArn()); + assertEquals("my-function", functionProcessedData.getFunctionName()); + } + + @Test + public void testGetArnFullArnAndAliasQualifier() { + FunctionProcessedData functionProcessedData = LambdaUtil.processData(new FunctionRawData("arn:aws:lambda:us-east-1:123456789012:function:my-function", "alias", getConfig())); 
+ assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function:alias", functionProcessedData.getArn()); + assertEquals("my-function", functionProcessedData.getFunctionName()); + } + + @Test + public void testGetArnFullArnAndVersionQualifier() { + FunctionProcessedData functionProcessedData = LambdaUtil.processData(new FunctionRawData("arn:aws:lambda:us-east-1:123456789012:function:my-function", "123", getConfig())); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function:123", functionProcessedData.getArn()); + assertEquals("my-function", functionProcessedData.getFunctionName()); + } + + @Test + public void testGetArnDifferentRegion() { + FunctionProcessedData functionProcessedData = LambdaUtil.processData(new FunctionRawData("arn:aws:lambda:us-west-2:123456789012:function:my-function", null, getConfig())); + assertEquals("arn:aws:lambda:us-west-2:123456789012:function:my-function", functionProcessedData.getArn()); + assertEquals("my-function", functionProcessedData.getFunctionName()); + } + + private SdkClientConfiguration getConfig() { + SdkClientConfiguration config = SdkClientConfiguration.builder() + .option(AwsClientOption.AWS_REGION, Region.US_EAST_1) + .build(); + return config; + } +} \ No newline at end of file diff --git a/instrumentation/aws-java-sdk-lambda-2.1/src/test/java/software/amazon/awssdk/services/lambda/DefaultLambdaAsyncClient_InstrumentationTest.java b/instrumentation/aws-java-sdk-lambda-2.1/src/test/java/software/amazon/awssdk/services/lambda/DefaultLambdaAsyncClient_InstrumentationTest.java new file mode 100644 index 0000000000..e6674ef0d6 --- /dev/null +++ b/instrumentation/aws-java-sdk-lambda-2.1/src/test/java/software/amazon/awssdk/services/lambda/DefaultLambdaAsyncClient_InstrumentationTest.java @@ -0,0 +1,147 @@ +/* + * + * * Copyright 2024 New Relic Corporation. All rights reserved. 
+ * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package software.amazon.awssdk.services.lambda; + +import com.newrelic.agent.introspec.InstrumentationTestConfig; +import com.newrelic.agent.introspec.InstrumentationTestRunner; +import com.newrelic.agent.introspec.Introspector; +import com.newrelic.agent.introspec.SpanEvent; +import com.newrelic.agent.introspec.TraceSegment; +import com.newrelic.agent.introspec.TransactionTrace; +import com.newrelic.api.agent.Trace; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.core.async.EmptyPublisher; +import software.amazon.awssdk.http.AbortableInputStream; +import software.amazon.awssdk.http.ExecutableHttpRequest; +import software.amazon.awssdk.http.HttpExecuteRequest; +import software.amazon.awssdk.http.HttpExecuteResponse; +import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.lambda.model.InvokeRequest; +import software.amazon.awssdk.utils.StringInputStream; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +@RunWith(InstrumentationTestRunner.class) +@InstrumentationTestConfig(includePrefixes = {"software.amazon.awssdk.services.lambda"}, configName = "dt_enabled.yml") +public class DefaultLambdaAsyncClient_InstrumentationTest { + + public LambdaAsyncClient lambdaClient; + public HttpExecuteResponse response; + + @Before + public void setup() { + AsyncHttpClient asyncHttpClient = new AsyncHttpClient(); + response = asyncHttpClient.getResponse(); + lambdaClient = LambdaAsyncClient.builder() + .httpClient(asyncHttpClient) + .credentialsProvider(new CredProvider()) + .region(Region.US_EAST_1) + .build(); + } + + @Test + public void testInvokeArn() { + Introspector introspector = InstrumentationTestRunner.getIntrospector(); + txn(); + SpanEvent lambdaSpan = introspector.getSpanEvents().stream() + .filter(span -> span.getName().equals("Lambda/invoke/my-function")) + .findFirst().orElse(null); + assertNotNull(lambdaSpan); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function", lambdaSpan.getAgentAttributes().get("cloud.resource_id")); + + Collection transactionTraces = introspector.getTransactionTracesForTransaction( + "OtherTransaction/Custom/software.amazon.awssdk.services.lambda.DefaultLambdaAsyncClient_InstrumentationTest/txn"); + assertEquals(1,transactionTraces.size()); + TransactionTrace transactionTrace = transactionTraces.iterator().next(); + List children = transactionTrace.getInitialTraceSegment().getChildren(); + assertEquals(1, children.size()); + TraceSegment trace = children.get(0); + assertEquals("Lambda/invoke/my-function", trace.getName()); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function", trace.getTracerAttributes().get("cloud.resource_id")); + } + + @Trace(dispatcher = true) + public void txn() { + InvokeRequest 
request = InvokeRequest.builder() + .functionName("arn:aws:lambda:us-east-1:123456789012:function:my-function") + .build(); + lambdaClient.invoke(request); + } + + + // This mock SdkAsyncHttpClient allows testing the AWS SDK without making actual HTTP requests. + private static class AsyncHttpClient implements SdkAsyncHttpClient { + private ExecutableHttpRequest executableMock; + private HttpExecuteResponse response; + private SdkHttpFullResponse httpResponse; + + public AsyncHttpClient() { + executableMock = mock(ExecutableHttpRequest.class); + response = mock(HttpExecuteResponse.class, Mockito.RETURNS_DEEP_STUBS); + httpResponse = mock(SdkHttpFullResponse.class, Mockito.RETURNS_DEEP_STUBS); + when(response.httpResponse()).thenReturn(httpResponse); + when(httpResponse.toBuilder().content(any()).build()).thenReturn(httpResponse); + when(httpResponse.isSuccessful()).thenReturn(true); + AbortableInputStream inputStream = AbortableInputStream.create(new StringInputStream("Dont panic")); + when(httpResponse.content()).thenReturn(Optional.of(inputStream)); + try { + when(executableMock.call()).thenReturn(response); + + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + public void close() { + } + + @Override + public CompletableFuture execute(AsyncExecuteRequest asyncExecuteRequest) { + asyncExecuteRequest.responseHandler().onStream(new EmptyPublisher<>()); + return new CompletableFuture<>(); + } + + @Override + public String clientName() { + return "MockHttpClient"; + } + + public HttpExecuteResponse getResponse() { + return response; + } + } + + private static class CredProvider implements AwsCredentialsProvider { + @Override + public AwsCredentials resolveCredentials() { + AwsCredentials credentials = mock(AwsCredentials.class); + when(credentials.accessKeyId()).thenReturn("accessKeyId"); + when(credentials.secretAccessKey()).thenReturn("secretAccessKey"); + return credentials; + } + } +} \ No newline at end of file diff --git a/instrumentation/aws-java-sdk-lambda-2.1/src/test/java/software/amazon/awssdk/services/lambda/DefaultLambdaClient_InstrumentationTest.java b/instrumentation/aws-java-sdk-lambda-2.1/src/test/java/software/amazon/awssdk/services/lambda/DefaultLambdaClient_InstrumentationTest.java new file mode 100644 index 0000000000..3db97f6345 --- /dev/null +++ b/instrumentation/aws-java-sdk-lambda-2.1/src/test/java/software/amazon/awssdk/services/lambda/DefaultLambdaClient_InstrumentationTest.java @@ -0,0 +1,145 @@ +/* + * + * * Copyright 2024 New Relic Corporation. All rights reserved. 
+ * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package software.amazon.awssdk.services.lambda; + +import com.newrelic.agent.introspec.InstrumentationTestConfig; +import com.newrelic.agent.introspec.InstrumentationTestRunner; +import com.newrelic.agent.introspec.Introspector; +import com.newrelic.agent.introspec.SpanEvent; +import com.newrelic.agent.introspec.TraceSegment; +import com.newrelic.agent.introspec.TransactionTrace; +import com.newrelic.api.agent.Trace; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.http.AbortableInputStream; +import software.amazon.awssdk.http.ExecutableHttpRequest; +import software.amazon.awssdk.http.HttpExecuteRequest; +import software.amazon.awssdk.http.HttpExecuteResponse; +import software.amazon.awssdk.http.SdkHttpClient; +import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.lambda.model.InvokeRequest; +import software.amazon.awssdk.utils.StringInputStream; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Optional; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +@RunWith(InstrumentationTestRunner.class) +@InstrumentationTestConfig(includePrefixes = {"software.amazon.awssdk.services.lambda"}, configName = "dt_enabled.yml") +public class DefaultLambdaClient_InstrumentationTest { + + public LambdaClient lambdaClient; + public HttpExecuteResponse response; + + @Before + public void setup() { + MockHttpClient mockHttpClient = new MockHttpClient(); + response = mockHttpClient.getResponse(); + lambdaClient = LambdaClient.builder() + .httpClient(mockHttpClient) + .credentialsProvider(new CredProvider()) + .region(Region.US_EAST_1) + .build(); + } + + @Test + public void testInvokeArn() { + Introspector introspector = InstrumentationTestRunner.getIntrospector(); + txn(); + SpanEvent lambdaSpan = introspector.getSpanEvents().stream() + .filter(span -> span.getName().equals("Lambda/invoke/my-function")) + .findFirst().orElse(null); + assertNotNull(lambdaSpan); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function", lambdaSpan.getAgentAttributes().get("cloud.resource_id")); + + Collection transactionTraces = introspector.getTransactionTracesForTransaction( + "OtherTransaction/Custom/software.amazon.awssdk.services.lambda.DefaultLambdaClient_InstrumentationTest/txn"); + assertEquals(1,transactionTraces.size()); + TransactionTrace transactionTrace = transactionTraces.iterator().next(); + List children = transactionTrace.getInitialTraceSegment().getChildren(); + assertEquals(1, children.size()); + TraceSegment trace = children.get(0); + assertEquals("Lambda/invoke/my-function", trace.getName()); + assertEquals("arn:aws:lambda:us-east-1:123456789012:function:my-function", trace.getTracerAttributes().get("cloud.resource_id")); + } + + @Trace(dispatcher = true) + public void txn() { + InvokeRequest request = InvokeRequest.builder() + .functionName("arn:aws:lambda:us-east-1:123456789012:function:my-function") + 
.build();
+ lambdaClient.invoke(request);
+ }
+
+
+ // This mock SdkHttpClient allows testing the AWS SDK without making actual HTTP requests.
+ private static class MockHttpClient implements SdkHttpClient {
+ private ExecutableHttpRequest executableMock;
+ private HttpExecuteResponse response;
+ private SdkHttpFullResponse httpResponse;
+
+ public MockHttpClient() {
+ executableMock = mock(ExecutableHttpRequest.class);
+ response = mock(HttpExecuteResponse.class, Mockito.RETURNS_DEEP_STUBS);
+ httpResponse = mock(SdkHttpFullResponse.class, Mockito.RETURNS_DEEP_STUBS);
+ when(response.httpResponse()).thenReturn(httpResponse);
+ when(httpResponse.toBuilder().content(any()).build()).thenReturn(httpResponse);
+ when(httpResponse.isSuccessful()).thenReturn(true);
+ AbortableInputStream inputStream = AbortableInputStream.create(new StringInputStream("42"));
+ when(httpResponse.content()).thenReturn(Optional.of(inputStream));
+ try {
+ when(executableMock.call()).thenReturn(response);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ @Override
+ public void close() {
+ }
+
+ @Override
+ public ExecutableHttpRequest prepareRequest(HttpExecuteRequest httpExecuteRequest) {
+ return executableMock;
+ }
+
+ @Override
+ public String clientName() {
+ return "MockHttpClient";
+ }
+
+ public HttpExecuteResponse getResponse() {
+ return response;
+ }
+ }
+
+ private static class CredProvider implements AwsCredentialsProvider {
+ @Override
+ public AwsCredentials resolveCredentials() {
+ AwsCredentials credentials = mock(AwsCredentials.class);
+ when(credentials.accessKeyId()).thenReturn("accessKeyId");
+ when(credentials.secretAccessKey()).thenReturn("secretAccessKey");
+ return credentials;
+ }
+ }
+}
\ No newline at end of file
diff --git a/instrumentation/aws-java-sdk-lambda-2.1/src/test/resources/dt_enabled.yml b/instrumentation/aws-java-sdk-lambda-2.1/src/test/resources/dt_enabled.yml
new file mode 100644
index 0000000000..53b0968002
--- /dev/null
+++ b/instrumentation/aws-java-sdk-lambda-2.1/src/test/resources/dt_enabled.yml
@@ -0,0 +1,5 @@
+common: &default_settings
+ distributed_tracing:
+ enabled: true
+ span_events:
+ enabled: true
\ No newline at end of file
diff --git a/instrumentation/flyway-core-8.0.0/README.md b/instrumentation/flyway-core-8.0.0/README.md
new file mode 100644
index 0000000000..9d3f21efaf
--- /dev/null
+++ b/instrumentation/flyway-core-8.0.0/README.md
@@ -0,0 +1,18 @@
+# flyway-core-8.0.0
+
+This instrumentation module will report migration events to New Relic during Flyway migrations initiated via code, including when running
+properly configured Spring/Spring Boot applications.
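+
+For example, a migration kicked off directly through the Flyway API is instrumented like any other. A minimal sketch
+(the JDBC URL and credentials below are placeholders):
+
+```java
+import org.flywaydb.core.Flyway;
+
+Flyway flyway = Flyway.configure()
+        .dataSource("jdbc:postgresql://localhost/mydb", "user", "secret") // placeholder connection details
+        .load();
+flyway.migrate(); // each applied migration fires Event.AFTER_EACH_MIGRATE (or AFTER_EACH_MIGRATE_ERROR on failure)
+```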
+ +The following Flyway migration events are reported: +- Event.AFTER_EACH_MIGRATE +- Event.AFTER_EACH_MIGRATE_ERROR +- Event.AFTER_EACH_UNDO +- Event.AFTER_EACH_UNDO_ERROR + +The custom event type name is `FlywayMigration`, with the following attributes: +- migrationSuccess: `true` if the migration was successful +- migrationFilePhysicalLocation: Path/filename of the migration script +- migrationChecksum: Calculated checksum of the migration +- migrationVersion: Version portion of the migration script filename +- migrationScriptName: Name portion of the migration script filename +- migrationEvent: The underlying Flyway migration event \ No newline at end of file diff --git a/instrumentation/flyway-core-8.0.0/build.gradle b/instrumentation/flyway-core-8.0.0/build.gradle new file mode 100644 index 0000000000..cd4be42c6b --- /dev/null +++ b/instrumentation/flyway-core-8.0.0/build.gradle @@ -0,0 +1,19 @@ + +dependencies { + implementation(project(":agent-bridge")) + implementation("org.flywaydb.enterprise:flyway-core:8.0.0") +} + +jar { + manifest { attributes 'Implementation-Title': 'com.newrelic.instrumentation.flyway-core-8.0.0' } +} + +verifyInstrumentation { + excludeRegex '.*.beta[0-9]' + passes 'org.flywaydb.enterprise:flyway-core:[8.0.0,)' +} + +site { + title 'Flyway' + type 'Datastore' +} \ No newline at end of file diff --git a/instrumentation/flyway-core-8.0.0/src/main/java/com/nr/agent/instrumentation/FlywayUtils.java b/instrumentation/flyway-core-8.0.0/src/main/java/com/nr/agent/instrumentation/FlywayUtils.java new file mode 100644 index 0000000000..6a9f806cde --- /dev/null +++ b/instrumentation/flyway-core-8.0.0/src/main/java/com/nr/agent/instrumentation/FlywayUtils.java @@ -0,0 +1,55 @@ +/* + * + * * Copyright 2024 New Relic Corporation. All rights reserved. 
+ * * SPDX-License-Identifier: Apache-2.0 + * + */ +package com.nr.agent.instrumentation; + +import com.newrelic.api.agent.NewRelic; +import org.flywaydb.core.api.MigrationInfo; +import org.flywaydb.core.api.callback.Event; + +import java.util.EnumMap; +import java.util.HashMap; +import java.util.Map; + +public class FlywayUtils { + private static final String EVENT_NAME = "FlywayMigration"; + public static final String ATTR_SUCCESS = "migrationSuccess"; + public static final String ATTR_PHYSICAL_LOCATION = "migrationFilePhysicalLocation"; + public static final String ATTR_CHECKSUM = "migrationChecksum"; + public static final String ATTR_VERSION = "migrationVersion"; + public static final String ATTR_SCRIPT = "migrationScriptName"; + public static final String FLYWAY_EVENT = "migrationEvent"; + + // Map of Flyway events we're interested in with a boolean value that denotes a + // successful or failed migration step + private static final EnumMap TARGET_EVENTS = new EnumMap<>(Event.class); + static { + TARGET_EVENTS.put(Event.AFTER_EACH_MIGRATE, true); + TARGET_EVENTS.put(Event.AFTER_EACH_MIGRATE_ERROR, false); + TARGET_EVENTS.put(Event.AFTER_EACH_UNDO, true); + TARGET_EVENTS.put(Event.AFTER_EACH_UNDO_ERROR, false); + } + + public static boolean isTargetEvent(Event event) { + return TARGET_EVENTS.containsKey(event); + } + + public static void submitFlywayEvent(Event event, MigrationInfo migrationInfo) { + Map attributes = new HashMap<>(); + attributes.put(ATTR_SUCCESS, isSuccessfulEvent(event).toString()); + attributes.put(ATTR_PHYSICAL_LOCATION, migrationInfo.getPhysicalLocation()); + attributes.put(ATTR_CHECKSUM, migrationInfo.getChecksum().toString()); + attributes.put(ATTR_VERSION, migrationInfo.getVersion().getVersion()); + attributes.put(ATTR_SCRIPT, migrationInfo.getScript()); + attributes.put(FLYWAY_EVENT, event.toString()); + NewRelic.getAgent().getInsights().recordCustomEvent(EVENT_NAME, attributes); + } + + private static Boolean isSuccessfulEvent(Event event) { + return TARGET_EVENTS.get(event); + } + +} diff --git a/instrumentation/flyway-core-8.0.0/src/main/java/org/flywaydb/core/internal/callback/DefaultCallbackExecutor_Instrumentation.java b/instrumentation/flyway-core-8.0.0/src/main/java/org/flywaydb/core/internal/callback/DefaultCallbackExecutor_Instrumentation.java new file mode 100644 index 0000000000..395739e8e3 --- /dev/null +++ b/instrumentation/flyway-core-8.0.0/src/main/java/org/flywaydb/core/internal/callback/DefaultCallbackExecutor_Instrumentation.java @@ -0,0 +1,34 @@ +/* + * + * * Copyright 2024 New Relic Corporation. All rights reserved. 
+ * * SPDX-License-Identifier: Apache-2.0 + * + */ +package org.flywaydb.core.internal.callback; + +import com.newrelic.api.agent.NewRelic; +import com.newrelic.api.agent.weaver.MatchType; +import com.newrelic.api.agent.weaver.Weave; +import com.newrelic.api.agent.weaver.Weaver; +import com.nr.agent.instrumentation.FlywayUtils; +import org.flywaydb.core.api.MigrationInfo; +import org.flywaydb.core.api.callback.Event; + +import java.util.logging.Level; + +@Weave(type = MatchType.ExactClass, originalName = "org.flywaydb.core.internal.callback.DefaultCallbackExecutor") +public class DefaultCallbackExecutor_Instrumentation { + + private MigrationInfo migrationInfo; + + public void onEachMigrateOrUndoEvent(Event event) { + if (FlywayUtils.isTargetEvent(event)) { + if (NewRelic.getAgent().getLogger().isLoggable(Level.FINEST)) { + NewRelic.getAgent().getLogger().log(Level.FINEST, "Adding custom Flyway migration event: {0}", event); + } + FlywayUtils.submitFlywayEvent(event, migrationInfo); + } + + Weaver.callOriginal(); + } +} diff --git a/instrumentation/flyway-core-8.0.0/src/test/java/com/nr/agent/instrumentation/FlywayUtilsTest.java b/instrumentation/flyway-core-8.0.0/src/test/java/com/nr/agent/instrumentation/FlywayUtilsTest.java new file mode 100644 index 0000000000..01af608d84 --- /dev/null +++ b/instrumentation/flyway-core-8.0.0/src/test/java/com/nr/agent/instrumentation/FlywayUtilsTest.java @@ -0,0 +1,26 @@ +/* + * + * * Copyright 2020 New Relic Corporation. All rights reserved. + * * SPDX-License-Identifier: Apache-2.0 + * + */ +package com.nr.agent.instrumentation; + +import org.flywaydb.core.api.callback.Event; +import org.junit.Test; + +import static org.junit.Assert.*; + +public class FlywayUtilsTest { + @Test + public void isTargetEvent_returnsTrue_forTargetedEvent() { + assertTrue(FlywayUtils.isTargetEvent(Event.AFTER_EACH_MIGRATE)); + assertTrue(FlywayUtils.isTargetEvent(Event.AFTER_EACH_UNDO)); + } + + @Test + public void isTargetEvent_returnsFalse_forNonTargetedEvent() { + assertFalse(FlywayUtils.isTargetEvent(Event.AFTER_CLEAN)); + assertFalse(FlywayUtils.isTargetEvent(Event.AFTER_CLEAN_ERROR)); + } +} diff --git a/instrumentation/glassfish-jul-extension-7/README.md b/instrumentation/glassfish-jul-extension-7/README.md new file mode 100644 index 0000000000..84fd4dfe15 --- /dev/null +++ b/instrumentation/glassfish-jul-extension-7/README.md @@ -0,0 +1,5 @@ +# glassfish-jul-extension-7 + +Glassfish uses JUL as its default logger but the `GlassFishLogger` extends `java.util.logging.Logger` in a way that prevents the `java.logging-jdk8` instrumentation from applying. + +Essentially, when using the `glassfish-jul-extension` logging library, the `java.logging-jdk8` instrumentation will work for local decorating but fail to apply for the log forwarding and log metrics functionality. This instrumentation module provides the missing functionality when using the `glassfish-jul-extension` logging library by weaving `org.glassfish.main.jul.GlassFishLogger` to forward logs and generate log metrics. 
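+
+Log forwarding and log metrics honor the agent's standard `application_logging` configuration. A minimal sketch of the
+relevant settings (shown here with everything enabled), in the same style as this module's test configs:
+
+```yaml
+common: &default_settings
+  application_logging:
+    enabled: true
+    forwarding:
+      enabled: true
+    metrics:
+      enabled: true
+```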
diff --git a/instrumentation/glassfish-jul-extension-7/build.gradle b/instrumentation/glassfish-jul-extension-7/build.gradle new file mode 100644 index 0000000000..095ac02d23 --- /dev/null +++ b/instrumentation/glassfish-jul-extension-7/build.gradle @@ -0,0 +1,26 @@ +dependencies { + implementation(project(":agent-bridge")) + // Only supports Java 11+ + implementation 'org.glassfish.main:glassfish-jul-extension:7.0.17' +} + +jar { + manifest { attributes 'Implementation-Title': 'com.newrelic.instrumentation.glassfish-jul-extension-7' } +} + +verifyInstrumentation { + passesOnly 'org.glassfish.main:glassfish-jul-extension:[7.0.0-M10,)' + excludeRegex 'org.glassfish.main:glassfish-jul-extension:.*(-M).*' +} + +java { + toolchain { + languageVersion.set(JavaLanguageVersion.of(11)) + } +} + +site { + title 'Glassfish JUL Extension' + type 'Other' + versionOverride '[7.0.0,)' +} diff --git a/instrumentation/glassfish-jul-extension-7/src/main/java/com/nr/instrumentation/glassfish/jul/AgentUtil.java b/instrumentation/glassfish-jul-extension-7/src/main/java/com/nr/instrumentation/glassfish/jul/AgentUtil.java new file mode 100644 index 0000000000..068709b1e2 --- /dev/null +++ b/instrumentation/glassfish-jul-extension-7/src/main/java/com/nr/instrumentation/glassfish/jul/AgentUtil.java @@ -0,0 +1,108 @@ +/* + * + * * Copyright 2024 New Relic Corporation. All rights reserved. + * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package com.nr.instrumentation.glassfish.jul; + +import com.newrelic.agent.bridge.AgentBridge; +import com.newrelic.agent.bridge.logging.LogAttributeKey; + +import java.util.HashMap; +import java.util.Map; +import java.util.logging.Level; +import java.util.logging.LogRecord; + +import static com.newrelic.agent.bridge.logging.AppLoggingUtils.DEFAULT_NUM_OF_LOG_EVENT_ATTRIBUTES; +import static com.newrelic.agent.bridge.logging.AppLoggingUtils.ERROR_CLASS; +import static com.newrelic.agent.bridge.logging.AppLoggingUtils.ERROR_MESSAGE; +import static com.newrelic.agent.bridge.logging.AppLoggingUtils.ERROR_STACK; +import static com.newrelic.agent.bridge.logging.AppLoggingUtils.INSTRUMENTATION; +import static com.newrelic.agent.bridge.logging.AppLoggingUtils.LEVEL; +import static com.newrelic.agent.bridge.logging.AppLoggingUtils.LOGGER_FQCN; +import static com.newrelic.agent.bridge.logging.AppLoggingUtils.LOGGER_NAME; +import static com.newrelic.agent.bridge.logging.AppLoggingUtils.MESSAGE; +import static com.newrelic.agent.bridge.logging.AppLoggingUtils.THREAD_ID; +import static com.newrelic.agent.bridge.logging.AppLoggingUtils.THREAD_NAME; +import static com.newrelic.agent.bridge.logging.AppLoggingUtils.TIMESTAMP; +import static com.newrelic.agent.bridge.logging.AppLoggingUtils.UNKNOWN; + +public class AgentUtil { + + /** + * Record a LogEvent to be sent to New Relic. 
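+ * Only creates an event if the record carries a log message or a throwable; standard JUL record fields
+ * (level, thread, logger name, source class) are mapped to New Relic log attributes.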
+ * + * @param record to parse + */ + public static void recordNewRelicLogEvent(LogRecord record) { + if (record != null) { + String message = record.getMessage(); + Throwable throwable = record.getThrown(); + + if (shouldCreateLogEvent(message, throwable)) { + // JUL does not directly support MDC, so we only initialize the map size based on standard attributes + Map logEventMap = new HashMap<>(DEFAULT_NUM_OF_LOG_EVENT_ATTRIBUTES); + logEventMap.put(INSTRUMENTATION, "glassfish-jul-extension-7"); + logEventMap.put(MESSAGE, message); + logEventMap.put(TIMESTAMP, record.getMillis()); + + Level level = record.getLevel(); + if (level != null) { + String levelName = level.getName(); + if (levelName.isEmpty()) { + logEventMap.put(LEVEL, UNKNOWN); + } else { + logEventMap.put(LEVEL, levelName); + } + } + + String errorStack = ExceptionUtil.getErrorStack(throwable); + if (errorStack != null) { + logEventMap.put(ERROR_STACK, errorStack); + } + + String errorMessage = ExceptionUtil.getErrorMessage(throwable); + if (errorMessage != null) { + logEventMap.put(ERROR_MESSAGE, errorMessage); + } + + String errorClass = ExceptionUtil.getErrorClass(throwable); + if (errorClass != null) { + logEventMap.put(ERROR_CLASS, errorClass); + } + + String threadName = Thread.currentThread().getName(); + if (threadName != null) { + logEventMap.put(THREAD_NAME, threadName); + } + + logEventMap.put(THREAD_ID, record.getThreadID()); + + String loggerName = record.getLoggerName(); + if (loggerName != null) { + logEventMap.put(LOGGER_NAME, loggerName); + } + + String loggerFqcn = record.getSourceClassName(); + if (loggerFqcn != null) { + logEventMap.put(LOGGER_FQCN, loggerFqcn); + } + + AgentBridge.getAgent().getLogSender().recordLogEvent(logEventMap); + } + } + } + + /** + * A LogEvent MUST NOT be reported if neither a log message nor an error is logged. If either is present report the LogEvent. + * + * @param message Message to validate + * @param throwable Throwable to validate + * @return true if a LogEvent should be created, otherwise false + */ + private static boolean shouldCreateLogEvent(String message, Throwable throwable) { + return (message != null) || !ExceptionUtil.isThrowableNull(throwable); + } +} diff --git a/instrumentation/glassfish-jul-extension-7/src/main/java/com/nr/instrumentation/glassfish/jul/ExceptionUtil.java b/instrumentation/glassfish-jul-extension-7/src/main/java/com/nr/instrumentation/glassfish/jul/ExceptionUtil.java new file mode 100644 index 0000000000..9dd9c4546c --- /dev/null +++ b/instrumentation/glassfish-jul-extension-7/src/main/java/com/nr/instrumentation/glassfish/jul/ExceptionUtil.java @@ -0,0 +1,67 @@ +/* + * + * * Copyright 2024 New Relic Corporation. All rights reserved. + * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package com.nr.instrumentation.glassfish.jul; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +public class ExceptionUtil { + public static final int MAX_STACK_SIZE = 300; + + public static boolean isThrowableNull(Throwable throwable) { + return throwable == null; + } + + public static String getErrorStack(Throwable throwable) { + if (throwable == null) { + return null; + } + + Throwable t = throwable; + List lines = new ArrayList<>(); + boolean inner = false; + while (t != null) { + if (inner) { + lines.add(" caused by: " + t.getClass().getName() + ": " + t.getMessage()); + } + lines.addAll(stackTracesToStrings(t.getStackTrace())); + t = t.equals(t.getCause()) ? 
null : t.getCause(); + inner = true; + } + + return String.join("\n", lines.subList(0, Math.min(lines.size(), MAX_STACK_SIZE))); + } + + public static String getErrorMessage(Throwable throwable) { + if (throwable == null) { + return null; + } + return throwable.getMessage(); + } + + public static String getErrorClass(Throwable throwable) { + if (throwable == null) { + return null; + } + return throwable.getClass().getName(); + } + + private static Collection stackTracesToStrings(StackTraceElement[] stackTraces) { + if (stackTraces == null || stackTraces.length == 0) { + return Collections.emptyList(); + } + List lines = new ArrayList<>(stackTraces.length); + for (StackTraceElement e : stackTraces) { + lines.add(" at " + e.toString()); + } + + return lines; + } +} diff --git a/instrumentation/glassfish-jul-extension-7/src/main/java/org/glassfish/main/jul/GlassFishLogger_Instrumentation.java b/instrumentation/glassfish-jul-extension-7/src/main/java/org/glassfish/main/jul/GlassFishLogger_Instrumentation.java new file mode 100644 index 0000000000..592ecd1b0b --- /dev/null +++ b/instrumentation/glassfish-jul-extension-7/src/main/java/org/glassfish/main/jul/GlassFishLogger_Instrumentation.java @@ -0,0 +1,74 @@ +/* + * + * * Copyright 2024 New Relic Corporation. All rights reserved. + * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package org.glassfish.main.jul; + +import com.newrelic.api.agent.NewRelic; +import com.newrelic.api.agent.weaver.MatchType; +import com.newrelic.api.agent.weaver.NewField; +import com.newrelic.api.agent.weaver.Weave; +import com.newrelic.api.agent.weaver.Weaver; + +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.logging.Level; +import java.util.logging.LogRecord; +import java.util.logging.Logger; + +import static com.newrelic.agent.bridge.logging.AppLoggingUtils.isApplicationLoggingEnabled; +import static com.newrelic.agent.bridge.logging.AppLoggingUtils.isApplicationLoggingForwardingEnabled; +import static com.newrelic.agent.bridge.logging.AppLoggingUtils.isApplicationLoggingMetricsEnabled; +import static com.nr.instrumentation.glassfish.jul.AgentUtil.recordNewRelicLogEvent; +import static java.util.Objects.requireNonNull; + +@Weave(type = MatchType.ExactClass, originalName = "org.glassfish.main.jul.GlassFishLogger") +public class GlassFishLogger_Instrumentation extends Logger { + @NewField + public static AtomicBoolean instrumented = new AtomicBoolean(false); + + protected GlassFishLogger_Instrumentation(final String name) { + super(name, null); + + // Generate the instrumentation module supportability metric only once + if (!instrumented.getAndSet(true)) { + NewRelic.incrementCounter("Supportability/Logging/Java/GlassFishJUL/enabled"); + } + } + + GlassFishLogger_Instrumentation(final Logger logger) { + // resource bundle name is taken from the set resource bundle + super(requireNonNull(logger, "logger is null!").getName(), null); + + // Generate the instrumentation module supportability metric only once + if (!instrumented.getAndSet(true)) { + NewRelic.incrementCounter("Supportability/Logging/Java/GlassFishJUL/enabled"); + } + } + + // Check if a message of the given level would actually be logged by this logger. + // This check is based on the Loggers effective level, which may be inherited from its parent. 
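+ // Wrapping Weaver.callOriginal() in Boolean.TRUE.equals(...) keeps the primitive boolean
+ // return null-safe while still delegating the actual level check to GlassFish.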
+ public boolean isLoggable(Level level) { + return Boolean.TRUE.equals(Weaver.callOriginal()); + } + + void checkAndLog(LogRecord record) { + // Do nothing if application_logging.enabled: false + if (isApplicationLoggingEnabled()) { + + boolean shouldLog = isLoggable(record.getLevel()) && (getFilter() == null || getFilter().isLoggable(record)); + if (isApplicationLoggingMetricsEnabled() && shouldLog) { + // Generate log level metrics + NewRelic.incrementCounter("Logging/lines"); + NewRelic.incrementCounter("Logging/lines/" + record.getLevel().toString()); + } + if (isApplicationLoggingForwardingEnabled() && shouldLog) { + // Record and send LogEvent to New Relic + recordNewRelicLogEvent(record); + } + } + Weaver.callOriginal(); + } +} diff --git a/instrumentation/jdbc-sqlserver/build.gradle b/instrumentation/jdbc-sqlserver/build.gradle index 46c6b5df8d..3b4c3ad056 100644 --- a/instrumentation/jdbc-sqlserver/build.gradle +++ b/instrumentation/jdbc-sqlserver/build.gradle @@ -8,7 +8,8 @@ jar { } verifyInstrumentation { - passes("com.microsoft.sqlserver:mssql-jdbc:[0,12.7.0)") + passesOnly("com.microsoft.sqlserver:mssql-jdbc:[0,)") + exclude("com.microsoft.sqlserver:mssql-jdbc:12.7.0") // 12.7.0 is a pre-release version excludeRegex(".*jre9.*") excludeRegex(".*jre1\\d.*") excludeRegex(".*preview.*") diff --git a/instrumentation/kafka-clients-metrics-3.7.0/build.gradle b/instrumentation/kafka-clients-metrics-3.7.0/build.gradle new file mode 100644 index 0000000000..e5694cc339 --- /dev/null +++ b/instrumentation/kafka-clients-metrics-3.7.0/build.gradle @@ -0,0 +1,21 @@ + +dependencies { + implementation(project(":agent-bridge")) + implementation("org.apache.kafka:kafka-clients:3.7.0") + + testImplementation("org.testcontainers:kafka:1.16.3") +} + +jar { + manifest { attributes 'Implementation-Title': 'com.newrelic.instrumentation.kafka-clients-metrics-3.7.0', + 'Implementation-Title-Alias': 'kafka-clients-metrics' } +} + +verifyInstrumentation { + passesOnly 'org.apache.kafka:kafka-clients:[3.7.0,)' +} + +site { + title 'Kafka' + type 'Messaging' +} diff --git a/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/CallbackWrapper.java b/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/CallbackWrapper.java new file mode 100644 index 0000000000..259f8f3d6f --- /dev/null +++ b/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/CallbackWrapper.java @@ -0,0 +1,41 @@ +/* + * + * * Copyright 2022 New Relic Corporation. All rights reserved. 
+ * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package com.nr.instrumentation.kafka; + +import com.newrelic.api.agent.NewRelic; +import org.apache.kafka.clients.producer.Callback; +import org.apache.kafka.clients.producer.RecordMetadata; + +import java.util.HashMap; +import java.util.Map; + +public class CallbackWrapper implements Callback { + + private final Callback callback; + private final String topic; + + public CallbackWrapper(Callback callback, String topic) { + this.callback = callback; + this.topic = topic; + } + + @Override + public void onCompletion(RecordMetadata metadata, Exception exception) { + try { + if (exception != null) { + Map atts = new HashMap<>(); + atts.put("topic_name", topic); + NewRelic.noticeError(exception, atts); + } + } catch (Throwable t) { + } + + this.callback.onCompletion(metadata, exception); + } + +} diff --git a/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/Metrics.java b/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/Metrics.java new file mode 100644 index 0000000000..f26455ee44 --- /dev/null +++ b/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/Metrics.java @@ -0,0 +1,20 @@ +/* + * + * * Copyright 2022 New Relic Corporation. All rights reserved. + * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package com.nr.instrumentation.kafka; + +public class Metrics { + + // Serialization/Deserialization metrics + public static final String DESERIALIZATION_TIME_METRIC_BASE = "MessageBroker/Kafka/Deserialization/"; + public static final String SERIALIZATION_TIME_METRIC_BASE = "MessageBroker/Kafka/Serialization/"; + + // Rebalance metrics + public static final String REBALANCE_REVOKED_BASE = "MessageBroker/Kafka/Rebalance/Revoked/"; + public static final String REBALANCE_ASSIGNED_BASE = "MessageBroker/Kafka/Rebalance/Assigned/"; + +} diff --git a/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/MetricsConstants.java b/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/MetricsConstants.java new file mode 100644 index 0000000000..20b3c2b54e --- /dev/null +++ b/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/MetricsConstants.java @@ -0,0 +1,23 @@ +/* + * + * * Copyright 2023 New Relic Corporation. All rights reserved. 
+ * * SPDX-License-Identifier: Apache-2.0 + * + */ +package com.nr.instrumentation.kafka; + +import com.newrelic.api.agent.NewRelic; + +public class MetricsConstants { + public static final boolean KAFKA_METRICS_DEBUG = NewRelic.getAgent().getConfig().getValue("kafka.metrics.debug.enabled", false); + + public static final boolean METRICS_AS_EVENTS = NewRelic.getAgent().getConfig().getValue("kafka.metrics.as_events.enabled", false); + + public static final long REPORTING_INTERVAL_IN_SECONDS = NewRelic.getAgent().getConfig().getValue("kafka.metrics.interval", 30); + + public static final String METRIC_PREFIX = "MessageBroker/Kafka/Internal/"; + + public static final String METRICS_EVENT_TYPE = "KafkaMetrics"; + + public static final String NODE_PREFIX = "MessageBroker/Kafka/Nodes/"; +} diff --git a/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/MetricsScheduler.java b/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/MetricsScheduler.java new file mode 100644 index 0000000000..45afc0a0fd --- /dev/null +++ b/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/MetricsScheduler.java @@ -0,0 +1,101 @@ +/* + * + * * Copyright 2023 New Relic Corporation. All rights reserved. + * * SPDX-License-Identifier: Apache-2.0 + * + */ +package com.nr.instrumentation.kafka; + +import com.newrelic.agent.bridge.AgentBridge; +import com.newrelic.api.agent.NewRelic; +import org.apache.kafka.common.metrics.KafkaMetric; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.logging.Level; + +import static com.nr.instrumentation.kafka.MetricsConstants.KAFKA_METRICS_DEBUG; +import static com.nr.instrumentation.kafka.MetricsConstants.METRICS_AS_EVENTS; +import static com.nr.instrumentation.kafka.MetricsConstants.METRICS_EVENT_TYPE; +import static com.nr.instrumentation.kafka.MetricsConstants.METRIC_PREFIX; +import static com.nr.instrumentation.kafka.MetricsConstants.REPORTING_INTERVAL_IN_SECONDS; + +public class MetricsScheduler { + private static final ScheduledExecutorService executor = createScheduledExecutor(); + private static final Map> metricReporterTasks = new ConcurrentHashMap<>(); + + private MetricsScheduler() {} + + public static void addMetricsReporter(NewRelicMetricsReporter metricsReporter) { + ScheduledFuture task = executor.scheduleAtFixedRate(new MetricsSendRunnable(metricsReporter), + 0L, REPORTING_INTERVAL_IN_SECONDS, TimeUnit.SECONDS); + metricReporterTasks.put(metricsReporter, task); + } + + public static void removeMetricsReporter(NewRelicMetricsReporter metricsReporter) { + ScheduledFuture task = metricReporterTasks.remove(metricsReporter); + task.cancel(false); + } + + private static ScheduledExecutorService createScheduledExecutor() { + return Executors.newSingleThreadScheduledExecutor(runnable -> { + final Thread thread = new Thread(runnable); + thread.setDaemon(true); + thread.setName("NewRelicMetricsReporter-Kafka"); + return thread; + }); + } + + private static class MetricsSendRunnable implements Runnable { + private final NewRelicMetricsReporter nrMetricsReporter; + + private MetricsSendRunnable(NewRelicMetricsReporter nrMetricsReporter) { + this.nrMetricsReporter = nrMetricsReporter; + } + @Override + public void run() { + try { + Map 
eventData = new HashMap<>(); + for (final Map.Entry metric : nrMetricsReporter.getMetrics().entrySet()) { + Object metricValue = metric.getValue().metricValue(); + if (metricValue instanceof Number) { + final float value = ((Number) metricValue).floatValue(); + if (KAFKA_METRICS_DEBUG) { + AgentBridge.getAgent().getLogger().log(Level.FINEST, "getMetric: {0} = {1}", metric.getKey(), value); + } + if (!Float.isNaN(value) && !Float.isInfinite(value)) { + if (METRICS_AS_EVENTS) { + eventData.put(metric.getKey().replace('/', '.'), value); + } else { + NewRelic.recordMetric(METRIC_PREFIX + metric.getKey(), value); + } + } + } + } + + for (NewRelicMetricsReporter.NodeMetricNames consumerNodeMetricNames : nrMetricsReporter.getNodes().values()) { + if (METRICS_AS_EVENTS) { + for (String eventName : consumerNodeMetricNames.getEventNames()) { + eventData.put(eventName, 1f); + } + } else { + for (String metricName : consumerNodeMetricNames.getMetricNames()) { + NewRelic.recordMetric(metricName, 1f); + } + } + } + + if (METRICS_AS_EVENTS) { + NewRelic.getAgent().getInsights().recordCustomEvent(METRICS_EVENT_TYPE, eventData); + } + } catch (Exception e) { + AgentBridge.getAgent().getLogger().log(Level.FINE, e, "Unable to record kafka metrics"); + } + } + } +} diff --git a/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/NewRelicMetricsReporter.java b/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/NewRelicMetricsReporter.java new file mode 100644 index 0000000000..34aff98fc2 --- /dev/null +++ b/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/NewRelicMetricsReporter.java @@ -0,0 +1,189 @@ +/* + * + * * Copyright 2022 New Relic Corporation. All rights reserved. 
+ * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package com.nr.instrumentation.kafka; + +import com.newrelic.agent.bridge.AgentBridge; +import org.apache.kafka.common.metrics.KafkaMetric; +import org.apache.kafka.common.metrics.MetricsReporter; + +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.logging.Level; + +import static com.nr.instrumentation.kafka.MetricsConstants.KAFKA_METRICS_DEBUG; +import static com.nr.instrumentation.kafka.MetricsConstants.NODE_PREFIX; + +public class NewRelicMetricsReporter implements MetricsReporter { + + + private final Map metrics = new ConcurrentHashMap<>(); + + private final Map nodes; + + public NewRelicMetricsReporter() { + this.nodes = Collections.emptyMap(); + } + + public NewRelicMetricsReporter(Set nodes, Mode mode) { + this.nodes = new ConcurrentHashMap<>(nodes.size()); + for(String node: nodes) { + this.nodes.put(node, new NodeMetricNames(node, mode)); + } + } + + public Map getMetrics() { + return this.metrics; + } + + public Map getNodes() { + return nodes; + } + + @Override + public void init(final List initMetrics) { + for (KafkaMetric kafkaMetric : initMetrics) { + String metricGroupAndName = getMetricGroupAndName(kafkaMetric); + if (KAFKA_METRICS_DEBUG) { + AgentBridge.getAgent().getLogger().log(Level.FINEST, "init(): {0} = {1}", metricGroupAndName, kafkaMetric.metricName()); + } + metrics.put(metricGroupAndName, kafkaMetric); + } + MetricsScheduler.addMetricsReporter(this); + } + + @Override + public void metricChange(final KafkaMetric metric) { + String metricGroupAndName = getMetricGroupAndName(metric); + if (KAFKA_METRICS_DEBUG) { + AgentBridge.getAgent().getLogger().log(Level.FINEST, "metricChange(): {0} = {1}", metricGroupAndName, metric.metricName()); + } + metrics.put(metricGroupAndName, metric); + } + + @Override + public void metricRemoval(final KafkaMetric metric) { + String metricGroupAndName = getMetricGroupAndName(metric); + if (KAFKA_METRICS_DEBUG) { + AgentBridge.getAgent().getLogger().log(Level.FINEST, "metricRemoval(): {0} = {1}", metricGroupAndName, metric.metricName()); + } + metrics.remove(metricGroupAndName); + } + + private String getMetricGroupAndName(final KafkaMetric metric) { + if (metric.metricName().tags().containsKey("topic")) { + String topic = metric.metricName().tags().get("topic"); + addTopicToNodeMetrics(topic); + + // Special case for handling topic names in metrics + return metric.metricName().group() + "/" + topic + "/" + metric.metricName().name(); + } + return metric.metricName().group() + "/" + metric.metricName().name(); + } + + private void addTopicToNodeMetrics(String topic) { + for (NodeMetricNames nodeMetricNames : nodes.values()) { + nodeMetricNames.addMetricNameForTopic(topic); + } + } + + @Override + public void close() { + MetricsScheduler.removeMetricsReporter(this); + metrics.clear(); + } + + @Override + public void configure(final Map configs) { + } + + /** + * This class is used to track all the metric names that are related to a specific node: + * + * - MessageBroker/Kafka/Nodes/host:port + * - MessageBroker/Kafka/Nodes/host:port/Consume/topicName + * - MessageBroker/Kafka/Nodes/host:port/Produce/topicName + * + * At initialization time we only have the node and the mode (is this a metrics reporter + * for a Kafka consumer or for a Kafka producer?). 
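+ *
+ * For example (hypothetical broker address), a CONSUMER-mode reporter created for node
+ * localhost:9092 that later discovers the topic "orders" ends up tracking:
+ *
+ * - MessageBroker/Kafka/Nodes/localhost:9092
+ * - MessageBroker/Kafka/Nodes/localhost:9092/Consume/orders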
+ *
+ * Then, as topics are discovered through the metricChange method, the topic metric names are
+ * generated. This is the best way we have to keep track of the topics since they're not
+ * available when the KafkaConsumer/KafkaProducer is initialized.
+ *
+ * For KafkaConsumer, the SubscriptionState doesn't contain the topics and partitions
+ * at initialization time because it takes time for the rebalance to happen.
+ *
+ * For KafkaProducer, topics are dynamic since a producer could send records to any
+ * topic and the concept of subscription doesn't exist there.
+ *
+ * Alternatively, we could get the topics from the records in KafkaProducer.doSend or
+ * KafkaConsumer.poll, and call NewRelicMetricsReporter.addTopicToNodeMetrics from there.
+ * That approach would have a small performance impact, and getting the topics from the
+ * KafkaMetrics is a good enough solution.
+ */
+ public static class NodeMetricNames {
+
+ private final String node;
+ private final Mode mode;
+
+ private final Set<String> topics = new HashSet<>();
+
+ private final Set<String> metricNames = new HashSet<>();
+ private final Set<String> eventNames = new HashSet<>();
+
+ public NodeMetricNames(String node, Mode mode) {
+ this.node = node;
+ this.mode = mode;
+
+ String nodeMetricName = NODE_PREFIX + node;
+ metricNames.add(nodeMetricName);
+ eventNames.add(getEventNameForMetric(nodeMetricName));
+ }
+
+ private void addMetricNameForTopic(String topic) {
+ if (!topics.contains(topic)) {
+ String nodeTopicMetricName = NODE_PREFIX + node + "/" + mode.getMetricSegmentName() + "/" + topic;
+ metricNames.add(nodeTopicMetricName);
+ eventNames.add(getEventNameForMetric(nodeTopicMetricName));
+
+ topics.add(topic);
+ }
+ }
+
+ private String getEventNameForMetric(String metricName) {
+ return metricName.replace('/', '.');
+ }
+
+ public Set<String> getMetricNames() {
+ return metricNames;
+ }
+
+ public Set<String> getEventNames() {
+ return eventNames;
+ }
+ }
+
+ public enum Mode {
+ CONSUMER("Consume"),
+ PRODUCER("Produce");
+
+ private final String metricSegmentName;
+
+ Mode(String metricSegmentName) {
+ this.metricSegmentName = metricSegmentName;
+ }
+
+ public String getMetricSegmentName() {
+ return metricSegmentName;
+ }
+ }
+}
diff --git a/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/org/apache/kafka/clients/consumer/ConsumerRebalanceListener_Instrumentation.java b/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/org/apache/kafka/clients/consumer/ConsumerRebalanceListener_Instrumentation.java
new file mode 100644
index 0000000000..8a1ffdfbf8
--- /dev/null
+++ b/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/org/apache/kafka/clients/consumer/ConsumerRebalanceListener_Instrumentation.java
@@ -0,0 +1,38 @@
+/*
+ *
+ * * Copyright 2022 New Relic Corporation. All rights reserved.
+ * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package org.apache.kafka.clients.consumer; + +import com.newrelic.api.agent.NewRelic; +import com.newrelic.api.agent.weaver.MatchType; +import com.newrelic.api.agent.weaver.Weave; +import com.newrelic.api.agent.weaver.Weaver; +import org.apache.kafka.common.TopicPartition; + +import java.util.Collection; + +import static com.nr.instrumentation.kafka.Metrics.REBALANCE_ASSIGNED_BASE; +import static com.nr.instrumentation.kafka.Metrics.REBALANCE_REVOKED_BASE; + +@Weave(type = MatchType.Interface, originalName = "org.apache.kafka.clients.consumer.ConsumerRebalanceListener") +public class ConsumerRebalanceListener_Instrumentation { + + public void onPartitionsRevoked(Collection partitions) { + for (TopicPartition topicPartition : partitions) { + NewRelic.incrementCounter(REBALANCE_REVOKED_BASE + topicPartition.topic() + "/" + topicPartition.partition()); + } + Weaver.callOriginal(); + } + + public void onPartitionsAssigned(Collection partitions) { + for (TopicPartition topicPartition : partitions) { + NewRelic.incrementCounter(REBALANCE_ASSIGNED_BASE + topicPartition.topic() + "/" + topicPartition.partition()); + } + Weaver.callOriginal(); + } + +} diff --git a/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer_Instrumentation.java b/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer_Instrumentation.java new file mode 100644 index 0000000000..6749a43e65 --- /dev/null +++ b/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer_Instrumentation.java @@ -0,0 +1,98 @@ +/* + * + * * Copyright 2022 New Relic Corporation. All rights reserved. + * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package org.apache.kafka.clients.consumer; + +import java.time.Duration; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.apache.kafka.clients.consumer.internals.ConsumerMetadata; +import org.apache.kafka.common.errors.WakeupException; +import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.utils.Timer; +import org.apache.kafka.common.Node; + +import com.newrelic.agent.bridge.AgentBridge; +import com.newrelic.api.agent.DestinationType; +import com.newrelic.api.agent.MessageConsumeParameters; +import com.newrelic.api.agent.NewRelic; +import com.newrelic.api.agent.weaver.NewField; +import com.newrelic.api.agent.weaver.Weave; +import com.newrelic.api.agent.weaver.WeaveAllConstructors; +import com.newrelic.api.agent.weaver.Weaver; +import com.nr.instrumentation.kafka.NewRelicMetricsReporter; + +@Weave(originalName = "org.apache.kafka.clients.consumer.KafkaConsumer") +public class KafkaConsumer_Instrumentation { + + public ConsumerRecords poll(final long timeoutMs) { + final ConsumerRecords records; + try { + records = Weaver.callOriginal(); + } catch (Exception e) { + // Specifically ignore WakeupExceptions because they are common in non-error use cases + if (!(e instanceof WakeupException)) { + NewRelic.noticeError(e); + } + throw e; + } + + for (ConsumerRecord record : records) { + if (AgentBridge.getAgent().getTransaction(false) != null) { + MessageConsumeParameters params = MessageConsumeParameters.library("Kafka") + .destinationType(DestinationType.NAMED_TOPIC) + .destinationName(record.topic()) + .inboundHeaders(null) + .build(); + NewRelic.getAgent().getTransaction().getTracedMethod().reportAsExternal(params); + } + break; + } + 
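+ // Only the first record is inspected: a single consume call is reported per poll,
+ // which keeps the reporting overhead constant regardless of batch size.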
return records; + } + + public ConsumerRecords poll(final Duration timeout) { + final ConsumerRecords records; + try { + records = Weaver.callOriginal(); + } catch (Exception e) { + // Specifically ignore WakeupExceptions because they are common in non-error use cases + if (!(e instanceof WakeupException)) { + NewRelic.noticeError(e); + } + throw e; + } + nrReportAsExternal(records); + return records; + } + + public void close() { + try { + Weaver.callOriginal(); + } catch (Exception e) { + NewRelic.noticeError(e); // Record an error when a consumer fails to close (most likely due to a timeout) + throw e; + } + } + + private void nrReportAsExternal(ConsumerRecords records) { + for (ConsumerRecord record : records) { + if (AgentBridge.getAgent().getTransaction(false) != null) { + MessageConsumeParameters params = MessageConsumeParameters.library("Kafka") + .destinationType(DestinationType.NAMED_TOPIC) + .destinationName(record.topic()) + .inboundHeaders(null) + .build(); + NewRelic.getAgent().getTransaction().getTracedMethod().reportAsExternal(params); + } + break; + } + } + +} diff --git a/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer_Instrumentation.java b/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer_Instrumentation.java new file mode 100644 index 0000000000..cce4c5b7af --- /dev/null +++ b/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer_Instrumentation.java @@ -0,0 +1,36 @@ +package org.apache.kafka.clients.consumer.internals; + +import com.newrelic.api.agent.weaver.NewField; +import com.newrelic.api.agent.weaver.Weave; +import com.newrelic.api.agent.weaver.WeaveAllConstructors; +import com.newrelic.api.agent.weaver.Weaver; +import com.nr.instrumentation.kafka.NewRelicMetricsReporter; +import org.apache.kafka.common.Node; +import org.apache.kafka.common.metrics.Metrics; + +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +@Weave(originalName = "org.apache.kafka.clients.consumer.internals.AsyncKafkaConsumer") +public class AsyncKafkaConsumer_Instrumentation { + private final Metrics metrics = Weaver.callOriginal(); + + private final ConsumerMetadata metadata = Weaver.callOriginal(); + + @NewField + private boolean initialized; + + @WeaveAllConstructors + public AsyncKafkaConsumer_Instrumentation() { + if (!initialized) { + List nodes = metadata.fetch().nodes(); + Set nodeNames = new HashSet<>(nodes.size()); + for (Node node : nodes) { + nodeNames.add(node.host() + ":" + node.port()); + } + metrics.addReporter(new NewRelicMetricsReporter(nodeNames, NewRelicMetricsReporter.Mode.CONSUMER)); + initialized = true; + } + } +} diff --git a/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/org/apache/kafka/clients/consumer/internals/LegacyKafkaConsumer_Instrumentation.java b/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/org/apache/kafka/clients/consumer/internals/LegacyKafkaConsumer_Instrumentation.java new file mode 100644 index 0000000000..b363d5553f --- /dev/null +++ b/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/org/apache/kafka/clients/consumer/internals/LegacyKafkaConsumer_Instrumentation.java @@ -0,0 +1,36 @@ +package org.apache.kafka.clients.consumer.internals; + +import com.newrelic.api.agent.weaver.NewField; +import com.newrelic.api.agent.weaver.Weave; +import 
com.newrelic.api.agent.weaver.WeaveAllConstructors; +import com.newrelic.api.agent.weaver.Weaver; +import com.nr.instrumentation.kafka.NewRelicMetricsReporter; +import org.apache.kafka.common.Node; +import org.apache.kafka.common.metrics.Metrics; + +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +@Weave(originalName = "org.apache.kafka.clients.consumer.internals.LegacyKafkaConsumer") +public class LegacyKafkaConsumer_Instrumentation { + private final Metrics metrics = Weaver.callOriginal(); + + private final ConsumerMetadata metadata = Weaver.callOriginal(); + + @NewField + private boolean initialized; + + @WeaveAllConstructors + public LegacyKafkaConsumer_Instrumentation() { + if (!initialized) { + List nodes = metadata.fetch().nodes(); + Set nodeNames = new HashSet<>(nodes.size()); + for (Node node : nodes) { + nodeNames.add(node.host() + ":" + node.port()); + } + metrics.addReporter(new NewRelicMetricsReporter(nodeNames, NewRelicMetricsReporter.Mode.CONSUMER)); + initialized = true; + } + } +} diff --git a/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/org/apache/kafka/clients/consumer/internals/PartitionAssignor.java b/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/org/apache/kafka/clients/consumer/internals/PartitionAssignor.java new file mode 100644 index 0000000000..72a4020c7d --- /dev/null +++ b/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/org/apache/kafka/clients/consumer/internals/PartitionAssignor.java @@ -0,0 +1,17 @@ +/* + * + * * Copyright 2022 New Relic Corporation. All rights reserved. + * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package org.apache.kafka.clients.consumer.internals; + +import com.newrelic.api.agent.weaver.SkipIfPresent; + +/** + * This class was removed on Kafka 3. So this will prevent the module from applying on older versions. + */ +@SkipIfPresent +public interface PartitionAssignor { +} diff --git a/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/org/apache/kafka/clients/producer/KafkaProducer_Instrumentation.java b/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/org/apache/kafka/clients/producer/KafkaProducer_Instrumentation.java new file mode 100644 index 0000000000..d5cecbc981 --- /dev/null +++ b/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/org/apache/kafka/clients/producer/KafkaProducer_Instrumentation.java @@ -0,0 +1,81 @@ +/* + * + * * Copyright 2022 New Relic Corporation. All rights reserved. 
+ * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package org.apache.kafka.clients.producer; + +import org.apache.kafka.clients.producer.internals.ProducerMetadata; +import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.Node; + +import com.newrelic.agent.bridge.AgentBridge; +import com.newrelic.api.agent.DestinationType; +import com.newrelic.api.agent.MessageProduceParameters; +import com.newrelic.api.agent.NewRelic; +import com.newrelic.api.agent.Trace; +import com.newrelic.api.agent.weaver.NewField; +import com.newrelic.api.agent.weaver.Weave; +import com.newrelic.api.agent.weaver.WeaveAllConstructors; +import com.newrelic.api.agent.weaver.Weaver; +import com.nr.instrumentation.kafka.CallbackWrapper; +import com.nr.instrumentation.kafka.NewRelicMetricsReporter; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Future; +import java.util.List; + +@Weave(originalName = "org.apache.kafka.clients.producer.KafkaProducer") +public class KafkaProducer_Instrumentation { + + private final Metrics metrics = Weaver.callOriginal(); + + private final ProducerMetadata metadata = Weaver.callOriginal(); + + @NewField + private boolean initialized; + + @WeaveAllConstructors + public KafkaProducer_Instrumentation() { + if (!initialized) { + List nodes = metadata.fetch().nodes(); + Set nodeNames = new HashSet<>(nodes.size()); + for (Node node : nodes) { + nodeNames.add(node.host() + ":" + node.port()); + } + metrics.addReporter(new NewRelicMetricsReporter(nodeNames, NewRelicMetricsReporter.Mode.PRODUCER)); + initialized = true; + } + } + + @Trace + private Future doSend(ProducerRecord record, Callback callback) { + if (callback != null) { + // Wrap the callback so we can capture metrics about messages being produced + callback = new CallbackWrapper(callback, record.topic()); + } + if (AgentBridge.getAgent().getTransaction(false) != null) { + // use null for headers so we don't try to do CAT + MessageProduceParameters params = MessageProduceParameters.library("Kafka") + .destinationType(DestinationType.NAMED_TOPIC) + .destinationName(record.topic()) + .outboundHeaders(null) + .build(); + NewRelic.getAgent().getTransaction().getTracedMethod().reportAsExternal(params); + } + + try { + return Weaver.callOriginal(); + } catch (Exception e) { + Map atts = new HashMap<>(); + atts.put("topic_name", record.topic()); + NewRelic.noticeError(e, atts); + throw e; + } + } +} diff --git a/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/org/apache/kafka/common/serialization/Deserializer_Instrumentation.java b/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/org/apache/kafka/common/serialization/Deserializer_Instrumentation.java new file mode 100644 index 0000000000..5d066a02cb --- /dev/null +++ b/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/org/apache/kafka/common/serialization/Deserializer_Instrumentation.java @@ -0,0 +1,25 @@ +/* + * + * * Copyright 2022 New Relic Corporation. All rights reserved. 
+ * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package org.apache.kafka.common.serialization; + +import com.newrelic.api.agent.NewRelic; +import com.newrelic.api.agent.weaver.MatchType; +import com.newrelic.api.agent.weaver.Weave; +import com.newrelic.api.agent.weaver.Weaver; +import com.nr.instrumentation.kafka.Metrics; + +@Weave(originalName = "org.apache.kafka.common.serialization.Deserializer", type = MatchType.Interface) +public class Deserializer_Instrumentation { + + public T deserialize(String topic, byte[] data) { + long start = System.nanoTime(); + T result = Weaver.callOriginal(); + NewRelic.recordMetric(Metrics.DESERIALIZATION_TIME_METRIC_BASE + topic, System.nanoTime() - start); + return result; + } +} diff --git a/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/org/apache/kafka/common/serialization/Serializer_Instrumentation.java b/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/org/apache/kafka/common/serialization/Serializer_Instrumentation.java new file mode 100644 index 0000000000..8c3001cf4c --- /dev/null +++ b/instrumentation/kafka-clients-metrics-3.7.0/src/main/java/org/apache/kafka/common/serialization/Serializer_Instrumentation.java @@ -0,0 +1,25 @@ +/* + * + * * Copyright 2022 New Relic Corporation. All rights reserved. + * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package org.apache.kafka.common.serialization; + +import com.newrelic.api.agent.NewRelic; +import com.newrelic.api.agent.weaver.MatchType; +import com.newrelic.api.agent.weaver.Weave; +import com.newrelic.api.agent.weaver.Weaver; +import com.nr.instrumentation.kafka.Metrics; + +@Weave(originalName = "org.apache.kafka.common.serialization.Serializer", type = MatchType.Interface) +public class Serializer_Instrumentation { + + public byte[] serialize(String topic, T data) { + long start = System.nanoTime(); + byte[] result = Weaver.callOriginal(); + NewRelic.recordMetric(Metrics.SERIALIZATION_TIME_METRIC_BASE + topic, System.nanoTime() - start); + return result; + } +} diff --git a/instrumentation/kafka-clients-metrics-3.7.0/src/test/java/com/nr/agent/instrumentation/kafka/Kafka37MessageTest.java b/instrumentation/kafka-clients-metrics-3.7.0/src/test/java/com/nr/agent/instrumentation/kafka/Kafka37MessageTest.java new file mode 100644 index 0000000000..ceed0a13f0 --- /dev/null +++ b/instrumentation/kafka-clients-metrics-3.7.0/src/test/java/com/nr/agent/instrumentation/kafka/Kafka37MessageTest.java @@ -0,0 +1,211 @@ +/* + * + * * Copyright 2022 New Relic Corporation. All rights reserved. 
+ * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package com.nr.agent.instrumentation.kafka; + +import com.newrelic.agent.introspec.InstrumentationTestConfig; +import com.newrelic.agent.introspec.InstrumentationTestRunner; +import static com.newrelic.agent.introspec.MetricsHelper.getUnscopedMetricCount; + +import com.newrelic.agent.introspec.TracedMetricData; +import com.newrelic.api.agent.Trace; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.clients.producer.KafkaProducer; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.clients.producer.RecordMetadata; +import org.junit.After; +import org.junit.Assert; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Rule; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.testcontainers.containers.KafkaContainer; +import org.testcontainers.utility.DockerImageName; + +@Ignore("This test is flaky on GHA") +@RunWith(InstrumentationTestRunner.class) +@InstrumentationTestConfig(includePrefixes = "org.apache.kafka") +public class Kafka37MessageTest { + @Rule + public KafkaContainer kafkaContainer = new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka:7.3.0")); + + private final String TOPIC = "life-universe-everything"; + private final String ANOTHER_TOPIC = "vogon-poetry"; + @Before + public void before() { + kafkaContainer.start(); + } + + @After + public void after() { + kafkaContainer.stop(); + } + + @Test + public void testProducer() throws ExecutionException, InterruptedException { + Future msgsWereRead = asyncReadMessages(); + + // giving some time for the consumer to ready itself up prior to sending the messages + Thread.sleep(1000L); + sendMessages(); + + Assert.assertTrue("Messages weren't read", msgsWereRead.get()); + assertUnscopedMetrics(); + } + + /** + * @return a Future that holds whether the messages were read + */ + private Future asyncReadMessages() { + return Executors.newSingleThreadExecutor().submit(this::readMessages); + } + + /** + * @return whether messages were read + */ + @Trace(dispatcher = true) + private boolean readMessages() throws InterruptedException { + int messagesRead = 0; + try (KafkaConsumer consumer = KafkaHelper.newConsumer(kafkaContainer)) { + consumer.subscribe(Collections.singleton(TOPIC)); + + // setting a timeout so this does not drag forever if something goes wrong. 
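+ // (the budget is 15 seconds; the loop below also returns early once both expected messages are read)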
+ long waitUntil = System.currentTimeMillis() + 15000L; + while (waitUntil > System.currentTimeMillis()) { + ConsumerRecords records = consumer.poll(1000); + messagesRead += records.count(); + if (messagesRead == 2) { + // Sleep for a minute before closing the consumer so MetricsScheduler runs + // few times and all metrics are reported + Thread.sleep(60000L); + return true; + } + } + } + return false; + } + + @Trace(dispatcher = true) + private void sendMessages() throws ExecutionException, InterruptedException { + try (KafkaProducer producer = KafkaHelper.newProducer(kafkaContainer)) { + List> futures = Arrays.asList( + producer.send(new ProducerRecord<>(ANOTHER_TOPIC, "Oh freddled gruntbuggly")), + producer.send(new ProducerRecord<>(TOPIC, "Life, don't talk to me about life.")), + producer.send(new ProducerRecord<>(TOPIC, "Don't Panic")) + ); + for (Future future : futures) { + future.get(); + } + // Sleep for a minute before closing the producer so MetricsScheduler runs + // few times and all metrics are reported + Thread.sleep(60000L); + } + } + + private void assertUnscopedMetrics() { + // on the previous instrumentation module there are more metrics being verified. + // Kafka 3 changed a little how the values are retrieved and those metrics now return NaN, and thus are not reported. + assertUnscopedMetricExists( + // general kafka metrics, they can change from test to test, so only verifying they exist + "MessageBroker/Kafka/Internal/consumer-coordinator-metrics/assigned-partitions", + "MessageBroker/Kafka/Internal/consumer-coordinator-metrics/commit-rate", + "MessageBroker/Kafka/Internal/consumer-coordinator-metrics/heartbeat-rate", + "MessageBroker/Kafka/Internal/consumer-coordinator-metrics/join-rate", + "MessageBroker/Kafka/Internal/consumer-coordinator-metrics/last-heartbeat-seconds-ago", + "MessageBroker/Kafka/Internal/consumer-coordinator-metrics/sync-rate", + "MessageBroker/Kafka/Internal/consumer-fetch-manager-metrics/bytes-consumed-rate", + "MessageBroker/Kafka/Internal/consumer-fetch-manager-metrics/fetch-rate", + "MessageBroker/Kafka/Internal/consumer-fetch-manager-metrics/records-consumed-rate", + "MessageBroker/Kafka/Internal/consumer-metrics/connection-close-rate", + "MessageBroker/Kafka/Internal/consumer-metrics/connection-count", + "MessageBroker/Kafka/Internal/consumer-metrics/connection-creation-rate", + "MessageBroker/Kafka/Internal/consumer-metrics/incoming-byte-rate", + "MessageBroker/Kafka/Internal/consumer-metrics/io-ratio", + "MessageBroker/Kafka/Internal/consumer-metrics/io-wait-ratio", + "MessageBroker/Kafka/Internal/consumer-metrics/network-io-rate", + "MessageBroker/Kafka/Internal/consumer-metrics/outgoing-byte-rate", + "MessageBroker/Kafka/Internal/consumer-metrics/request-rate", + "MessageBroker/Kafka/Internal/consumer-metrics/response-rate", + "MessageBroker/Kafka/Internal/consumer-metrics/select-rate", + "MessageBroker/Kafka/Internal/kafka-metrics-count/count", + "MessageBroker/Kafka/Internal/producer-metrics/batch-split-rate", + "MessageBroker/Kafka/Internal/producer-metrics/buffer-available-bytes", + "MessageBroker/Kafka/Internal/producer-metrics/buffer-exhausted-rate", + "MessageBroker/Kafka/Internal/producer-metrics/buffer-total-bytes", + "MessageBroker/Kafka/Internal/producer-metrics/bufferpool-wait-ratio", + "MessageBroker/Kafka/Internal/producer-metrics/connection-close-rate", + "MessageBroker/Kafka/Internal/producer-metrics/connection-count", + "MessageBroker/Kafka/Internal/producer-metrics/connection-creation-rate", + 
"MessageBroker/Kafka/Internal/producer-metrics/incoming-byte-rate", + "MessageBroker/Kafka/Internal/producer-metrics/io-ratio", + "MessageBroker/Kafka/Internal/producer-metrics/io-wait-ratio", + "MessageBroker/Kafka/Internal/producer-metrics/metadata-age", + "MessageBroker/Kafka/Internal/producer-metrics/network-io-rate", + "MessageBroker/Kafka/Internal/producer-metrics/outgoing-byte-rate", + "MessageBroker/Kafka/Internal/producer-metrics/record-error-rate", + "MessageBroker/Kafka/Internal/producer-metrics/record-retry-rate", + "MessageBroker/Kafka/Internal/producer-metrics/record-send-rate", + "MessageBroker/Kafka/Internal/producer-metrics/request-rate", + "MessageBroker/Kafka/Internal/producer-metrics/requests-in-flight", + "MessageBroker/Kafka/Internal/producer-metrics/response-rate", + "MessageBroker/Kafka/Internal/producer-metrics/select-rate", + "MessageBroker/Kafka/Internal/producer-metrics/waiting-threads" + ); + + // serializer are called more often because they serialize the key and the value + assertEquals(0, getUnscopedMetricCount("MessageBroker/Kafka/Deserialization/" + TOPIC)); + assertEquals(4, getUnscopedMetricCount("MessageBroker/Kafka/Serialization/" + TOPIC)); + assertEquals(2, getUnscopedMetricCount("MessageBroker/Kafka/Topic/Produce/Named/" +TOPIC)); + + // deserializer is never called because this topic is never read from + assertEquals(0, getUnscopedMetricCount("MessageBroker/Kafka/Deserialization/" + ANOTHER_TOPIC)); + assertEquals(2, getUnscopedMetricCount("MessageBroker/Kafka/Serialization/" + ANOTHER_TOPIC)); + assertEquals(1, getUnscopedMetricCount("MessageBroker/Kafka/Topic/Produce/Named/" + ANOTHER_TOPIC)); + + // there are 2 messages in the topic, but they could be read in a single poll, or in 2 + int consumedCount = getUnscopedMetricCount("MessageBroker/Kafka/Topic/Consume/Named/" + TOPIC); + assertTrue(consumedCount >= 1); + assertTrue(consumedCount <= 2); + + assertEquals(0, getUnscopedMetricCount("MessageBroker/Kafka/Rebalance/Assigned/life-universe-everything/0")); + + // Nodes metrics + assertTrue(unscopedNodesMetricExists("MessageBroker/Kafka/Nodes/localhost:[0-9]*")); + assertTrue(unscopedNodesMetricExists("MessageBroker/Kafka/Nodes/localhost:[0-9]*/Consume/" + TOPIC)); + assertTrue(unscopedNodesMetricExists("MessageBroker/Kafka/Nodes/localhost:[0-9]*/Produce/" + TOPIC)); + assertFalse(unscopedNodesMetricExists("MessageBroker/Kafka/Nodes/localhost:[0-9]*/Consume/" + ANOTHER_TOPIC)); + assertTrue(unscopedNodesMetricExists("MessageBroker/Kafka/Nodes/localhost:[0-9]*/Produce/" + ANOTHER_TOPIC)); + } + + private void assertUnscopedMetricExists(String ... 
metricNames) { + int notFoundMetricCount = 0; + Set existingMetrics= InstrumentationTestRunner.getIntrospector().getUnscopedMetrics().keySet(); + for (String metricName : metricNames) { + Assert.assertTrue("metric not found: " + metricName, existingMetrics.contains(metricName)); + } + System.out.println(notFoundMetricCount + " metrics not found"); + } + + private boolean unscopedNodesMetricExists(String metricName) { + return InstrumentationTestRunner.getIntrospector().getUnscopedMetrics().keySet().stream() + .anyMatch(key -> key.matches(metricName)); + } +} diff --git a/instrumentation/kafka-clients-metrics-3.7.0/src/test/java/com/nr/agent/instrumentation/kafka/KafkaHelper.java b/instrumentation/kafka-clients-metrics-3.7.0/src/test/java/com/nr/agent/instrumentation/kafka/KafkaHelper.java new file mode 100644 index 0000000000..a38a26c8d5 --- /dev/null +++ b/instrumentation/kafka-clients-metrics-3.7.0/src/test/java/com/nr/agent/instrumentation/kafka/KafkaHelper.java @@ -0,0 +1,47 @@ +/* + * + * * Copyright 2022 New Relic Corporation. All rights reserved. + * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package com.nr.agent.instrumentation.kafka; + +import java.util.Properties; +import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.clients.producer.KafkaProducer; +import org.testcontainers.containers.KafkaContainer; + +public class KafkaHelper { + + public static KafkaProducer newProducer(KafkaContainer kafkaContainer) { + Properties props = getProps(kafkaContainer.getBootstrapServers()); + return new KafkaProducer<>(props); + } + + public static KafkaConsumer newConsumer(KafkaContainer kafkaContainer) { + Properties props = getProps(kafkaContainer.getBootstrapServers()); + return new KafkaConsumer<>(props); + } + + public static Properties getProps(String bootstrapServers) { + Properties props = new Properties(); + props.put("bootstrap.servers", bootstrapServers); + props.put("acks", "all"); + props.put("retries", 0); + props.put("batch.size", 16384); + props.put("linger.ms", 1); + props.put("buffer.memory", 33554432); + props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer"); + props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer"); + props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); + props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); + props.put("group.id", "test-consumer-group"); + props.put("group.protocol", "CLASSIC"); + return props; + } + + private KafkaHelper() { + // prevents instantiations + } +} diff --git a/instrumentation/kafka-clients-node-metrics-3.7.0/build.gradle b/instrumentation/kafka-clients-node-metrics-3.7.0/build.gradle new file mode 100644 index 0000000000..5a4e6ef29d --- /dev/null +++ b/instrumentation/kafka-clients-node-metrics-3.7.0/build.gradle @@ -0,0 +1,24 @@ + +dependencies { + implementation(project(":newrelic-api")) + implementation(project(":newrelic-weaver-api")) + implementation("org.apache.kafka:kafka-clients:3.7.0") + + testImplementation("org.awaitility:awaitility:4.2.0") + testImplementation("org.mockito:mockito-inline:4.11.0") +} + +jar { + manifest { attributes 'Implementation-Title': 'com.newrelic.instrumentation.kafka-clients-node-metrics-3.7.0', 'Enabled': 'false' , + 'Implementation-Title-Alias': 'kafka-clients-node-metrics' } +} + +verifyInstrumentation { + passesOnly 'org.apache.kafka:kafka-clients:[3.7.0,)' +} + + +site { + title 'Kafka' + type 'Messaging' +} diff 
--git a/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/CachedKafkaMetric.java b/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/CachedKafkaMetric.java new file mode 100644 index 0000000000..dbe471ab64 --- /dev/null +++ b/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/CachedKafkaMetric.java @@ -0,0 +1,9 @@ +package com.nr.instrumentation.kafka; + +public interface CachedKafkaMetric { + boolean isValid(); + + String displayName(); + + void report(final FiniteMetricRecorder recorder); +} diff --git a/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/CachedKafkaMetrics.java b/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/CachedKafkaMetrics.java new file mode 100644 index 0000000000..3aa651b195 --- /dev/null +++ b/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/CachedKafkaMetrics.java @@ -0,0 +1,163 @@ +package com.nr.instrumentation.kafka; + +import java.util.regex.Pattern; +import org.apache.kafka.common.metrics.KafkaMetric; +import org.apache.kafka.common.metrics.Measurable; + +public class CachedKafkaMetrics { + + public static CachedKafkaMetric newCachedKafkaMetric(final KafkaMetric metric) { + if ("app-info".equals(metric.metricName().group()) && "version".equals(metric.metricName().name())) { + return new CachedKafkaVersion(metric); + } + + Measurable measurable = null; + try { + measurable = metric.measurable(); + } catch (final IllegalStateException e) { + } + + final boolean isCumulativeSumType = measurable != null && + CumulativeSumSupport.isCumulativeSumClass(measurable.getClass().getName()); + if (isCumulativeSumType) { + return new CachedKafkaCounter(metric); + } + + if (!(metric.metricValue() instanceof Number)) { + return new InvalidCachedKafkaMetric(metric); + } + + return new CachedKafkaSummary(metric); + } + + private static class CachedKafkaVersion implements CachedKafkaMetric { + private final KafkaMetric metric; + private final String newRelicMetricName; + + public CachedKafkaVersion(final KafkaMetric metric) { + this.metric = metric; + this.newRelicMetricName = MetricNameUtil.METRIC_PREFIX + "app-info/version/" + metric.metricValue(); + } + + @Override + public boolean isValid() { + return true; + } + + @Override + public String displayName() { + return "app-info/version/" + metric.metricValue(); + } + + @Override + public void report(final FiniteMetricRecorder recorder) { + recorder.recordMetric(newRelicMetricName, 1.0f); + } + } + + private static class InvalidCachedKafkaMetric implements CachedKafkaMetric { + private final KafkaMetric metric; + + public InvalidCachedKafkaMetric(final KafkaMetric metric) { + this.metric = metric; + } + + @Override + public boolean isValid() { + return false; + } + + @Override + public String displayName() { + return MetricNameUtil.buildDisplayName(metric); + } + + @Override + public void report(FiniteMetricRecorder recorder) { + // no-op + } + } + + private static class CachedKafkaSummary implements CachedKafkaMetric { + private final KafkaMetric metric; + private final String newRelicMetricName; + + public CachedKafkaSummary(final KafkaMetric metric) { + this.metric = metric; + this.newRelicMetricName = MetricNameUtil.buildMetricName(metric); + } + + @Override + public boolean isValid() { + return true; + } + + @Override + public String displayName() { + return 
MetricNameUtil.buildDisplayName(metric); + } + + @Override + public void report(final FiniteMetricRecorder recorder) { + recorder.recordMetric(newRelicMetricName, ((Number) metric.metricValue()).floatValue()); + } + } + + private static class CachedKafkaCounter implements CachedKafkaMetric { + private final KafkaMetric metric; + private static final Pattern totalPattern = Pattern.compile("-total$"); + + private final String counterMetricName; + private final String totalMetricName; + + private int previous = -1; + + public CachedKafkaCounter(final KafkaMetric metric) { + this.metric = metric; + + totalMetricName = MetricNameUtil.buildMetricName(metric); + + String metricName = metric.metricName().name(); + String counterName = totalPattern.matcher(metricName).replaceAll("-counter"); + if (counterName.equals(metricName)) { + counterName = metricName + "-counter"; + } + counterMetricName = MetricNameUtil.buildMetricName(metric, counterName); + } + + @Override + public boolean isValid() { + return true; + } + + @Override + public String displayName() { + return MetricNameUtil.buildDisplayName(metric); + } + + @Override + public void report(final FiniteMetricRecorder recorder) { + final Number value = ((Number) metric.metricValue()); + if (!recorder.tryRecordMetric(totalMetricName, value.floatValue())) { + // we can't trust the last observed value, so reset + previous = -1; + return; + } + + final int intValue = value.intValue(); + if (previous == -1L) { + previous = intValue; + return; + } + + final int delta = intValue - previous; + previous = intValue; + + recorder.incrementCounter(counterMetricName, delta); + } + } + + private CachedKafkaMetrics() { + // prevents instantiation + } +} diff --git a/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/CumulativeSumSupport.java b/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/CumulativeSumSupport.java new file mode 100644 index 0000000000..7dc226c077 --- /dev/null +++ b/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/CumulativeSumSupport.java @@ -0,0 +1,26 @@ +package com.nr.instrumentation.kafka; + +public class CumulativeSumSupport { + private static final String CUMULATIVE_SUM_CLASS = "org.apache.kafka.common.metrics.stats.CumulativeSum"; + + private static final boolean SUPPORTS_CUMULATIVE_SUM; + static { + boolean supportsCumulativeSum; + try { + Class.forName(CUMULATIVE_SUM_CLASS); + supportsCumulativeSum = true; + } catch (final ClassNotFoundException e) { + supportsCumulativeSum = false; + } + SUPPORTS_CUMULATIVE_SUM = supportsCumulativeSum; + } + + public static boolean isCumulativeSumClass(String className) { + // only do the string comparison if the system has the cumulative sum class + return SUPPORTS_CUMULATIVE_SUM && CUMULATIVE_SUM_CLASS.equals(className); + } + + public static boolean isCumulativeSumSupported() { + return SUPPORTS_CUMULATIVE_SUM; + } +} diff --git a/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/FiniteMetricRecorder.java b/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/FiniteMetricRecorder.java new file mode 100644 index 0000000000..97af0dec88 --- /dev/null +++ b/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/FiniteMetricRecorder.java @@ -0,0 +1,26 @@ +package com.nr.instrumentation.kafka; + +import com.newrelic.api.agent.NewRelic; + +public class 
FiniteMetricRecorder { + public void incrementCounter(final String metric, final int value) { + NewRelic.incrementCounter(metric, value); + } + + public boolean tryRecordMetric(final String metric, final float value) { + if (Float.isNaN(value) || Float.isInfinite(value)) { + return false; + } + + NewRelic.recordMetric(metric, value); + return true; + } + + public void recordMetric(final String metric, final float value) { + if (Float.isNaN(value) || Float.isInfinite(value)) { + return; + } + + NewRelic.recordMetric(metric, value); + } +} diff --git a/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/MetricNameUtil.java b/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/MetricNameUtil.java new file mode 100644 index 0000000000..64c8196edc --- /dev/null +++ b/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/MetricNameUtil.java @@ -0,0 +1,106 @@ +package com.nr.instrumentation.kafka; + +import org.apache.kafka.common.metrics.KafkaMetric; + +public class MetricNameUtil { + static final String METRIC_PREFIX = "MessageBroker/Kafka/Internal/"; + + static String buildDisplayName(final KafkaMetric metric) { + return String.format("%s/%s %s", metric.metricName().group(), metric.metricName().name(), metric.metricName().tags()); + } + + static String buildMetricName(final KafkaMetric metric) { + return buildMetricName(metric, null); + } + + static String buildMetricName(final KafkaMetric metric, final String nameOverride) { + final String name = nameOverride != null ? nameOverride : metric.metricName().name(); + final String metricPrefix = METRIC_PREFIX + metric.metricName().group() + "/"; + + final String clientId = metric.metricName().tags().get("client-id"); + if (clientId == null) { + return metricPrefix + name; + } + + // is it a per-topic metric? + final String topic = metric.metricName().tags().get("topic"); + if (topic != null) { + return metricPrefix + "topic/" + topic + "/client/" + clientId + "/" + name; + } + + // is it a per-node metric? + String nodeId = metric.metricName().tags().get("node-id"); + if (nodeId != null) { + nodeId = normalizeNodeId(nodeId); + return metricPrefix + "node/" + nodeId + "/client/" + clientId + "/" + name; + } + + return metricPrefix + "client/" + clientId + "/" + name; + } + + private static String normalizeNodeId(final String nodeId) { + // + // sometimes node IDs get weird. let's try to clean things up a bit. + // + + final String[] parts = nodeId.split("-", 2); + if (parts.length != 2) { + // + // a strange node ID that doesn't conform to the expected pattern. leave it be. + // + return nodeId; + } + + final int num; + try { + num = Integer.parseInt(parts[1]); + } catch (final NumberFormatException e) { + // + // non-numeric value in the node ID. weird, but OK. + // + return nodeId; + } + + // + // negative node IDs are used for seed brokers (i.e. initial metadata bootstrap) + // the negative values are pretty useless in practice and just act as placeholders + // for the metadata request. once the metadata request is complete we know the real + // broker IDs and things get more interesting. + // + // return "seed" for negative node IDs since it's probably more useful to users + // than some confusing pseudo-ID. + // + if (num < 0) { + return "seed"; + } + + // + // try to detect coordinator node IDs. what is this nonsense? I'm so glad you asked. 
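+ // (worked example: a node ID of "node-2147483644" parses to 2147483644, and
+ // Integer.MAX_VALUE - 2147483644 = 3, so it is reported as "coordinator-3")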
+            //
+            // group coordinator node IDs get munged in order to separate the coordinator
+            // "control plane" from the data plane. this is achieved by subtracting the
+            // true node ID from Integer.MAX_VALUE. here we just unmunge the node ID to
+            // get the true ID of the group coordinator to report something more useful
+            // to users.
+            //
+            // note there's no "guaranteed" way to avoid conflicts across the node ID
+            // "namespace" so we can't actually tell the difference between a coordinator
+            // node ID and a "regular" node ID, but here we assume that node IDs aren't
+            // typically huge (in practice I believe they're limited to fairly small but
+            // configurable values on the broker side anyway)
+            //
+        final int coordinatorNodeId = Integer.MAX_VALUE - num;
+        if (coordinatorNodeId > 0 && coordinatorNodeId < (Integer.MAX_VALUE & 0xff000000)) {
+            return "coordinator-" + coordinatorNodeId;
+        }
+
+        //
+        // fall back to the unmodified node ID that was passed in (this should be the typical case)
+        //
+        return nodeId;
+    }
+
+    private MetricNameUtil() {
+        // prevents instantiation
+    }
+}
diff --git a/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/NewRelicMetricsReporter.java b/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/NewRelicMetricsReporter.java
new file mode 100644
index 0000000000..22680a9c18
--- /dev/null
+++ b/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/NewRelicMetricsReporter.java
@@ -0,0 +1,117 @@
+package com.nr.instrumentation.kafka;
+
+import com.newrelic.api.agent.NewRelic;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.logging.Level;
+import org.apache.kafka.common.MetricName;
+import org.apache.kafka.common.metrics.KafkaMetric;
+import org.apache.kafka.common.metrics.MetricsReporter;
+
+public class NewRelicMetricsReporter implements MetricsReporter {
+
+    private static final boolean METRICS_DEBUG = NewRelic.getAgent().getConfig().getValue("kafka.metrics.debug.enabled", false);
+    private static final boolean NODE_METRICS_DISABLED = NewRelic.getAgent().getConfig().getValue("kafka.metrics.node.metrics.disabled", false);
+    private static final boolean TOPIC_METRICS_DISABLED = NewRelic.getAgent().getConfig().getValue("kafka.metrics.topic.metrics.disabled", false);
+    private static final long REPORTING_INTERVAL_IN_SECONDS = NewRelic.getAgent().getConfig().getValue("kafka.metrics.interval", 30);
+    private static final ScheduledExecutorService SCHEDULER =
+            Executors.newSingleThreadScheduledExecutor(ThreadFactories.build("NewRelicMetricsReporter-Kafka"));
+    private ScheduledFuture<?> future;
+
+    private final ConcurrentHashMap<MetricName, CachedKafkaMetric> metrics = new ConcurrentHashMap<>();
+    private final FiniteMetricRecorder recorder = new FiniteMetricRecorder();
+
+    @Override
+    public void init(List<KafkaMetric> metrics) {
+        NewRelic.getAgent().getLogger().log(Level.INFO,
+                "newrelic-kafka-clients-enhancements: initializing with SUPPORTS_CUMULATIVE_SUM={0}",
+                CumulativeSumSupport.isCumulativeSumSupported());
+
+        for (final KafkaMetric metric : metrics) {
+            registerMetric(metric);
+        }
+
+        future = SCHEDULER.scheduleAtFixedRate(new Runnable() {
+            @Override
+            public void run() {
+                report();
+            }
+        }, 0, REPORTING_INTERVAL_IN_SECONDS, TimeUnit.SECONDS);
+    }
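// ----------------------------------------------------------------------------
// Editor's aside (illustrative, not part of the patch): the node-ID
// normalization in MetricNameUtil above is easiest to follow with concrete
// values, traced by hand from its comments:
//
//   "node--1"         -> "seed"            negative IDs are metadata-bootstrap placeholders
//   "node-2147483644" -> "coordinator-3"   Integer.MAX_VALUE - 2147483644 == 3
//   "node-5"          -> "node-5"          typical case: real broker IDs pass through unchanged
//   "gateway"         -> "gateway"         IDs without a "-<number>" suffix are left alone
// ----------------------------------------------------------------------------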
+
+    @Override
+    public void metricChange(KafkaMetric metric) {
+        registerMetric(metric);
+    }
+
+    @Override
+    public void metricRemoval(KafkaMetric metric) {
+        // a single remove() both deregisters the metric and returns it for logging
+        final CachedKafkaMetric cachedMetric = metrics.remove(metric.metricName());
+        if (cachedMetric != null) {
+            debugLog("newrelic-kafka-clients-enhancements: deregister metric: {0}", cachedMetric.displayName());
+        }
+    }
+
+    @Override
+    public void close() {
+        if (future != null) {
+            future.cancel(false);
+            future = null;
+        }
+    }
+
+    @Override
+    public void configure(Map<String, ?> configs) {
+
+    }
+
+    private void registerMetric(final KafkaMetric metric) {
+        if (NODE_METRICS_DISABLED && metric.metricName().tags().get("node-id") != null) {
+            debugLog("newrelic-kafka-clients-enhancements: skipping node metric registration: {0}",
+                    MetricNameUtil.buildDisplayName(metric));
+            return;
+        }
+
+        if (TOPIC_METRICS_DISABLED && metric.metricName().tags().get("topic") != null) {
+            debugLog("newrelic-kafka-clients-enhancements: skipping topic metric registration: {0}",
+                    MetricNameUtil.buildDisplayName(metric));
+            return;
+        }
+
+        final CachedKafkaMetric cachedMetric = CachedKafkaMetrics.newCachedKafkaMetric(metric);
+        if (cachedMetric.isValid()) {
+            debugLog("newrelic-kafka-clients-enhancements: register metric: {0}", cachedMetric.displayName());
+
+            this.metrics.put(metric.metricName(), cachedMetric);
+        } else {
+            debugLog("newrelic-kafka-clients-enhancements: skipping metric registration: {0}", cachedMetric.displayName());
+        }
+    }
+
+    private void report() {
+        debugLog("newrelic-kafka-clients-enhancements: reporting Kafka metrics");
+
+        for (final CachedKafkaMetric metric : metrics.values()) {
+            metric.report(recorder);
+        }
+    }
+
+    private void debugLog(String message) {
+        if (METRICS_DEBUG) {
+            NewRelic.getAgent().getLogger().log(Level.INFO, message);
+        }
+    }
+
+    private void debugLog(String message, Object value) {
+        if (METRICS_DEBUG) {
+            NewRelic.getAgent().getLogger().log(Level.INFO, message, value);
+        }
+    }
+}
diff --git a/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/ThreadFactories.java b/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/ThreadFactories.java
new file mode 100644
index 0000000000..857c8fd807
--- /dev/null
+++ b/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/com/nr/instrumentation/kafka/ThreadFactories.java
@@ -0,0 +1,33 @@
+package com.nr.instrumentation.kafka;
+
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.atomic.AtomicInteger;
+
+public class ThreadFactories {
+    public static ThreadFactory build(final String serviceName) {
+        return new DefaultThreadFactory(serviceName, true);
+    }
+
+    private ThreadFactories() {
+        // prevents instantiation
+    }
+
+    private static class DefaultThreadFactory implements ThreadFactory {
+        private final String serviceName;
+        private final AtomicInteger counter;
+        private final boolean daemon;
+
+        private DefaultThreadFactory(String serviceName, boolean daemon) {
+            this.serviceName = serviceName;
+            counter = new AtomicInteger(0);
+            this.daemon = daemon;
+        }
+
+        @Override
+        public Thread newThread(Runnable runnable) {
+            Thread thread = new Thread(runnable, "New Relic " + serviceName + " #" + counter.incrementAndGet());
+            thread.setDaemon(daemon);
+            return thread;
+        }
+    }
+}
diff --git a/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/org/apache/kafka/kafka/clients/consumer/KafkaConsumer_Instrumentation.java
b/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/org/apache/kafka/kafka/clients/consumer/KafkaConsumer_Instrumentation.java
new file mode 100644
index 0000000000..e5265d868d
--- /dev/null
+++ b/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/org/apache/kafka/kafka/clients/consumer/KafkaConsumer_Instrumentation.java
@@ -0,0 +1,50 @@
+/*
+ *
+ * * Copyright 2020 New Relic Corporation. All rights reserved.
+ * * SPDX-License-Identifier: Apache-2.0
+ *
+ */
+
+package org.apache.kafka.kafka.clients.consumer;
+
+import com.newrelic.api.agent.NewRelic;
+import com.newrelic.api.agent.weaver.NewField;
+import com.newrelic.api.agent.weaver.Weave;
+import com.newrelic.api.agent.weaver.WeaveAllConstructors;
+import com.nr.instrumentation.kafka.NewRelicMetricsReporter;
+
+import java.time.Duration;
+import java.util.Map;
+import java.util.logging.Level;
+
+import org.apache.kafka.common.Metric;
+import org.apache.kafka.common.MetricName;
+import org.apache.kafka.common.Uuid;
+import org.apache.kafka.common.metrics.Metrics;
+
+@Weave(originalName = "org.apache.kafka.clients.consumer.KafkaConsumer")
+public abstract class KafkaConsumer_Instrumentation {
+
+    // It's possible for constructors to be invoked multiple times (e.g. `C() { C("some default") }` ).
+    // When this happens we don't want to register the metrics reporter multiple times.
+    @NewField
+    private boolean metricsReporterInstalled;
+
+    @WeaveAllConstructors
+    public KafkaConsumer_Instrumentation() {
+
+        String clientId = clientInstanceId(Duration.ofSeconds(1)).toString();
+        Metrics metrics = (Metrics) metrics();
+
+        if (!metricsReporterInstalled) {
+            NewRelic.getAgent().getLogger().log(Level.INFO,
+                    "newrelic-kafka-clients-enhancements engaged for consumer {0}", clientId);
+            metrics.addReporter(new NewRelicMetricsReporter());
+            metricsReporterInstalled = true;
+        }
+    }
+
+    public abstract Uuid clientInstanceId(Duration timeout);
+
+    public abstract Map<MetricName, ? extends Metric> metrics();
+}
diff --git a/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/org/apache/kafka/kafka/clients/producer/KafkaProducer_Instrumentation.java b/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/org/apache/kafka/kafka/clients/producer/KafkaProducer_Instrumentation.java
new file mode 100644
index 0000000000..c5bec36efa
--- /dev/null
+++ b/instrumentation/kafka-clients-node-metrics-3.7.0/src/main/java/org/apache/kafka/kafka/clients/producer/KafkaProducer_Instrumentation.java
@@ -0,0 +1,32 @@
+package org.apache.kafka.kafka.clients.producer;
+
+import com.newrelic.api.agent.NewRelic;
+import com.newrelic.api.agent.weaver.NewField;
+import com.newrelic.api.agent.weaver.Weave;
+import com.newrelic.api.agent.weaver.WeaveAllConstructors;
+import com.newrelic.api.agent.weaver.Weaver;
+import com.nr.instrumentation.kafka.NewRelicMetricsReporter;
+import java.util.logging.Level;
+import org.apache.kafka.common.metrics.Metrics;
+
+@Weave(originalName = "org.apache.kafka.clients.producer.KafkaProducer")
+public class KafkaProducer_Instrumentation {
+
+    private final Metrics metrics = Weaver.callOriginal();
+    private final String clientId = Weaver.callOriginal();
+
+    // It's possible for constructors to be invoked multiple times (e.g. `C() { C("some default") }` ).
+    // When this happens we don't want to register the metrics reporter multiple times.
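// ----------------------------------------------------------------------------
// Editor's aside (hypothetical sketch, not part of the patch): the constructor
// chaining referred to in the comment above matches the real KafkaProducer
// convenience constructors, where one delegates to another; without the guard
// field below, a @WeaveAllConstructors body would run once per constructor frame
// and register the reporter twice:
//
//   public KafkaProducer(Map<String, Object> configs) {
//       this(configs, null, null);    // weaved body runs for this frame...
//   }
//   public KafkaProducer(Map<String, Object> configs,
//                        Serializer<K> keySerializer, Serializer<V> valueSerializer) {
//       ...                           // ...and again for this one
//   }
// ----------------------------------------------------------------------------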
+ @NewField + private boolean metricsReporterInstalled; + + @WeaveAllConstructors + public KafkaProducer_Instrumentation() { + if (!metricsReporterInstalled) { + NewRelic.getAgent().getLogger().log(Level.INFO, + "newrelic-kafka-clients-enhancements engaged for producer {0}", clientId); + metrics.addReporter(new NewRelicMetricsReporter()); + metricsReporterInstalled = true; + } + } +} diff --git a/instrumentation/kafka-clients-node-metrics-3.7.0/src/test/java/com/nr/instrumentation/kafka/CachedKafkaMetricsTest.java b/instrumentation/kafka-clients-node-metrics-3.7.0/src/test/java/com/nr/instrumentation/kafka/CachedKafkaMetricsTest.java new file mode 100644 index 0000000000..86ed5a5a1c --- /dev/null +++ b/instrumentation/kafka-clients-node-metrics-3.7.0/src/test/java/com/nr/instrumentation/kafka/CachedKafkaMetricsTest.java @@ -0,0 +1,217 @@ +package com.nr.instrumentation.kafka; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyFloat; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +import java.util.HashMap; +import org.apache.kafka.common.metrics.KafkaMetric; +import org.apache.kafka.common.metrics.Measurable; +import org.apache.kafka.common.metrics.stats.Avg; +import org.apache.kafka.common.metrics.stats.CumulativeSum; +import org.apache.kafka.common.metrics.stats.Max; +import org.apache.kafka.common.metrics.stats.Value; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.stubbing.OngoingStubbing; + +@RunWith(MockitoJUnitRunner.class) +public class CachedKafkaMetricsTest { + + @Mock + private FiniteMetricRecorder finiteMetricRecorder; + + @Test + public void cachedKafkaVersion() { + KafkaMetric versionKafkaMetric = createKafkaMetric(KafkaMetricType.VERSION); + + CachedKafkaMetric cachedKafkaMetric = CachedKafkaMetrics.newCachedKafkaMetric(versionKafkaMetric); + + assertThat(cachedKafkaMetric.getClass().getName(), + equalTo("com.nr.instrumentation.kafka.CachedKafkaMetrics$CachedKafkaVersion")); + assertThat(cachedKafkaMetric.isValid(), is(true)); + assertThat(cachedKafkaMetric.displayName(), + equalTo("app-info/version/42")); + + cachedKafkaMetric.report(finiteMetricRecorder); + verify(finiteMetricRecorder).recordMetric(eq("MessageBroker/Kafka/Internal/app-info/version/42"), eq(1.0f)); + } + + @Test + public void invalidCachedKafkaMetric() { + KafkaMetric invalidKafkaMetric = createKafkaMetric(KafkaMetricType.INVALID); + + CachedKafkaMetric cachedKafkaMetric = CachedKafkaMetrics.newCachedKafkaMetric(invalidKafkaMetric); + + assertThat(cachedKafkaMetric.getClass().getName(), + equalTo("com.nr.instrumentation.kafka.CachedKafkaMetrics$InvalidCachedKafkaMetric")); + assertThat(cachedKafkaMetric.isValid(), is(false)); + assertThat(cachedKafkaMetric.displayName(), + equalTo("data/invalid {}")); + + cachedKafkaMetric.report(finiteMetricRecorder); + verifyNoInteractions(finiteMetricRecorder); + } + + @Test + public void cachedKafkaSummary() { + KafkaMetric summaryKafkaMetric = createKafkaMetric(KafkaMetricType.SUMMARY); + + CachedKafkaMetric cachedKafkaMetric 
= CachedKafkaMetrics.newCachedKafkaMetric(summaryKafkaMetric);
+
+        assertThat(cachedKafkaMetric.getClass().getName(),
+                equalTo("com.nr.instrumentation.kafka.CachedKafkaMetrics$CachedKafkaSummary"));
+        assertThat(cachedKafkaMetric.isValid(), is(true));
+        assertThat(cachedKafkaMetric.displayName(),
+                equalTo("data/summary {}"));
+
+        cachedKafkaMetric.report(finiteMetricRecorder);
+        verify(finiteMetricRecorder).recordMetric(eq("MessageBroker/Kafka/Internal/data/summary"), eq(2.0f));
+    }
+
+    @Test
+    public void cachedKafkaCounter() {
+        KafkaMetric counterKafkaMetric = createKafkaMetric(KafkaMetricType.COUNTER);
+
+        CachedKafkaMetric cachedKafkaMetric = CachedKafkaMetrics.newCachedKafkaMetric(counterKafkaMetric);
+
+        assertThat(cachedKafkaMetric.getClass().getName(),
+                equalTo("com.nr.instrumentation.kafka.CachedKafkaMetrics$CachedKafkaCounter"));
+        assertThat(cachedKafkaMetric.isValid(), is(true));
+        assertThat(cachedKafkaMetric.displayName(),
+                equalTo("data/something {}"));
+
+        when(finiteMetricRecorder.tryRecordMetric(any(), anyFloat()))
+                .thenReturn(true);
+
+        cachedKafkaMetric.report(finiteMetricRecorder);
+        verify(finiteMetricRecorder).tryRecordMetric(eq("MessageBroker/Kafka/Internal/data/something"), eq(3.0f));
+        verifyNoMoreInteractions(finiteMetricRecorder);
+
+        cachedKafkaMetric.report(finiteMetricRecorder);
+        verify(finiteMetricRecorder).tryRecordMetric(eq("MessageBroker/Kafka/Internal/data/something"), eq(4.0f));
+        verify(finiteMetricRecorder).incrementCounter(eq("MessageBroker/Kafka/Internal/data/something-counter"), eq(1));
+    }
+
+    @Test
+    public void cachedKafkaCounterTotal() {
+        KafkaMetric counterKafkaMetric = createKafkaMetric(KafkaMetricType.COUNTER_TOTAL);
+
+        CachedKafkaMetric cachedKafkaMetric = CachedKafkaMetrics.newCachedKafkaMetric(counterKafkaMetric);
+
+        assertThat(cachedKafkaMetric.getClass().getName(),
+                equalTo("com.nr.instrumentation.kafka.CachedKafkaMetrics$CachedKafkaCounter"));
+        assertThat(cachedKafkaMetric.isValid(), is(true));
+        assertThat(cachedKafkaMetric.displayName(),
+                equalTo("data/something-total {}"));
+
+        when(finiteMetricRecorder.tryRecordMetric(any(), anyFloat()))
+                .thenReturn(true);
+
+        cachedKafkaMetric.report(finiteMetricRecorder);
+        verify(finiteMetricRecorder).tryRecordMetric(eq("MessageBroker/Kafka/Internal/data/something-total"), eq(4.0f));
+        verifyNoMoreInteractions(finiteMetricRecorder);
+
+        cachedKafkaMetric.report(finiteMetricRecorder);
+        verify(finiteMetricRecorder).tryRecordMetric(eq("MessageBroker/Kafka/Internal/data/something-total"), eq(5.0f));
+        verify(finiteMetricRecorder).incrementCounter(eq("MessageBroker/Kafka/Internal/data/something-counter"), eq(1));
+    }
+
+    @Test
+    public void cachedKafkaCounterTotalCantTrustValue() {
+        KafkaMetric counterKafkaMetric = createKafkaMetric(KafkaMetricType.COUNTER_TOTAL);
+
+        CachedKafkaMetric cachedKafkaMetric = CachedKafkaMetrics.newCachedKafkaMetric(counterKafkaMetric);
+
+        assertThat(cachedKafkaMetric.getClass().getName(),
+                equalTo("com.nr.instrumentation.kafka.CachedKafkaMetrics$CachedKafkaCounter"));
+        assertThat(cachedKafkaMetric.isValid(), is(true));
+        assertThat(cachedKafkaMetric.displayName(),
+                equalTo("data/something-total {}"));
+
+        // when this method returns false, it means that the value was not recorded
+        // and thus, incrementCounter will not be called.
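// ----------------------------------------------------------------------------
// Editor's aside (illustrative, not part of the patch): tracing
// CachedKafkaCounter's state through the two report() calls below, with
// tryRecordMetric(..) stubbed to return false:
//
//   report #1: metricValue() = 4 -> tryRecordMetric fails -> previous reset to -1, no counter recorded
//   report #2: metricValue() = 5 -> tryRecordMetric fails -> previous reset again, still no counter
//
// hence the two verifyNoMoreInteractions(..) checks: incrementCounter(..) must never fire.
// ----------------------------------------------------------------------------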
+        when(finiteMetricRecorder.tryRecordMetric(any(), anyFloat()))
+                .thenReturn(false);
+
+        cachedKafkaMetric.report(finiteMetricRecorder);
+        verify(finiteMetricRecorder).tryRecordMetric(eq("MessageBroker/Kafka/Internal/data/something-total"), eq(4.0f));
+        verifyNoMoreInteractions(finiteMetricRecorder);
+
+        cachedKafkaMetric.report(finiteMetricRecorder);
+        verify(finiteMetricRecorder).tryRecordMetric(eq("MessageBroker/Kafka/Internal/data/something-total"), eq(5.0f));
+        verifyNoMoreInteractions(finiteMetricRecorder);
+    }
+
+
+    @Test
+    public void cachedKafkaWithoutMeasurable() {
+        KafkaMetric counterKafkaMetric = createKafkaMetric(KafkaMetricType.WITHOUT_MEASURABLE);
+
+        CachedKafkaMetric cachedKafkaMetric = CachedKafkaMetrics.newCachedKafkaMetric(counterKafkaMetric);
+
+        assertThat(cachedKafkaMetric.getClass().getName(),
+                equalTo("com.nr.instrumentation.kafka.CachedKafkaMetrics$CachedKafkaSummary"));
+        assertThat(cachedKafkaMetric.isValid(), is(true));
+        assertThat(cachedKafkaMetric.displayName(),
+                equalTo("data/unmeasurable {}"));
+
+        cachedKafkaMetric.report(finiteMetricRecorder);
+        verify(finiteMetricRecorder).recordMetric(eq("MessageBroker/Kafka/Internal/data/unmeasurable"), eq(6.0f));
+    }
+
+    private KafkaMetric createKafkaMetric(KafkaMetricType metricType) {
+        KafkaMetric kafkaMetric = mock(KafkaMetric.class, Mockito.RETURNS_DEEP_STUBS);
+        when(kafkaMetric.metricName().name())
+                .thenReturn(metricType.metricName);
+        when(kafkaMetric.metricName().group())
+                .thenReturn(metricType.metricGroup);
+
+        OngoingStubbing<Object> valuesStubbing = when(kafkaMetric.metricValue());
+        for (Object value : metricType.values) {
+            valuesStubbing = valuesStubbing.thenReturn(value);
+        }
+
+        when(kafkaMetric.measurable())
+                .thenReturn(metricType.measurable);
+        when(kafkaMetric.metricName().tags())
+                .thenReturn(new HashMap<>());
+        return kafkaMetric;
+    }
+
+    /**
+     * These are the scenarios being tested and respective values.
+     */
+    private enum KafkaMetricType {
+        VERSION("app-info", "version", new Value(), 42),
+        INVALID("data", "invalid", new Max(), "towel"),
+        SUMMARY("data", "summary", new Avg(), 2.0f),
+        COUNTER("data", "something", new CumulativeSum(), 3, 4),
+        COUNTER_TOTAL("data", "something-total", new CumulativeSum(), 4, 5),
+        WITHOUT_MEASURABLE("data", "unmeasurable", null, 6),
+        ;
+
+        KafkaMetricType(String metricGroup, String metricName, Measurable measurable, Object...
values) {
+            this.metricGroup = metricGroup;
+            this.metricName = metricName;
+            this.values = values;
+            this.measurable = measurable;
+        }
+
+        private final String metricGroup;
+        private final String metricName;
+        private final Object[] values;
+        private final Measurable measurable;
+    }
+}
\ No newline at end of file
diff --git a/instrumentation/kafka-clients-node-metrics-3.7.0/src/test/java/com/nr/instrumentation/kafka/FiniteMetricRecorderTest.java b/instrumentation/kafka-clients-node-metrics-3.7.0/src/test/java/com/nr/instrumentation/kafka/FiniteMetricRecorderTest.java
new file mode 100644
index 0000000000..e9cad0f3bd
--- /dev/null
+++ b/instrumentation/kafka-clients-node-metrics-3.7.0/src/test/java/com/nr/instrumentation/kafka/FiniteMetricRecorderTest.java
@@ -0,0 +1,70 @@
+package com.nr.instrumentation.kafka;
+
+import static org.hamcrest.Matchers.is;
+import static org.junit.Assert.assertThat;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mockStatic;
+
+import com.newrelic.api.agent.NewRelic;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.MockedStatic;
+
+public class FiniteMetricRecorderTest {
+
+    private FiniteMetricRecorder recorder;
+    private static final String METRIC = "metric";
+    private static final float VALUE = 42.0f;
+    private static final int COUNT = 11;
+
+
+    @Before
+    public void setUp() {
+        recorder = new FiniteMetricRecorder();
+    }
+
+    @Test
+    public void incrementCounter() {
+        try (MockedStatic<NewRelic> newRelic = mockStatic(NewRelic.class)) {
+            recorder.incrementCounter(METRIC, COUNT);
+            newRelic.verify(() -> NewRelic.incrementCounter(eq(METRIC), eq(COUNT)));
+        }
+    }
+
+    @Test
+    public void tryRecordMetric() {
+        try (MockedStatic<NewRelic> newRelic = mockStatic(NewRelic.class)) {
+            boolean returnValue = recorder.tryRecordMetric(METRIC, VALUE);
+
+            assertThat(returnValue, is(true));
+            newRelic.verify(() -> NewRelic.recordMetric(eq(METRIC), eq(VALUE)));
+        }
+    }
+
+    @Test
+    public void tryRecordInfiniteMetric() {
+        try (MockedStatic<NewRelic> newRelic = mockStatic(NewRelic.class)) {
+            boolean returnValue = recorder.tryRecordMetric(METRIC, Float.POSITIVE_INFINITY);
+
+            assertThat(returnValue, is(false));
+            newRelic.verifyNoInteractions();
+        }
+    }
+
+    @Test
+    public void recordMetric() {
+        try (MockedStatic<NewRelic> newRelic = mockStatic(NewRelic.class)) {
+            recorder.recordMetric(METRIC, VALUE);
+            newRelic.verify(() -> NewRelic.recordMetric(eq(METRIC), eq(VALUE)));
+        }
+    }
+
+    @Test
+    public void recordInfiniteMetric() {
+        try (MockedStatic<NewRelic> newRelic = mockStatic(NewRelic.class)) {
+            recorder.recordMetric(METRIC, Float.POSITIVE_INFINITY);
+            newRelic.verifyNoInteractions();
+        }
+    }
+
+}
\ No newline at end of file
diff --git a/instrumentation/kafka-clients-node-metrics-3.7.0/src/test/java/com/nr/instrumentation/kafka/MetricNameUtilTest.java b/instrumentation/kafka-clients-node-metrics-3.7.0/src/test/java/com/nr/instrumentation/kafka/MetricNameUtilTest.java
new file mode 100644
index 0000000000..0c881e57f4
--- /dev/null
+++ b/instrumentation/kafka-clients-node-metrics-3.7.0/src/test/java/com/nr/instrumentation/kafka/MetricNameUtilTest.java
@@ -0,0 +1,178 @@
+package com.nr.instrumentation.kafka;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.junit.Assert.assertThat;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.when;
+
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.kafka.common.metrics.KafkaMetric;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import
org.mockito.Answers; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class MetricNameUtilTest { + + @Mock(answer = Answers.RETURNS_DEEP_STUBS) + private KafkaMetric kafkaMetric; + + @Test + public void buildDisplayName() { + setupKafkaMetric(); + String actual = MetricNameUtil.buildDisplayName(kafkaMetric); + assertThat(actual, equalTo("group/name {}")); + } + + // not testing with more than one tag because iteration order in a Hashmap is not guaranteed. + @Test + public void buildDisplayName_withTag() { + setupKafkaMetric(Tag.TOPIC); + String actual = MetricNameUtil.buildDisplayName(kafkaMetric); + assertThat(actual, equalTo("group/name {topic=t}")); + } + + @Test + public void buildMetricName() { + setupKafkaMetric(); + String actual = MetricNameUtil.buildMetricName(kafkaMetric); + assertThat(actual, equalTo("MessageBroker/Kafka/Internal/group/name")); + } + + @Test + public void buildMetricName_withAllTags() { + setupKafkaMetric(Tag.CLIENT_ID, Tag.TOPIC, Tag.NODE_ID); + String actual = MetricNameUtil.buildMetricName(kafkaMetric); + assertThat(actual, equalTo("MessageBroker/Kafka/Internal/group/topic/t/client/ci/name")); + } + + @Test + public void buildMetricName_withClientId() { + setupKafkaMetric(Tag.CLIENT_ID); + String actual = MetricNameUtil.buildMetricName(kafkaMetric); + assertThat(actual, equalTo("MessageBroker/Kafka/Internal/group/client/ci/name")); + } + + @Test + public void buildMetricName_withTopic() { + setupKafkaMetric(Tag.TOPIC); + String actual = MetricNameUtil.buildMetricName(kafkaMetric); + assertThat(actual, equalTo("MessageBroker/Kafka/Internal/group/name")); + } + + @Test + public void buildMetricName_withNodeId() { + setupKafkaMetric(Tag.NODE_ID); + String actual = MetricNameUtil.buildMetricName(kafkaMetric); + assertThat(actual, equalTo("MessageBroker/Kafka/Internal/group/name")); + } + + @Test + public void buildMetricName_withClientIdTopic() { + setupKafkaMetric(Tag.CLIENT_ID, Tag.TOPIC); + String actual = MetricNameUtil.buildMetricName(kafkaMetric); + assertThat(actual, equalTo("MessageBroker/Kafka/Internal/group/topic/t/client/ci/name")); + } + + @Test + public void buildMetricName_withClientIdNodeId() { + setupKafkaMetric(Tag.CLIENT_ID, Tag.NODE_ID); + String actual = MetricNameUtil.buildMetricName(kafkaMetric); + assertThat(actual, equalTo("MessageBroker/Kafka/Internal/group/node/ni/client/ci/name")); + } + + @Test + public void buildMetricName_withTopicNodeId() { + setupKafkaMetric(Tag.TOPIC, Tag.NODE_ID); + String actual = MetricNameUtil.buildMetricName(kafkaMetric); + assertThat(actual, equalTo("MessageBroker/Kafka/Internal/group/name")); + } + + @Test + public void buildMetricName_nameOverride() { + setupKafkaMetric(); + String actual = MetricNameUtil.buildMetricName(kafkaMetric, "diffName"); + assertThat(actual, equalTo("MessageBroker/Kafka/Internal/group/diffName")); + } + + @Test + public void buildMetricName_nameOverride_withAllTags() { + setupKafkaMetric(Tag.CLIENT_ID, Tag.TOPIC, Tag.NODE_ID); + String actual = MetricNameUtil.buildMetricName(kafkaMetric, "diffName"); + assertThat(actual, equalTo("MessageBroker/Kafka/Internal/group/topic/t/client/ci/diffName")); + } + + @Test + public void buildMetricName_nameOverride_withClientId() { + setupKafkaMetric(Tag.CLIENT_ID); + String actual = MetricNameUtil.buildMetricName(kafkaMetric, "diffName"); + assertThat(actual, equalTo("MessageBroker/Kafka/Internal/group/client/ci/diffName")); + } + + @Test + public void 
buildMetricName_nameOverride_withTopic() {
+        setupKafkaMetric(Tag.TOPIC);
+        String actual = MetricNameUtil.buildMetricName(kafkaMetric, "diffName");
+        assertThat(actual, equalTo("MessageBroker/Kafka/Internal/group/diffName"));
+    }
+
+    @Test
+    public void buildMetricName_nameOverride_withNodeId() {
+        setupKafkaMetric(Tag.NODE_ID);
+        String actual = MetricNameUtil.buildMetricName(kafkaMetric, "diffName");
+        assertThat(actual, equalTo("MessageBroker/Kafka/Internal/group/diffName"));
+    }
+
+    @Test
+    public void buildMetricName_nameOverride_withClientIdTopic() {
+        setupKafkaMetric(Tag.CLIENT_ID, Tag.TOPIC);
+        String actual = MetricNameUtil.buildMetricName(kafkaMetric, "diffName");
+        assertThat(actual, equalTo("MessageBroker/Kafka/Internal/group/topic/t/client/ci/diffName"));
+    }
+
+    @Test
+    public void buildMetricName_nameOverride_withClientIdNodeId() {
+        setupKafkaMetric(Tag.CLIENT_ID, Tag.NODE_ID);
+        String actual = MetricNameUtil.buildMetricName(kafkaMetric, "diffName");
+        assertThat(actual, equalTo("MessageBroker/Kafka/Internal/group/node/ni/client/ci/diffName"));
+    }
+
+    @Test
+    public void buildMetricName_nameOverride_withTopicNodeId() {
+        setupKafkaMetric(Tag.TOPIC, Tag.NODE_ID);
+        String actual = MetricNameUtil.buildMetricName(kafkaMetric, "diffName");
+        assertThat(actual, equalTo("MessageBroker/Kafka/Internal/group/diffName"));
+    }
+
+    private void setupKafkaMetric(Tag... tags) {
+        reset(kafkaMetric);
+        when(kafkaMetric.metricName().group())
+                .thenReturn("group");
+        when(kafkaMetric.metricName().name())
+                .thenReturn("name");
+
+        Map<String, String> tagMap = new HashMap<>();
+        for (Tag tag : tags) {
+            tagMap.put(tag.label, tag.value);
+        }
+        when(kafkaMetric.metricName().tags())
+                .thenReturn(tagMap);
+    }
+
+    private enum Tag {
+        CLIENT_ID("client-id", "ci"),
+        NODE_ID("node-id", "ni"),
+        TOPIC("topic", "t"),
+        ;
+        private final String label;
+        private final String value;
+
+        Tag(String label, String value) {
+            this.label = label;
+            this.value = value;
+        }
+    }
+}
\ No newline at end of file
diff --git a/instrumentation/kafka-clients-node-metrics-3.7.0/src/test/java/com/nr/instrumentation/kafka/NewRelicMetricsReporterTest.java b/instrumentation/kafka-clients-node-metrics-3.7.0/src/test/java/com/nr/instrumentation/kafka/NewRelicMetricsReporterTest.java
new file mode 100644
index 0000000000..4089dc1c8e
--- /dev/null
+++ b/instrumentation/kafka-clients-node-metrics-3.7.0/src/test/java/com/nr/instrumentation/kafka/NewRelicMetricsReporterTest.java
@@ -0,0 +1,130 @@
+package com.nr.instrumentation.kafka;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.newrelic.agent.introspec.InstrumentationTestConfig;
+import com.newrelic.agent.introspec.InstrumentationTestRunner;
+import com.newrelic.agent.introspec.Introspector;
+import com.newrelic.agent.introspec.TracedMetricData;
+import java.lang.reflect.Method;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import org.apache.kafka.common.metrics.KafkaMetric;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+/**
+ * These are not unit tests but a functionality test for the whole class.
+ * Unit testing the methods would require a lot of reflection to check that it worked.
+ * It uses the InstrumentationTestRunner, even though it is not testing any weave class,
+ * so that the introspector picks up the calls to NewRelic. This avoids static mocking of
+ * NewRelic, which is extra complicated because static mocking is thread based, and there
+ * are other threads in the code being tested.
+ */
+@RunWith(InstrumentationTestRunner.class)
+@InstrumentationTestConfig(includePrefixes = "org.apache.kafka")
+public class NewRelicMetricsReporterTest {
+    private Introspector introspector;
+    private static final KafkaMetric METRIC1 = getMetricMock("metric1", 42.0f);
+    private static final KafkaMetric METRIC2 = getMetricMock("metric2", 11.0f);
+
+    @Before
+    public void setup() {
+        introspector = InstrumentationTestRunner.getIntrospector();
+    }
+
+    @Test
+    public void initialLoad() throws InterruptedException {
+        List<KafkaMetric> initialMetrics = Arrays.asList(METRIC1, METRIC2);
+
+        NewRelicMetricsReporter reporter = initMetricsReporter(initialMetrics, Collections.emptyList());
+
+        Map<String, TracedMetricData> unscopedMetrics = introspector.getUnscopedMetrics();
+        TracedMetricData metric1 = unscopedMetrics.get("MessageBroker/Kafka/Internal/group/metric1");
+        assertEquals(42.0f, metric1.getTotalTimeInSec(), 0.1f);
+        TracedMetricData metric2 = unscopedMetrics.get("MessageBroker/Kafka/Internal/group/metric2");
+        assertEquals(11.0f, metric2.getTotalTimeInSec(), 0.1f);
+
+        reporter.close();
+    }
+
+    @Test
+    public void laterLoad() throws Exception {
+        List<KafkaMetric> otherMetrics = Arrays.asList(METRIC1, METRIC2);
+
+        NewRelicMetricsReporter reporter = initMetricsReporter(Collections.emptyList(), otherMetrics);
+
+        Map<String, TracedMetricData> unscopedMetrics = introspector.getUnscopedMetrics();
+        assertEquals(0, unscopedMetrics.size());
+
+        forceHarvest(reporter);
+
+        unscopedMetrics = introspector.getUnscopedMetrics();
+        TracedMetricData metric1 = unscopedMetrics.get("MessageBroker/Kafka/Internal/group/metric1");
+        assertEquals(42.0f, metric1.getTotalTimeInSec(), 0.1f);
+        TracedMetricData metric2 = unscopedMetrics.get("MessageBroker/Kafka/Internal/group/metric2");
+        assertEquals(11.0f, metric2.getTotalTimeInSec(), 0.1f);
+
+        reporter.close();
+    }
+
+    @Test
+    public void removeMetric() throws Exception {
+        List<KafkaMetric> otherMetrics = Arrays.asList(METRIC1, METRIC2);
+
+        NewRelicMetricsReporter reporter = initMetricsReporter(otherMetrics, Collections.emptyList());
+
+        Map<String, TracedMetricData> unscopedMetrics = introspector.getUnscopedMetrics();
+        assertEquals(2, unscopedMetrics.size());
+
+        introspector.clear();
+        reporter.metricRemoval(METRIC2);
+        forceHarvest(reporter);
+
+        unscopedMetrics = introspector.getUnscopedMetrics();
+        assertEquals(1, unscopedMetrics.size());
+        TracedMetricData metric1 = unscopedMetrics.get("MessageBroker/Kafka/Internal/group/metric1");
+        assertEquals(42.0f, metric1.getTotalTimeInSec(), 0.1f);
+
+        reporter.close();
+    }
+
+    protected static NewRelicMetricsReporter initMetricsReporter(List<KafkaMetric> initMetrics, Collection<KafkaMetric> otherMetrics) throws InterruptedException {
+        NewRelicMetricsReporter metricsReporter = new NewRelicMetricsReporter();
+        metricsReporter.init(initMetrics);
+        // init triggers the first harvest that happens in a different thread. Sleeping to let it finish.
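// Editor's aside (a sketch under stated assumptions, not part of the patch):
// polling the introspector until the expected metric shows up would be less
// timing-sensitive than the fixed sleep below; `expectedMetricName` is a
// hypothetical parameter introduced only for illustration:
//
//   long deadline = System.currentTimeMillis() + 5_000;
//   while (System.currentTimeMillis() < deadline
//           && !InstrumentationTestRunner.getIntrospector().getUnscopedMetrics().containsKey(expectedMetricName)) {
//       Thread.sleep(50L);
//   }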
+ Thread.sleep(100L); + + for (KafkaMetric otherMetric : otherMetrics) { + metricsReporter.metricChange(otherMetric); + } + return metricsReporter; + } + + protected static KafkaMetric getMetricMock(String name, Object value) { + KafkaMetric metric = mock(KafkaMetric.class, RETURNS_DEEP_STUBS); + when(metric.metricName().group()) + .thenReturn("group"); + when(metric.metricName().name()) + .thenReturn(name); + when(metric.metricValue()) + .thenReturn(value); + return metric; + } + + + private void forceHarvest(NewRelicMetricsReporter reporter) throws Exception { + Method report = NewRelicMetricsReporter.class.getDeclaredMethod("report"); + if (!report.isAccessible()) { + report.setAccessible(true); + } + report.invoke(reporter); + } +} \ No newline at end of file diff --git a/instrumentation/kafka-clients-node-metrics-3.7.0/src/test/java/com/nr/instrumentation/kafka/ThreadFactoriesTest.java b/instrumentation/kafka-clients-node-metrics-3.7.0/src/test/java/com/nr/instrumentation/kafka/ThreadFactoriesTest.java new file mode 100644 index 0000000000..7d6cc7015b --- /dev/null +++ b/instrumentation/kafka-clients-node-metrics-3.7.0/src/test/java/com/nr/instrumentation/kafka/ThreadFactoriesTest.java @@ -0,0 +1,23 @@ +package com.nr.instrumentation.kafka; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertThat; + +import java.util.concurrent.ThreadFactory; +import org.junit.Test; + +public class ThreadFactoriesTest { + + @Test + public void build() { + ThreadFactory threadFactory = ThreadFactories.build("TestService"); + Thread thread1 = threadFactory.newThread(() -> {}); + Thread thread2 = threadFactory.newThread(() -> {}); + + assertThat(thread1.getName(), equalTo("New Relic TestService #1")); + assertThat(thread2.getName(), equalTo("New Relic TestService #2")); + assertThat(thread1.isDaemon(), is(true)); + assertThat(thread2.isDaemon(), is(true)); + } +} \ No newline at end of file diff --git a/instrumentation/mongodb-3.7/build.gradle b/instrumentation/mongodb-3.7/build.gradle index 4375c1de8b..448bca780c 100644 --- a/instrumentation/mongodb-3.7/build.gradle +++ b/instrumentation/mongodb-3.7/build.gradle @@ -18,6 +18,7 @@ verifyInstrumentation { // MongoClientOptions and MongoClientSettings can be present thus both the mongodb-3.1 and mongodb-3.7 // instrumentation modules can apply at the same time without weaving the same thing. 
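// Editor's aside (illustrative, not part of the patch): in the verifyInstrumentation
// DSL used here, `passes` asserts that this weave module applies cleanly to every
// published version in the given Maven range, while `fails` asserts that it does not.
// `[3.7.0-rc0,)` is a half-open range: 3.7.0-rc0 and everything newer. The new
// mongodb-driver-sync range below mirrors the mongo-java-driver one because MongoDB
// publishes the sync driver as a separate artifact from 3.7.0 onward.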
passes('org.mongodb:mongo-java-driver:[3.7.0-rc0,)')
+    passes('org.mongodb:mongodb-driver-sync:[3.7.0-rc0,)')
     fails('org.mongodb:mongo-java-driver:[0.9.1,3.7.0-rc0)')
 }
diff --git a/instrumentation/netty-4.1.16/src/main/java/com/agent/instrumentation/netty4116/Http2RequestHeaderWrapper.java b/instrumentation/netty-4.1.16/src/main/java/com/agent/instrumentation/netty4116/Http2RequestHeaderWrapper.java
index 904da2cd1b..8bcbefcd35 100644
--- a/instrumentation/netty-4.1.16/src/main/java/com/agent/instrumentation/netty4116/Http2RequestHeaderWrapper.java
+++ b/instrumentation/netty-4.1.16/src/main/java/com/agent/instrumentation/netty4116/Http2RequestHeaderWrapper.java
@@ -38,40 +38,79 @@ public class Http2RequestHeaderWrapper extends ExtendedRequest {
 
     public Http2RequestHeaderWrapper(Http2Headers http2Headers) {
         super();
         this.http2Headers = http2Headers;
-        this.method = http2Headers.method();
-        this.path = http2Headers.path();
-        this.authority = http2Headers.authority();
-
-        Set<Cookie> rawCookies = null;
-        if (http2Headers.contains(HttpHeaderNames.COOKIE)) {
-            CharSequence cookie = http2Headers.get(HttpHeaderNames.COOKIE);
-            try {
-                if (cookie != null) {
-                    rawCookies = ServerCookieDecoder.STRICT.decode(cookie.toString());
-                }
-            } catch (Exception e) {
-                AgentBridge.getAgent().getLogger().log(Level.FINER, e, "Unable to decode cookie: {0}", cookie);
-                rawCookies = Collections.emptySet();
-            }
-        }
-        this.cookies = rawCookies;
+        this.method = getMethodHeader();
+        this.path = getPathHeader();
+        this.authority = getAuthorityHeader();
+        this.cookies = getCookies();
+        this.parameters = getParameters();
+    }
 
+    private Map<String, List<String>> getParameters() {
         Map<String, List<String>> params = null;
-        CharSequence path = http2Headers.path();
         try {
             String uri;
             if (path != null) {
                 uri = path.toString();
-                uri = URL_REPLACEMENT_PATTERN.matcher(uri).replaceAll("%25"); // Escape any percent signs in the URI
+                // Escape any percent signs in the URI
+                uri = URL_REPLACEMENT_PATTERN.matcher(uri).replaceAll("%25");
                 QueryStringDecoder decoderQuery = new QueryStringDecoder(uri);
                 params = decoderQuery.parameters();
-            }
         } catch (Exception e) {
             AgentBridge.getAgent().getLogger().log(Level.FINER, e, "Unable to decode URI: {0}", path);
             params = new LinkedHashMap<>();
         }
-        this.parameters = params;
+        return params;
+    }
+
+    private Set<Cookie> getCookies() {
+        Set<Cookie> rawCookies = null;
+        try {
+            if (http2Headers.contains(HttpHeaderNames.COOKIE)) {
+                CharSequence cookie = http2Headers.get(HttpHeaderNames.COOKIE);
+                try {
+                    if (cookie != null) {
+                        rawCookies = ServerCookieDecoder.STRICT.decode(cookie.toString());
+                    }
+                } catch (Exception e) {
+                    AgentBridge.getAgent().getLogger().log(Level.FINER, e, "Unable to decode cookie: {0}", cookie);
+                    rawCookies = Collections.emptySet();
+                }
+            }
+        } catch (Exception e) {
+            AgentBridge.getAgent().getLogger().log(Level.FINER, e, "Unable to get Http2Headers cookies: {0}", e.getMessage());
+        }
+        return rawCookies;
+    }
+
+    private CharSequence getMethodHeader() {
+        CharSequence method = null;
+        try {
+            method = http2Headers.method();
+        } catch (Exception e) {
+            AgentBridge.getAgent().getLogger().log(Level.FINER, e, "Unable to get Http2Headers method: {0}", e.getMessage());
+        }
+        return method;
+    }
+
+    private CharSequence getPathHeader() {
+        CharSequence path = null;
+        try {
+            path = http2Headers.path();
+        } catch (Exception e) {
+            AgentBridge.getAgent().getLogger().log(Level.FINER, e, "Unable to get Http2Headers path: {0}", e.getMessage());
+        }
+        return path;
+    }
+
+    private CharSequence getAuthorityHeader() {
+        CharSequence authority = null;
+        try {
+            authority = http2Headers.authority();
+        } catch (Exception e) {
+            AgentBridge.getAgent().getLogger().log(Level.FINER, e, "Unable to get Http2Headers authority: {0}", e.getMessage());
+        }
+        return authority;
     }
 
     @Override
@@ -84,14 +123,18 @@ public String getRequestURI() {
 
     @Override
     public String getHeader(String name) {
-        // HTTP/2 only supports lowercase headers
-        String lowerCaseHeaderName = name.toLowerCase();
-        if (lowerCaseHeaderName.equals(HttpHeaderNames.HOST.toString())) {
-            return getHost();
-        }
+        try {
+            // HTTP/2 only supports lowercase headers
+            String lowerCaseHeaderName = name.toLowerCase();
+            if (lowerCaseHeaderName.equals(HttpHeaderNames.HOST.toString())) {
+                return getHost();
+            }
 
-        if (http2Headers.contains(lowerCaseHeaderName)) {
-            return http2Headers.get(lowerCaseHeaderName).toString();
+            if (http2Headers.contains(lowerCaseHeaderName)) {
+                return http2Headers.get(lowerCaseHeaderName).toString();
+            }
+        } catch (Exception e) {
+            AgentBridge.getAgent().getLogger().log(Level.FINER, e, "Unable to get Http2Headers header: {0}", e.getMessage());
         }
         return null;
     }
@@ -145,10 +188,13 @@ public String getMethod() {
     }
 
     public String getHost() {
-        if (http2Headers.contains(HttpHeaderNames.HOST)) {
-            return http2Headers.get(HttpHeaderNames.HOST).toString();
+        try {
+            if (http2Headers.contains(HttpHeaderNames.HOST)) {
+                return http2Headers.get(HttpHeaderNames.HOST).toString();
+            }
+        } catch (Exception e) {
+            AgentBridge.getAgent().getLogger().log(Level.FINER, "host header is not present in Http2Headers");
         }
-
         if (authority == null) {
             return null;
         }
@@ -157,12 +203,16 @@ public String getHost() {
 
     @Override
     public List<String> getHeaders(String name) {
-        // HTTP/2 only supports lowercase headers
-        String lowerCaseHeaderName = name.toLowerCase();
         List<String> headers = new ArrayList<>();
-        List<CharSequence> allHeaders = http2Headers.getAll(lowerCaseHeaderName);
-        for (CharSequence header : allHeaders) {
-            headers.add(header.toString());
+        try {
+            // HTTP/2 only supports lowercase headers
+            String lowerCaseHeaderName = name.toLowerCase();
+            List<CharSequence> allHeaders = http2Headers.getAll(lowerCaseHeaderName);
+            for (CharSequence header : allHeaders) {
+                headers.add(header.toString());
+            }
+        } catch (Exception e) {
+            AgentBridge.getAgent().getLogger().log(Level.FINER, e, "Unable to get Http2Headers headers: {0}", e.getMessage());
         }
         return headers;
     }
diff --git a/instrumentation/netty-4.1.16/src/main/java/com/agent/instrumentation/netty4116/Http2ResponseHeaderWrapper.java b/instrumentation/netty-4.1.16/src/main/java/com/agent/instrumentation/netty4116/Http2ResponseHeaderWrapper.java
index 0cf70395fc..8bff7ee03b 100644
--- a/instrumentation/netty-4.1.16/src/main/java/com/agent/instrumentation/netty4116/Http2ResponseHeaderWrapper.java
+++ b/instrumentation/netty-4.1.16/src/main/java/com/agent/instrumentation/netty4116/Http2ResponseHeaderWrapper.java
@@ -7,11 +7,14 @@
 
 package com.agent.instrumentation.netty4116;
 
+import com.newrelic.agent.bridge.AgentBridge;
 import com.newrelic.api.agent.ExtendedResponse;
 import com.newrelic.api.agent.HeaderType;
 import io.netty.handler.codec.http.HttpHeaderNames;
 import io.netty.handler.codec.http2.Http2Headers;
 
+import java.util.logging.Level;
+
 public class Http2ResponseHeaderWrapper extends ExtendedResponse {
     private final Http2Headers http2Headers;
 
@@ -26,23 +29,29 @@ public HeaderType getHeaderType() {
 
     @Override
     public void setHeader(String name, String value) {
-        // HTTP/2 only supports lowercase headers
-        String lowerCaseHeaderName = name.toLowerCase();
         try {
+            // HTTP/2 only supports
lowercase headers + String lowerCaseHeaderName = name.toLowerCase(); http2Headers.set(lowerCaseHeaderName, value); - } catch (Exception ignored) { + } catch (Exception e) { + AgentBridge.getAgent().getLogger().log(Level.FINER, e, "Unable to set Http2Headers header: {0}", e.getMessage()); } } @Override public int getStatus() { - CharSequence status = http2Headers.status(); - if (status == null) { - return -1; - } try { - return Integer.parseInt(status.toString()); - } catch (NumberFormatException e) { + CharSequence status = http2Headers.status(); + if (status == null) { + return -1; + } + try { + return Integer.parseInt(status.toString()); + } catch (NumberFormatException e) { + return -1; + } + } catch (Exception e) { + AgentBridge.getAgent().getLogger().log(Level.FINER, e, "Unable to get Http2Headers status: {0}", e.getMessage()); return -1; } } @@ -55,26 +64,34 @@ public String getStatusMessage() { @Override public String getContentType() { - if (http2Headers.contains(HttpHeaderNames.CONTENT_TYPE)) { - CharSequence contentType = http2Headers.get(HttpHeaderNames.CONTENT_TYPE); - if (contentType != null) { - return contentType.toString(); + try { + if (http2Headers.contains(HttpHeaderNames.CONTENT_TYPE)) { + CharSequence contentType = http2Headers.get(HttpHeaderNames.CONTENT_TYPE); + if (contentType != null) { + return contentType.toString(); + } } + } catch (Exception e) { + AgentBridge.getAgent().getLogger().log(Level.FINER, e, "Unable to get Http2Headers content-type: {0}", e.getMessage()); } return null; } @Override public long getContentLength() { - if (http2Headers.contains(HttpHeaderNames.CONTENT_LENGTH)) { - CharSequence contentLength = http2Headers.get(HttpHeaderNames.CONTENT_LENGTH); - if (contentLength != null) { - try { - return Long.parseLong(contentLength.toString()); - } catch (NumberFormatException e) { - return -1; + try { + if (http2Headers.contains(HttpHeaderNames.CONTENT_LENGTH)) { + CharSequence contentLength = http2Headers.get(HttpHeaderNames.CONTENT_LENGTH); + if (contentLength != null) { + try { + return Long.parseLong(contentLength.toString()); + } catch (NumberFormatException e) { + return -1; + } } } + } catch (Exception e) { + AgentBridge.getAgent().getLogger().log(Level.FINER, e, "Unable to get Http2Headers content-length: {0}", e.getMessage()); } return -1; } diff --git a/instrumentation/slick-2.12_3.2.0/build.gradle b/instrumentation/slick-2.12_3.2.0/build.gradle index dd1d404dfa..445b3360c2 100644 --- a/instrumentation/slick-2.12_3.2.0/build.gradle +++ b/instrumentation/slick-2.12_3.2.0/build.gradle @@ -25,10 +25,10 @@ verifyInstrumentation { fails 'com.typesafe.slick:slick_2.11:[3.2.0,)' // scala 12 - passesOnly 'com.typesafe.slick:slick_2.12:[3.2.0,)' + passesOnly 'com.typesafe.slick:slick_2.12:[3.2.0,3.5.0)' // scala 13 - passesOnly 'com.typesafe.slick:slick_2.13:[3.3.2,)' + passesOnly 'com.typesafe.slick:slick_2.13:[3.3.2,3.5.0)' excludeRegex ".*(RC|M)[0-9].*" } diff --git a/instrumentation/slick-2.12_3.2.0/src/main/scala/slick/util/SkipDefaultAsyncExecutor.java b/instrumentation/slick-2.12_3.2.0/src/main/scala/slick/util/SkipDefaultAsyncExecutor.java new file mode 100644 index 0000000000..f4f557dafb --- /dev/null +++ b/instrumentation/slick-2.12_3.2.0/src/main/scala/slick/util/SkipDefaultAsyncExecutor.java @@ -0,0 +1,15 @@ +/* + * + * * Copyright 2024 New Relic Corporation. All rights reserved. 
+ * * SPDX-License-Identifier: Apache-2.0
+ *
+ */
+
+
+package slick.util;
+
+import com.newrelic.api.agent.weaver.SkipIfPresent;
+
+@SkipIfPresent(originalName = "slick.util.AsyncExecutor$DefaultAsyncExecutor")
+public class SkipDefaultAsyncExecutor {
+}
diff --git a/instrumentation/slick-2.12_3.5.0/README.md b/instrumentation/slick-2.12_3.5.0/README.md
new file mode 100644
index 0000000000..54e44a0583
--- /dev/null
+++ b/instrumentation/slick-2.12_3.5.0/README.md
@@ -0,0 +1,11 @@
+# Slick 3.5.0 Instrumentation
+
+This instrumentation hooks into the `run` function parameter of `AsyncExecutor.prioritizedRunnable`.
+We start an async traced method, named `ORM/Slick/slickQuery`, whenever the `run` function is eventually called.
+
+Previous versions of the Slick instrumentation wrapped various executors, execution contexts, and runnables
+related to Slick's `AsyncExecutor`. This caused casting issues in Slick 3.5.0+, because our wrappers effectively upcast
+these types, which have concrete implementations in the Slick source code. Our wrappers ignored critical overridden methods,
+falling through to the default implementations and disrupting Slick's underlying concurrency mechanism.
+
+To avoid these casting issues, we no longer wrap anything except the `run` function.
\ No newline at end of file
diff --git a/instrumentation/slick-2.12_3.5.0/build.gradle b/instrumentation/slick-2.12_3.5.0/build.gradle
new file mode 100644
index 0000000000..c41e27aa6e
--- /dev/null
+++ b/instrumentation/slick-2.12_3.5.0/build.gradle
@@ -0,0 +1,39 @@
+apply plugin: 'scala'
+scala.zincVersion = "1.7.1"
+
+isScalaProjectEnabled(project, "scala-2.13")
+
+dependencies {
+    implementation(project(":newrelic-api"))
+    implementation(project(":agent-bridge"))
+    implementation(project(":newrelic-weaver-api"))
+    implementation(project(":newrelic-weaver-scala-api"))
+    implementation("org.scala-lang:scala-library:2.13.0")
+    implementation("com.typesafe.slick:slick_2.13:3.5.1")
+
+    testImplementation("com.h2database:h2:1.4.190")
+    testImplementation(project(":instrumentation:jdbc-h2")) { transitive = false }
+    testImplementation(project(":instrumentation:jdbc-generic")) { transitive = false }
+}
+
+jar {
+    manifest { attributes 'Implementation-Title': 'com.newrelic.instrumentation.slick-2.12_3.5.0' }
+}
+
+verifyInstrumentation {
+    // scala 11 should be instrumented by another module
+    fails 'com.typesafe.slick:slick_2.11:[3.2.0,)'
+
+    // scala 12
+    passesOnly 'com.typesafe.slick:slick_2.12:[3.5.0,)'
+
+    // scala 13
+    passesOnly 'com.typesafe.slick:slick_2.13:[3.5.0,)'
+
+    excludeRegex ".*(RC|M)[0-9].*"
+}
+
+site {
+    title 'Slick'
+    type 'Datastore'
+}
\ No newline at end of file
diff --git a/instrumentation/slick-2.12_3.5.0/src/main/scala/slick/util/AsyncExecutorUtil.scala b/instrumentation/slick-2.12_3.5.0/src/main/scala/slick/util/AsyncExecutorUtil.scala
new file mode 100644
index 0000000000..4ec6d07eb7
--- /dev/null
+++ b/instrumentation/slick-2.12_3.5.0/src/main/scala/slick/util/AsyncExecutorUtil.scala
@@ -0,0 +1,29 @@
+/*
+ *
+ * * Copyright 2024 New Relic Corporation. All rights reserved.
+ * * SPDX-License-Identifier: Apache-2.0 + * + */ + + +package slick.util + +import com.newrelic.api.agent.{NewRelic, Token, Trace} +import slick.util.AsyncExecutor.PrioritizedRunnable +import slick.util.AsyncExecutor.PrioritizedRunnable.SetConnectionReleased + +object AsyncExecutorUtil{ + + def wrapRunMethod(run: SetConnectionReleased => Unit, token: Token): SetConnectionReleased => Unit = setConnectionReleased => { + doRun(run, setConnectionReleased, token) + } + + @Trace(async = true) + def doRun(run: SetConnectionReleased => Unit, setConnectionReleased: SetConnectionReleased, token: Token): Unit = { + if (token != null) { + token.linkAndExpire(); + } + NewRelic.getAgent.getTracedMethod.setMetricName("ORM", "Slick", "slickQuery") + run(setConnectionReleased) + } +} diff --git a/instrumentation/slick-2.12_3.5.0/src/main/scala/slick/util/AsyncExecutor_Instrumentation.java b/instrumentation/slick-2.12_3.5.0/src/main/scala/slick/util/AsyncExecutor_Instrumentation.java new file mode 100644 index 0000000000..78be5c3d81 --- /dev/null +++ b/instrumentation/slick-2.12_3.5.0/src/main/scala/slick/util/AsyncExecutor_Instrumentation.java @@ -0,0 +1,33 @@ +/* + * + * * Copyright 2024 New Relic Corporation. All rights reserved. + * * SPDX-License-Identifier: Apache-2.0 + * + */ + + +package slick.util; + +import com.newrelic.agent.bridge.AgentBridge; +import com.newrelic.api.agent.Token; +import com.newrelic.api.agent.Transaction; +import com.newrelic.api.agent.weaver.MatchType; +import com.newrelic.api.agent.weaver.Weave; +import com.newrelic.api.agent.weaver.Weaver; + +import scala.Function0; +import scala.Function1; +import slick.util.AsyncExecutor.PrioritizedRunnable; + +@Weave(type = MatchType.Interface, originalName = "slick.util.AsyncExecutor") +public class AsyncExecutor_Instrumentation { + public PrioritizedRunnable prioritizedRunnable(Function0 priority, Function1 run) { + Transaction txn = AgentBridge.getAgent().getTransaction(false); + Token token = null; + if ( txn != null) { + token = txn.getToken(); + run = AsyncExecutorUtil.wrapRunMethod(run, token); + } + return Weaver.callOriginal(); + } +} diff --git a/instrumentation/slick-2.12_3.5.0/src/test/scala/com/nr/agent/instrumentation/slickdb/SlickTest_350.scala b/instrumentation/slick-2.12_3.5.0/src/test/scala/com/nr/agent/instrumentation/slickdb/SlickTest_350.scala new file mode 100644 index 0000000000..59ce10a1a3 --- /dev/null +++ b/instrumentation/slick-2.12_3.5.0/src/test/scala/com/nr/agent/instrumentation/slickdb/SlickTest_350.scala @@ -0,0 +1,161 @@ +/* + * + * * Copyright 2024 New Relic Corporation. All rights reserved. 
+ * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package com.nr.agent.instrumentation.slickdb + +import com.newrelic.agent.introspec.InstrumentationTestConfig +import com.newrelic.agent.introspec.InstrumentationTestRunner +import com.newrelic.agent.introspec.Introspector; +import com.newrelic.api.agent.Trace; + +import scala.language.postfixOps + +import org.junit._ +import org.junit.runner.RunWith; + +import scala.concurrent._ +import scala.concurrent.duration._ +import scala.concurrent.ExecutionContext.Implicits.global; +import slick.jdbc.H2Profile.api._ + +// Copied from slick-3.0.0 module +@RunWith(classOf[InstrumentationTestRunner]) +@InstrumentationTestConfig(includePrefixes = Array("slick", "org.h2")) +class SlickTest_350 { + import SlickTest_350.slickdb + import SlickTest_350.users + + @Test + def testCrud() { + slickInsert(); + slickUpdate(); + slickDelete(); + Await.result(slickResult(), 20 seconds) + val introspector :Introspector = InstrumentationTestRunner.getIntrospector() + awaitFinishedTx(introspector, 4); + val txnNames = introspector.getTransactionNames() + txnNames.forEach(name => { + val metrics = introspector.getMetricsForTransaction(name) + Assert.assertTrue(metrics.containsKey("ORM/Slick/slickQuery")) + }) + } + + /* + This test runs 50 concurrent queries, exceeding the default number of db connections provided by slick (20). + A bug was discovered in the previous instrumentation for slick versions 3.5.0+, where for slick queries occurring + outside a transaction, the executor would eventually pause and stop taking up any new work (despite having available + threads). This module was created in response to that bug (see the README) and this test captures the bug's behavior. + It will fail if run with previous instrumentation and slick <3.5.0. + */ + @Test + def testNoTxn(): Unit = { + try { + Await.result(runConcurrentQueries, 10.seconds) + } catch { + case _: Throwable => Assert.fail("Futures timed out running concurrent queries.") + } + + } + + + @Trace(dispatcher = true) + def slickResult() :Future[String] = { + slickdb.run(users.result).map(units => { + var res :String = "" + units.foreach { + case (id, first_name, last_name) => + res += " * " + id + ": " + first_name + " " + last_name + "\n" + } + "Got results: \n"+res + }) + } + + @Trace(dispatcher = true) + def slickInsert() :Future[String] = { + slickdb.run(users.map(u => (u.id, u.first_name, u.last_name)) += (4, "John", "JacobJingle")).map(rowsInserted => { + "Table now has "+rowsInserted+" users" + }) + } + + @Trace(dispatcher = true) + def slickUpdate() :Future[String] = { + slickdb.run(users.filter(_.id === 1).map(u => (u.first_name)).update(("Fred"))).map(result => { + "result: "+result + }) + } + + @Trace(dispatcher = true) + def slickDelete() :Future[String] = { + // people.filter(p => p.name === "M. 
Odersky").delete + slickdb.run(users.filter(_.id === 2).delete).map(result => { + "result: "+result + }) + } + + def testQuery(id: Int) = { + users.filter(_.id === id) + } + + def runConcurrentQueries = Future.traverse(1 to 50) { x => + val whichId = (x % 3) + 1 + slickdb.run(testQuery(whichId).result).map { v => println(s"Query Result $x: " + v) } + } + + // introspector does not handle async tx finishing very well so we're sleeping as a workaround + private def awaitFinishedTx(introspector :Introspector, expectedTxCount: Int = 1) { + while(introspector.getFinishedTransactionCount() <= expectedTxCount-1) { + Thread.sleep(100) + } + Thread.sleep(100) + } + +} + +class Users(tag: Tag) extends Table[(Int, String, String)] (tag, "user") { + def id = column[Int]("id", O.PrimaryKey) + def first_name = column[String]("first_name") + def last_name = column[String]("last_name") + // Every table needs a * projection with the same type as the table's type parameter + def * = (id, first_name, last_name) +} + +object SlickTest_350 { + val DB_DRIVER: String = "org.h2.Driver"; + val DB_CONNECTION: String = "jdbc:h2:mem:test;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=false"; + + val slickdb = Database.forURL(DB_CONNECTION, DB_DRIVER) + val users = TableQuery[Users] + + @BeforeClass + def setup() { + // set up h2 + Assert.assertNotNull("Unable to create h2 db.", slickdb) + Assert.assertNotNull("Unable to create user table.", users) + Await.result(initData(), 10.seconds) //make sure we don't enter the test suite until the init task finishes + } + + @AfterClass + def teardown() { + // tear down h2 + if (null != slickdb) { + slickdb.close(); + } + } + + def initData() = { + val setup = DBIO.seq( + // Create and populate the tables + users.schema.create, + users ++= Seq( + (1, "Fakus", "Namus"), + (2, "Some", "Guy"), + (3, "Whatser", "Name") + )) + slickdb.run(setup) + } + +} diff --git a/instrumentation/vertx-postgres-sqlclient-4.4.2/build.gradle b/instrumentation/vertx-postgres-sqlclient-4.4.2/build.gradle new file mode 100644 index 0000000000..5320d62cc2 --- /dev/null +++ b/instrumentation/vertx-postgres-sqlclient-4.4.2/build.gradle @@ -0,0 +1,30 @@ +jar { + manifest { + attributes 'Implementation-Title': 'com.newrelic.instrumentation.vertx-sqlclient-4.4.2' + } +} + +dependencies { + implementation(project(":agent-bridge")) + implementation(project(":agent-bridge-datastore")) + implementation("io.vertx:vertx-pg-client:4.4.2") + + testImplementation("io.vertx:vertx-core:4.4.2") + testImplementation("io.vertx:vertx-web:4.4.2") + testImplementation('org.testcontainers:postgresql:1.20.1') + testImplementation('com.ongres.scram:client:2.1') +} + +verifyInstrumentation { + passes 'io.vertx:vertx-pg-client:[4.4.2,)' + excludeRegex '.*SNAPSHOT' + excludeRegex '.*milestone.*' + excludeRegex '.*alpha.*' + excludeRegex '.*Beta.*' + excludeRegex '.*CR.*' +} + +site { + title 'Vertx' + type 'Framework' +} \ No newline at end of file diff --git a/instrumentation/vertx-postgres-sqlclient-4.4.2/src/main/java/com/nr/vertx/sqlclient/instrumentation/NRSqlClientWrapper.java b/instrumentation/vertx-postgres-sqlclient-4.4.2/src/main/java/com/nr/vertx/sqlclient/instrumentation/NRSqlClientWrapper.java new file mode 100644 index 0000000000..3cbc6a74ed --- /dev/null +++ b/instrumentation/vertx-postgres-sqlclient-4.4.2/src/main/java/com/nr/vertx/sqlclient/instrumentation/NRSqlClientWrapper.java @@ -0,0 +1,47 @@ +/* + * + * * Copyright 2024 New Relic Corporation. All rights reserved. 
diff --git a/instrumentation/vertx-postgres-sqlclient-4.4.2/src/main/java/com/nr/vertx/sqlclient/instrumentation/NRSqlClientWrapper.java b/instrumentation/vertx-postgres-sqlclient-4.4.2/src/main/java/com/nr/vertx/sqlclient/instrumentation/NRSqlClientWrapper.java
new file mode 100644
index 0000000000..3cbc6a74ed
--- /dev/null
+++ b/instrumentation/vertx-postgres-sqlclient-4.4.2/src/main/java/com/nr/vertx/sqlclient/instrumentation/NRSqlClientWrapper.java
@@ -0,0 +1,49 @@
+/*
+ *
+ * * Copyright 2024 New Relic Corporation. All rights reserved.
+ * * SPDX-License-Identifier: Apache-2.0
+ *
+ */
+package com.nr.vertx.sqlclient.instrumentation;
+
+import com.newrelic.api.agent.NewRelic;
+import com.newrelic.api.agent.Segment;
+import com.newrelic.api.agent.Token;
+import com.newrelic.api.agent.Trace;
+import io.vertx.core.Handler;
+
+/**
+ * Wraps the original result handler so that the async completion is linked back to the
+ * originating transaction (via the token) and the datastore segment is ended when the
+ * wrapped command finishes.
+ */
+public class NRSqlClientWrapper<E> implements Handler<E> {
+    private final Handler<E> delegate;
+    private Token token;
+    private Segment segment;
+
+    public NRSqlClientWrapper(Handler<E> delegate, Segment segment) {
+        this.delegate = delegate;
+        token = NewRelic.getAgent().getTransaction().getToken();
+        this.segment = segment;
+    }
+
+    @Override
+    @Trace(async = true)
+    public void handle(E event) {
+        if (token != null) {
+            token.linkAndExpire();
+            token = null;
+        }
+
+        if (segment != null) {
+            segment.end();
+            segment = null;
+        }
+
+        if (delegate != null) {
+            NewRelic.getAgent().getTracedMethod().setMetricName("Java", delegate.getClass().getName(), "handle");
+            delegate.handle(event);
+        }
+    }
+}
diff --git a/instrumentation/vertx-postgres-sqlclient-4.4.2/src/main/java/com/nr/vertx/sqlclient/instrumentation/SqlClientUtils.java b/instrumentation/vertx-postgres-sqlclient-4.4.2/src/main/java/com/nr/vertx/sqlclient/instrumentation/SqlClientUtils.java
new file mode 100644
index 0000000000..88bea011db
--- /dev/null
+++ b/instrumentation/vertx-postgres-sqlclient-4.4.2/src/main/java/com/nr/vertx/sqlclient/instrumentation/SqlClientUtils.java
@@ -0,0 +1,42 @@
+/*
+ *
+ * * Copyright 2024 New Relic Corporation. All rights reserved.
+ * * SPDX-License-Identifier: Apache-2.0
+ *
+ */
+package com.nr.vertx.sqlclient.instrumentation;
+
+import com.newrelic.agent.bridge.datastore.OperationAndTableName;
+import com.newrelic.agent.bridge.datastore.R2dbcOperation;
+import io.vertx.sqlclient.impl.command.CommandBase;
+import io.vertx.sqlclient.impl.command.PrepareStatementCommand;
+import io.vertx.sqlclient.impl.command.QueryCommandBase;
+
+public class SqlClientUtils {
+    /**
+     * Extracts the SQL text from a query or prepared-statement command and parses it into
+     * an operation/table-name pair, falling back to "unknown"/"unknown" when it cannot.
+     */
+    public static OperationAndTableName extractSqlFromSqlClientCommand(CommandBase<?> cmd) {
+        String sql = null;
+
+        if (cmd != null) {
+            if (cmd instanceof QueryCommandBase) {
+                QueryCommandBase<?> qCmd = (QueryCommandBase<?>) cmd;
+                sql = qCmd.sql();
+            } else if (cmd instanceof PrepareStatementCommand) {
+                PrepareStatementCommand pCmd = (PrepareStatementCommand) cmd;
+                sql = pCmd.sql();
+            }
+
+            if (sql != null) {
+                OperationAndTableName operationAndTableName = R2dbcOperation.extractFrom(sql);
+                if (operationAndTableName != null) {
+                    return operationAndTableName;
+                }
+            }
+        }
+
+        return R2dbcOperation.UNKNOWN_OPERATION_AND_TABLE_NAME;
+    }
+}
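Editor's note: the helper above delegates SQL parsing to R2dbcOperation. A rough sketch of the expected round trip follows; the parsed values are an assumption about extractFrom's behavior on simple statements, not a documented contract.

// Assumed behavior of the extraction path above (illustrative only):
OperationAndTableName parsed = R2dbcOperation.extractFrom("SELECT * FROM users");
// parsed.getOperation()  -> "SELECT"  (assumed)
// parsed.getTableName()  -> "users"   (assumed)
// SQL that cannot be parsed falls back to UNKNOWN_OPERATION_AND_TABLE_NAME ("unknown"/"unknown").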
diff --git a/instrumentation/vertx-postgres-sqlclient-4.4.2/src/main/java/io/vertx/pgclient/impl/PgSocketConnection_Instrumentation.java b/instrumentation/vertx-postgres-sqlclient-4.4.2/src/main/java/io/vertx/pgclient/impl/PgSocketConnection_Instrumentation.java
new file mode 100644
index 0000000000..9a0a9f33fe
--- /dev/null
+++ b/instrumentation/vertx-postgres-sqlclient-4.4.2/src/main/java/io/vertx/pgclient/impl/PgSocketConnection_Instrumentation.java
@@ -0,0 +1,50 @@
+/*
+ *
+ * * Copyright 2024 New Relic Corporation. All rights reserved.
+ * * SPDX-License-Identifier: Apache-2.0
+ *
+ */
+package io.vertx.pgclient.impl;
+
+import com.newrelic.agent.bridge.datastore.DatastoreVendor;
+import com.newrelic.agent.bridge.datastore.OperationAndTableName;
+import com.newrelic.api.agent.DatastoreParameters;
+import com.newrelic.api.agent.NewRelic;
+import com.newrelic.api.agent.Segment;
+import com.newrelic.api.agent.Trace;
+import com.newrelic.api.agent.weaver.MatchType;
+import com.newrelic.api.agent.weaver.Weave;
+import com.newrelic.api.agent.weaver.Weaver;
+import com.nr.vertx.sqlclient.instrumentation.NRSqlClientWrapper;
+import com.nr.vertx.sqlclient.instrumentation.SqlClientUtils;
+import io.vertx.core.AsyncResult;
+import io.vertx.core.Handler;
+import io.vertx.pgclient.PgConnectOptions;
+import io.vertx.sqlclient.impl.command.CommandBase;
+
+@Weave(type = MatchType.ExactClass, originalName = "io.vertx.pgclient.impl.PgSocketConnection")
+public abstract class PgSocketConnection_Instrumentation {
+    @Trace
+    protected <R> void doSchedule(CommandBase<R> cmd, Handler<AsyncResult<R>> handler) {
+        // Only wrap the handler once; doSchedule may be re-entered for the same command.
+        if (!(handler instanceof NRSqlClientWrapper)) {
+            OperationAndTableName operationAndTableName = SqlClientUtils.extractSqlFromSqlClientCommand(cmd);
+            PgConnectOptions pgConnectOptions = connectOptions();
+            Segment segment = NewRelic.getAgent().getTransaction().startSegment("Query");
+
+            DatastoreParameters databaseParams = DatastoreParameters.product(DatastoreVendor.Postgres.name())
+                    .collection(operationAndTableName.getTableName())
+                    .operation(operationAndTableName.getOperation())
+                    .instance(pgConnectOptions.getHost(), pgConnectOptions.getPort())
+                    .databaseName(pgConnectOptions.getDatabase())
+                    .build();
+
+            segment.reportAsExternal(databaseParams);
+            handler = new NRSqlClientWrapper<>(handler, segment);
+        }
+
+        Weaver.callOriginal();
+    }
+
+    protected abstract PgConnectOptions connectOptions();
+}
diff --git a/instrumentation/vertx-postgres-sqlclient-4.4.2/src/main/java/io/vertx/sqlclient/impl/SqlClientBase_Instrumentation.java b/instrumentation/vertx-postgres-sqlclient-4.4.2/src/main/java/io/vertx/sqlclient/impl/SqlClientBase_Instrumentation.java
new file mode 100644
index 0000000000..a749d10510
--- /dev/null
+++ b/instrumentation/vertx-postgres-sqlclient-4.4.2/src/main/java/io/vertx/sqlclient/impl/SqlClientBase_Instrumentation.java
@@ -0,0 +1,42 @@
+/*
+ *
+ * * Copyright 2024 New Relic Corporation. All rights reserved.
+ * * SPDX-License-Identifier: Apache-2.0
+ *
+ */
+package io.vertx.sqlclient.impl;
+
+import com.newrelic.api.agent.Trace;
+import com.newrelic.api.agent.weaver.MatchType;
+import com.newrelic.api.agent.weaver.Weave;
+
+import io.vertx.core.AsyncResult;
+import io.vertx.core.Handler;
+import io.vertx.sqlclient.Query;
+import io.vertx.sqlclient.Row;
+import io.vertx.sqlclient.RowSet;
+import io.vertx.sqlclient.SqlResult;
+import io.vertx.sqlclient.Tuple;
+
+import java.util.List;
+
+@Weave(type = MatchType.ExactClass, originalName = "io.vertx.sqlclient.impl.SqlClientBase")
+public abstract class SqlClientBase_Instrumentation {
+    @Trace
+    public abstract Query<RowSet<Row>> query(String sql);
+
+    @Weave(type = MatchType.ExactClass, originalName = "io.vertx.sqlclient.impl.SqlClientBase$QueryImpl")
+    private static abstract class QueryImpl_Instrumentation<T, R extends SqlResult<T>> {
+        @Trace
+        public abstract void execute(Handler<AsyncResult<R>> handler);
+    }
+
+    @Weave(type = MatchType.ExactClass, originalName = "io.vertx.sqlclient.impl.SqlClientBase$PreparedQueryImpl")
+    private static abstract class PreparedQueryImpl_Instrumentation<T, R extends SqlResult<T>> {
+        @Trace
+        public abstract void execute(Tuple arguments, Handler<AsyncResult<R>> handler);
+
+        @Trace
+        public abstract void executeBatch(List<Tuple> batch, Handler<AsyncResult<R>> handler);
+    }
+}
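Editor's note: the traced methods above correspond to the standard Vert.x SQL client call path. A minimal sketch, mirroring the test below (`client` stands for any configured SqlClient, such as the pool the test builds):

// query() and execute() are the entry points traced by the weave above.
client.query("select * from test").execute(ar -> {
    if (ar.succeeded()) {
        for (Row row : ar.result()) {
            // process each row
        }
    }
});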
diff --git a/instrumentation/vertx-postgres-sqlclient-4.4.2/src/test/java/com/nr/vertx/instrumentation/VertxPostgresSqlClientTest.java b/instrumentation/vertx-postgres-sqlclient-4.4.2/src/test/java/com/nr/vertx/instrumentation/VertxPostgresSqlClientTest.java
new file mode 100644
index 0000000000..d5743a98f3
--- /dev/null
+++ b/instrumentation/vertx-postgres-sqlclient-4.4.2/src/test/java/com/nr/vertx/instrumentation/VertxPostgresSqlClientTest.java
@@ -0,0 +1,278 @@
+/*
+ *
+ * * Copyright 2020 New Relic Corporation. All rights reserved.
+ * * SPDX-License-Identifier: Apache-2.0
+ *
+ */
+
+package com.nr.vertx.instrumentation;
+
+import com.github.dockerjava.api.command.CreateContainerCmd;
+import com.github.dockerjava.api.model.ExposedPort;
+import com.github.dockerjava.api.model.PortBinding;
+import com.github.dockerjava.api.model.Ports;
+import com.newrelic.agent.introspec.CatHelper;
+import com.newrelic.agent.introspec.DataStoreRequest;
+import com.newrelic.agent.introspec.ExternalRequest;
+import com.newrelic.agent.introspec.HttpTestServer;
+import com.newrelic.agent.introspec.InstrumentationTestConfig;
+import com.newrelic.agent.introspec.InstrumentationTestRunner;
+import com.newrelic.agent.introspec.Introspector;
+import com.newrelic.agent.introspec.MetricsHelper;
+import com.newrelic.agent.introspec.TraceSegment;
+import com.newrelic.agent.introspec.TransactionEvent;
+import com.newrelic.agent.introspec.TransactionTrace;
+import com.newrelic.agent.introspec.internal.HttpServerLocator;
+import com.newrelic.api.agent.NewRelic;
+import com.newrelic.api.agent.Trace;
+import io.vertx.core.Future;
+import io.vertx.core.Vertx;
+import io.vertx.core.http.HttpClient;
+import io.vertx.core.http.HttpClientRequest;
+import io.vertx.core.http.HttpClientResponse;
+import io.vertx.core.http.HttpMethod;
+import io.vertx.core.http.HttpServer;
+import io.vertx.ext.web.Router;
+import io.vertx.ext.web.RoutingContext;
+import io.vertx.pgclient.PgConnectOptions;
+import io.vertx.sqlclient.Pool;
+import io.vertx.sqlclient.PoolOptions;
+import io.vertx.sqlclient.Row;
+import io.vertx.sqlclient.RowSet;
+import io.vertx.sqlclient.SqlClient;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.testcontainers.containers.PostgreSQLContainer;
+
+import java.io.IOException;
+import java.net.ServerSocket;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.concurrent.CountDownLatch;
+import java.util.function.Consumer;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+@RunWith(InstrumentationTestRunner.class)
+@InstrumentationTestConfig(includePrefixes = { "io.vertx" })
+public class VertxPostgresSqlClientTest {
+
+    private static int port;
+    private static Future<HttpServer> server;
+    private static Vertx vertx;
+    private static SqlClient sqlClient;
+
+    private static PostgreSQLContainer<?> postgres;
+
+    @BeforeClass
+    public static void beforeClass() throws InterruptedException {
+
+        postgres = new PostgreSQLContainer<>("postgres:16-alpine");
+        postgres.start();
+        port = getAvailablePort();
+        vertx = Vertx.vertx();
+
+        final Router router = Router.router(vertx);
+        router.get("/fetch").handler(VertxPostgresSqlClientTest::handleFetchRequest);
+        router.get("/insert").handler(VertxPostgresSqlClientTest::handleInsertRequest);
+
+        server = vertx.createHttpServer().requestHandler(router).listen(port);
+
+        configureSqlClient();
+        initDbTable();
+    }
+
+    @AfterClass
+    public static void afterClass() {
+        server.result().close();
+        vertx.close();
+        postgres.stop();
+    }
+
+    @Test
+    public void testPgSqlClientFetch() throws InterruptedException {
+        doPgFetchTransaction();
+        // Wait for the transaction to finish
+        Introspector introspector = InstrumentationTestRunner.getIntrospector();
+        assertEquals(1, introspector.getFinishedTransactionCount(1000));
+
+        assertTrue(introspector.getTransactionNames().contains("OtherTransaction/Custom/com.nr.vertx.instrumentation.VertxPostgresSqlClientTest/performSimpleQuery"));
+
+        ArrayList<TransactionTrace> traces = new ArrayList<>(introspector.getTransactionTracesForTransaction("OtherTransaction/Custom/com.nr.vertx.instrumentation.VertxPostgresSqlClientTest/performSimpleQuery"));
+        ArrayList<TraceSegment> segments = new ArrayList<>(traces.get(0).getInitialTraceSegment().getChildren());
+
+        assertEquals(2, segments.size());
+        assertEquals("Java/io.vertx.sqlclient.impl.PoolImpl/query", segments.get(0).getName());
+        assertEquals("Java/io.vertx.sqlclient.impl.SqlClientBase$QueryImpl/execute", segments.get(1).getName());
+
+        ArrayList<DataStoreRequest> datastores = new ArrayList<>(introspector.getDataStores("OtherTransaction/Custom/com.nr.vertx.instrumentation.VertxPostgresSqlClientTest/performSimpleQuery"));
+        assertEquals(1, datastores.get(0).getCount());
+        assertEquals("test", datastores.get(0).getTable());
+        assertEquals("SELECT", datastores.get(0).getOperation());
+        assertEquals("Postgres", datastores.get(0).getDatastore());
+    }
+
+    public void doPgFetchTransaction() throws InterruptedException {
+        CountDownLatch latch = new CountDownLatch(1);
+        HttpClient httpClient = vertx.createHttpClient();
+
+        httpClient.request(HttpMethod.GET, port, "localhost", "/fetch", reqAsyncResult -> {
+            if (reqAsyncResult.succeeded()) { // Request object successfully created
+                HttpClientRequest request = reqAsyncResult.result();
+                request.send(respAsyncResult -> { // Sending the request
+                    if (respAsyncResult.succeeded()) {
+                        HttpClientResponse response = respAsyncResult.result();
+                        latch.countDown();
+                        response.body(respBufferAsyncResult -> { // Retrieve the response
+                            if (respBufferAsyncResult.succeeded()) {
+                                System.out.println(respBufferAsyncResult.result().toString());
+                            } else {
+                                // Handle server error, for example, connection closed
+                            }
+                        });
+                    } else {
+                        // Handle server error, for example, connection closed
+                    }
+                });
+            } else {
+                // Connection error, for example, invalid server or invalid SSL certificate
+            }
+        });
+        latch.await();
+    }
+
+    @Test
+    public void testPgSqlClientInsert() throws InterruptedException {
+        doPgInsertTransaction();
+        // Wait for the transaction to finish
+        Introspector introspector = InstrumentationTestRunner.getIntrospector();
+        assertEquals(1, introspector.getFinishedTransactionCount(1000));
+        assertTrue(introspector.getTransactionNames().contains("OtherTransaction/Custom/com.nr.vertx.instrumentation.VertxPostgresSqlClientTest/performInsertQuery"));
+
+        ArrayList<TransactionTrace> traces = new ArrayList<>(introspector.getTransactionTracesForTransaction("OtherTransaction/Custom/com.nr.vertx.instrumentation.VertxPostgresSqlClientTest/performInsertQuery"));
+        ArrayList<TraceSegment> segments = new ArrayList<>(traces.get(0).getInitialTraceSegment().getChildren());
+
+        assertEquals(2, segments.size());
+        assertEquals("Java/io.vertx.sqlclient.impl.PoolImpl/query", segments.get(0).getName());
+        assertEquals("Java/io.vertx.sqlclient.impl.SqlClientBase$QueryImpl/execute", segments.get(1).getName());
+
+        ArrayList<DataStoreRequest> datastores = new ArrayList<>(introspector.getDataStores("OtherTransaction/Custom/com.nr.vertx.instrumentation.VertxPostgresSqlClientTest/performInsertQuery"));
+        assertEquals(1, datastores.get(0).getCount());
+        assertEquals("test", datastores.get(0).getTable());
+        assertEquals("INSERT", datastores.get(0).getOperation());
+        assertEquals("Postgres", datastores.get(0).getDatastore());
+    }
+
+    public void doPgInsertTransaction() throws InterruptedException {
+        CountDownLatch latch = new CountDownLatch(1);
+        HttpClient httpClient = vertx.createHttpClient();
+
+        httpClient.request(HttpMethod.GET, port, "localhost", "/insert", reqAsyncResult -> {
+            if (reqAsyncResult.succeeded()) { // Request object successfully created
+                HttpClientRequest request = reqAsyncResult.result();
+                request.send(respAsyncResult -> { // Sending the request
+                    if (respAsyncResult.succeeded()) {
+                        HttpClientResponse response = respAsyncResult.result();
+                        latch.countDown();
+                        response.body(respBufferAsyncResult -> { // Retrieve the response
+                            if (respBufferAsyncResult.succeeded()) {
+                                System.out.println(respBufferAsyncResult.result().toString());
+                            } else {
+                                // Handle server error, for example, connection closed
+                            }
+                        });
+                    } else {
+                        // Handle server error, for example, connection closed
+                    }
+                });
+            } else {
+                // Connection error, for example, invalid server or invalid SSL certificate
+            }
+        });
+        latch.await();
+    }
+
+    @Trace
+    private static void handleFetchRequest(RoutingContext routingContext) {
+        performSimpleQuery().onComplete(ar -> {
+            if (ar.succeeded()) {
+                StringBuilder payload = new StringBuilder();
+                for (Row r : ar.result()) {
+                    payload.append(r.getLong("id")).append(" ").append(r.getString("name")).append("\n");
+                }
+                routingContext.response().end(payload.toString());
+            } else {
+                routingContext.response().end("Failed to fetch data from Postgres");
+            }
+        });
+    }
+
+    @Trace(dispatcher = true)
+    public static Future<RowSet<Row>> performSimpleQuery() {
+        return sqlClient.query("select * from test").execute();
+    }
+
+    @Trace
+    private static void handleInsertRequest(RoutingContext routingContext) {
+        performInsertQuery().onComplete(ar -> {
+            if (ar.succeeded()) {
+                routingContext.response().end("row inserted");
+            } else {
+                routingContext.response().end("Failed to insert data into table");
+            }
+        });
+    }
+
+    @Trace(dispatcher = true)
+    public static Future<RowSet<Row>> performInsertQuery() {
+        return sqlClient.query("insert into test (id, name) values (1, 'pandora')").execute();
+    }
+
+    private static int getAvailablePort() {
+        int port;
+
+        try {
+            ServerSocket socket = new ServerSocket(0);
+            port = socket.getLocalPort();
+            socket.close();
+        } catch (IOException e) {
+            throw new RuntimeException("Unable to allocate ephemeral port");
+        }
+        return port;
+    }
+
+    private static void initDbTable() throws InterruptedException {
+        System.out.println("Creating table...");
+        final CountDownLatch createTableLatch = new CountDownLatch(1);
+        sqlClient.query("create table test (name character varying(255), id bigint NOT NULL)")
+                .execute()
+                .onComplete(result -> createTableLatch.countDown());
+
+        createTableLatch.await();
+        System.out.println("Table created");
+
+        System.out.println("Inserting rows...");
+        final CountDownLatch insertRowsLatch = new CountDownLatch(3);
+        sqlClient.query("insert into test (id, name) values (1, 'claptrap')").execute().onComplete(result -> insertRowsLatch.countDown());
+        sqlClient.query("insert into test (id, name) values (2, 'krieg')").execute().onComplete(result -> insertRowsLatch.countDown());
+        sqlClient.query("insert into test (id, name) values (3, 'lilith')").execute().onComplete(result -> insertRowsLatch.countDown());
+        insertRowsLatch.await();
+        System.out.println("All rows inserted");
+    }
+
+    private static void configureSqlClient() {
+        System.out.println("Configuring SqlClient...");
+        PgConnectOptions options = new PgConnectOptions()
+                .setPort(postgres.getMappedPort(5432))
+                .setHost(postgres.getHost())
+                .setDatabase(postgres.getDatabaseName())
+                .setUser(postgres.getUsername())
+                .setPassword(postgres.getPassword());
+
+        sqlClient = Pool.pool(vertx, options, new PoolOptions().setMaxSize(4));
+        System.out.println("SqlClient configured");
+    }
+}
diff --git a/newrelic-agent/src/main/java/com/newrelic/agent/attributes/AttributeNames.java b/newrelic-agent/src/main/java/com/newrelic/agent/attributes/AttributeNames.java
index 4734523a3c..5d86065fd4 100644
--- a/newrelic-agent/src/main/java/com/newrelic/agent/attributes/AttributeNames.java
+++ b/newrelic-agent/src/main/java/com/newrelic/agent/attributes/AttributeNames.java
@@ -53,17 +53,18 @@ public final class AttributeNames {
     public static final String REQUEST_USER_AGENT_PARAMETER_NAME = "request.headers.userAgent";
     public static final String REQUEST_METHOD_PARAMETER_NAME = "request.method";
 
+    public static final String RESPONSE_CONTENT_TYPE_PARAMETER_NAME = "response.headers.contentType";
+
     // OpenTelemetry compatible attributes for host and port
     public static final String SERVER_ADDRESS = "server.address";
     public static final String SERVER_PORT = "server.port";
 
     // cloud provider fields
-    public static final String CLOUD_RESOURCE_ID = "cloud.resource_id";
     public static final String CLOUD_ACCOUNT_ID = "cloud.account.id";
+    public static final String CLOUD_PLATFORM = "cloud.platform";
+    public static final String CLOUD_RESOURCE_ID = "cloud.resource_id";
     public static final String CLOUD_REGION = "cloud.region";
 
-    public static final String RESPONSE_CONTENT_TYPE_PARAMETER_NAME = "response.headers.contentType";
-
     // high security matches
     public static final String HTTP_REQUEST_STAR = "request.parameters.*";
     public static final String MESSAGE_REQUEST_STAR = "message.parameters.*";
diff --git a/newrelic-agent/src/main/java/com/newrelic/agent/config/ConfigFileHelper.java b/newrelic-agent/src/main/java/com/newrelic/agent/config/ConfigFileHelper.java
index 963e660afb..95164ee36f 100644
--- a/newrelic-agent/src/main/java/com/newrelic/agent/config/ConfigFileHelper.java
+++ b/newrelic-agent/src/main/java/com/newrelic/agent/config/ConfigFileHelper.java
@@ -19,18 +19,39 @@ public class ConfigFileHelper {
 
     public static final String NEW_RELIC_YAML_FILE = "newrelic.yml";
 
+    private static final String CONFIG_FILE_ENVIRONMENT_VARIABLE = "NEWRELIC_FILE";
     private static final String CONFIG_FILE_PROPERTY = "newrelic.config.file";
     private static final String NEW_RELIC_HOME_DIRECTORY_PROPERTY = "newrelic.home";
     private static final String NEW_RELIC_HOME_DIRECTORY_ENVIRONMENT_VARIABLE = "NEWRELIC_HOME";
     private static final String[] SEARCH_DIRECTORIES = { ".", "conf", "config", "etc" };
 
+    private enum ConfigFileLocationSpecifier {
+        ENV_VAR("environment variable"),
+        SYS_PROP("system property");
+
+        private final String friendlyName;
+
+        ConfigFileLocationSpecifier(String friendlyName) {
+            this.friendlyName = friendlyName;
+        }
+
+        public String getFriendlyName() {
+            return this.friendlyName;
+        }
+    }
+
     /**
      * Find the New Relic configuration file.
      *
      * @return the configuration file or null
      */
     public static File findConfigFile() {
-        File configFile = findFromProperty();
+        File configFile = findFromEnvVariable();
+        if (configFile != null) {
+            return configFile;
+        }
+
+        configFile = findFromProperty();
         if (configFile != null) {
             return configFile;
         }
@@ -40,9 +61,7 @@ public static File findConfigFile() {
         if (DebugFlag.DEBUG) {
             System.err.println(MessageFormat.format("New Relic home directory: {0}", parentDir));
         }
-    }
 
-    if (parentDir != null) {
         configFile = findConfigFile(parentDir);
         if (configFile != null) {
             return configFile;
@@ -66,25 +85,38 @@ public static File getNewRelicDirectory() {
 
     }
 
+    /**
+     * Find the configuration file from an environment variable.
+     *
+     * @return the configuration file or null
+     */
+    private static File findFromEnvVariable() {
+        return getFileFromFilePath(System.getenv(CONFIG_FILE_ENVIRONMENT_VARIABLE), ConfigFileLocationSpecifier.ENV_VAR);
+    }
+
     /**
      * Find the configuration file from a System property.
     *
      * @return the configuration file or null
      */
     private static File findFromProperty() {
-        String filePath = System.getProperty(CONFIG_FILE_PROPERTY);
+        return getFileFromFilePath(System.getProperty(CONFIG_FILE_PROPERTY), ConfigFileLocationSpecifier.SYS_PROP);
+    }
+
+    private static File getFileFromFilePath(String filePath, ConfigFileLocationSpecifier configFileLocationSpecifier) {
        if (filePath != null) {
            File configFile = new File(filePath);
            if (configFile.exists()) {
                return configFile;
            }
            System.err.println(MessageFormat.format(
-                    "The configuration file {0} specified with the {1} property does not exist",
-                    configFile.getAbsolutePath(), CONFIG_FILE_PROPERTY));
+                    "The configuration file {0} specified with the {1} [{2}] does not exist",
+                    configFile.getAbsolutePath(), configFileLocationSpecifier.getFriendlyName(), filePath));
        }
        return null;
    }
 
+
    /**
     * Find the New Relic home directory.
     *
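Editor's note: a sketch of the resolution order the change above introduces (illustrative, not the agent's actual code; locate(...) is a hypothetical helper that returns the File if the path exists, else null):

// Resolution order for the agent config file after this change:
File configFile = locate(System.getenv("NEWRELIC_FILE"));                // 1) environment variable
if (configFile == null) {
    configFile = locate(System.getProperty("newrelic.config.file"));     // 2) system property
}
if (configFile == null) {
    // 3) fall back to the New Relic home directory and the standard
    //    search directories (".", "conf", "config", "etc")
}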
diff --git a/newrelic-agent/src/main/java/com/newrelic/agent/instrumentation/ClassTransformerServiceImpl.java b/newrelic-agent/src/main/java/com/newrelic/agent/instrumentation/ClassTransformerServiceImpl.java
index 49bf7d2406..3a4be3f38b 100644
--- a/newrelic-agent/src/main/java/com/newrelic/agent/instrumentation/ClassTransformerServiceImpl.java
+++ b/newrelic-agent/src/main/java/com/newrelic/agent/instrumentation/ClassTransformerServiceImpl.java
@@ -24,13 +24,13 @@
 import com.newrelic.agent.instrumentation.tracing.TraceDetails;
 import com.newrelic.agent.instrumentation.tracing.TraceDetailsBuilder;
 import com.newrelic.agent.instrumentation.weaver.ClassLoaderClassTransformer;
-import com.newrelic.agent.security.deps.org.apache.commons.lang3.StringUtils;
 import com.newrelic.agent.service.AbstractService;
 import com.newrelic.agent.service.ServiceFactory;
 import com.newrelic.agent.util.DefaultThreadFactory;
 import com.newrelic.agent.util.asm.Utils;
 import com.newrelic.api.agent.NewRelic;
 import com.newrelic.api.agent.security.schema.SecurityMetaData;
+import org.apache.commons.lang3.StringUtils;
 import org.objectweb.asm.commons.Method;
 
 import java.lang.instrument.ClassFileTransformer;
diff --git a/newrelic-agent/src/main/java/com/newrelic/agent/service/analytics/SpanEventFactory.java b/newrelic-agent/src/main/java/com/newrelic/agent/service/analytics/SpanEventFactory.java
index 320c2e79f4..c178f61393 100644
--- a/newrelic-agent/src/main/java/com/newrelic/agent/service/analytics/SpanEventFactory.java
+++ b/newrelic-agent/src/main/java/com/newrelic/agent/service/analytics/SpanEventFactory.java
@@ -12,7 +12,6 @@
 import com.newrelic.agent.attributes.AttributeValidator;
 import com.newrelic.agent.config.AgentConfig;
 import com.newrelic.agent.config.AttributesConfig;
-import com.newrelic.agent.config.TransactionEventsConfig;
 import com.newrelic.agent.database.SqlObfuscator;
 import com.newrelic.agent.model.AttributeFilter;
 import com.newrelic.agent.model.SpanCategory;
@@ -24,6 +23,7 @@
 import com.newrelic.agent.util.StackTraces;
 import com.newrelic.api.agent.DatastoreParameters;
 import com.newrelic.api.agent.ExternalParameters;
+import com.newrelic.api.agent.CloudParameters;
 import com.newrelic.api.agent.HttpParameters;
 import com.newrelic.api.agent.MessageConsumeParameters;
 import com.newrelic.api.agent.MessageProduceParameters;
@@ -301,6 +301,15 @@ public SpanEventFactory setCloudRegion(String region) {
         return this;
     }
 
+    private void setCloudPlatform(String platform) {
+        builder.putAgentAttribute(AttributeNames.CLOUD_PLATFORM, platform);
+    }
+
+    private void setCloudResourceId(String name) {
+        builder.putAgentAttribute(AttributeNames.CLOUD_RESOURCE_ID, name);
+    }
+
+
     public SpanEventFactory setMessagingSystem(String messagingSystem) {
         builder.putAgentAttribute(AttributeNames.MESSAGING_SYSTEM, messagingSystem);
         return this;
@@ -448,6 +457,11 @@ public SpanEventFactory setExternalParameterAttributes(ExternalParameters parameters) {
             setServerAddress(messageConsumeParameters.getHost());
             setServerPort(messageConsumeParameters.getPort());
             setKind("consumer");
+        } else if (parameters instanceof CloudParameters) {
+            CloudParameters cloudParameters = (CloudParameters) parameters;
+            setCategory(SpanCategory.generic);
+            setCloudPlatform(cloudParameters.getPlatform());
+            setCloudResourceId(cloudParameters.getResourceId());
         } else {
             setCategory(SpanCategory.generic);
         }
diff --git a/newrelic-agent/src/main/java/com/newrelic/agent/tracers/DefaultTracer.java b/newrelic-agent/src/main/java/com/newrelic/agent/tracers/DefaultTracer.java
index 8f3c11f5b1..e19172a547 100644
--- a/newrelic-agent/src/main/java/com/newrelic/agent/tracers/DefaultTracer.java
+++ b/newrelic-agent/src/main/java/com/newrelic/agent/tracers/DefaultTracer.java
@@ -28,9 +28,10 @@
 import com.newrelic.agent.tracers.metricname.SimpleMetricNameFormat;
 import com.newrelic.agent.util.ExternalsUtil;
 import com.newrelic.agent.util.Strings;
+import com.newrelic.api.agent.CloudParameters;
 import com.newrelic.api.agent.DatastoreParameters;
 import com.newrelic.api.agent.DestinationType;
 import com.newrelic.api.agent.ExternalParameters;
 import com.newrelic.api.agent.GenericParameters;
 import com.newrelic.api.agent.HttpParameters;
 import com.newrelic.api.agent.InboundHeaders;
@@ -664,6 +665,8 @@ private void recordExternalMetrics() {
             recordMessageBrokerMetrics((MessageProduceParameters) this.externalParameters);
         } else if (externalParameters instanceof MessageConsumeParameters) {
             recordMessageBrokerMetrics((MessageConsumeParameters) this.externalParameters);
+        } else if (externalParameters instanceof CloudParameters) {
+            recordFaasAttributes((CloudParameters) externalParameters);
         } else {
             Agent.LOG.log(Level.SEVERE, "Unknown externalParameters type. This should not happen. {0} -- {1}",
                     externalParameters, externalParameters.getClass());
@@ -848,6 +851,11 @@ private void recordMessageBrokerMetrics(MessageConsumeParameters messageConsumeParameters) {
         }
     }
 
+    private void recordFaasAttributes(CloudParameters cloudParameters) {
+        setAgentAttribute(AttributeNames.CLOUD_PLATFORM, cloudParameters.getPlatform());
+        setAgentAttribute(AttributeNames.CLOUD_RESOURCE_ID, cloudParameters.getResourceId());
+    }
+
     private void recordSlowQueryData(SlowQueryDatastoreParameters slowQueryDatastoreParameters) {
         Transaction transaction = getTransactionActivity().getTransaction();
         if (transaction != null && slowQueryDatastoreParameters.getRawQuery() != null
diff --git a/newrelic-agent/src/main/java/com/newrelic/agent/util/AgentCollectionFactory.java b/newrelic-agent/src/main/java/com/newrelic/agent/util/AgentCollectionFactory.java
index 4f2bf184eb..6afb5ecaf3 100644
--- a/newrelic-agent/src/main/java/com/newrelic/agent/util/AgentCollectionFactory.java
+++ b/newrelic-agent/src/main/java/com/newrelic/agent/util/AgentCollectionFactory.java
@@ -47,4 +47,13 @@ public <K, V> Function<K, V> memorize(Function<K, V> loader, int maxSize) {
                 .build(loader::apply);
         return cache::get;
     }
+
+    @Override
+    public <K, V> Function<K, V> createAccessTimeBasedCache(long ageInSeconds, int initialCapacity, Function<K, V> loader) {
+        LoadingCache<K, V> cache = Caffeine.newBuilder()
+                .initialCapacity(initialCapacity)
+                .expireAfterAccess(ageInSeconds, TimeUnit.SECONDS)
+                .build(loader::apply);
+        return cache::get;
+    }
 }
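Editor's note: a quick usage sketch for the new access-time-based cache. It assumes the factory is reachable through AgentBridge.collectionFactory, and expensiveLookup is a hypothetical loader; the numeric values are examples only.

// Entries are evicted once they have gone 60 seconds without being read.
Function<String, String> cached = AgentBridge.collectionFactory
        .createAccessTimeBasedCache(60, 16, key -> expensiveLookup(key)); // expensiveLookup is hypothetical

String v1 = cached.apply("cluster-1"); // computes via the loader and caches the result
String v2 = cached.apply("cluster-1"); // served from cache until idle for 60 seconds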
diff --git a/newrelic-agent/src/test/java/com/newrelic/agent/config/ConfigFileHelperTest.java b/newrelic-agent/src/test/java/com/newrelic/agent/config/ConfigFileHelperTest.java
index f0971046ed..c5ce6604d3 100644
--- a/newrelic-agent/src/test/java/com/newrelic/agent/config/ConfigFileHelperTest.java
+++ b/newrelic-agent/src/test/java/com/newrelic/agent/config/ConfigFileHelperTest.java
@@ -1,6 +1,6 @@
 package com.newrelic.agent.config;
 
-import com.newrelic.agent.security.deps.org.junit.Assert;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
diff --git a/newrelic-api/src/main/java/com/newrelic/api/agent/CloudParameters.java b/newrelic-api/src/main/java/com/newrelic/api/agent/CloudParameters.java
new file mode 100644
index 0000000000..960816a122
--- /dev/null
+++ b/newrelic-api/src/main/java/com/newrelic/api/agent/CloudParameters.java
@@ -0,0 +1,85 @@
+/*
+ *
+ * * Copyright 2024 New Relic Corporation. All rights reserved.
+ * * SPDX-License-Identifier: Apache-2.0
+ *
+ */
+
+package com.newrelic.api.agent;
+
+/**
+ * Used to report a call to a cloud service that does not match HTTP or messaging.
+ *
+ * @since 8.14.0
+ */
+public class CloudParameters implements ExternalParameters {
+
+    private final String platform;
+
+    private final String resourceId;
+
+    private CloudParameters(Builder builder) {
+        this.platform = builder.platform;
+        this.resourceId = builder.resourceId;
+    }
+
+    public String getPlatform() {
+        return platform;
+    }
+
+    public String getResourceId() {
+        return resourceId;
+    }
+
+    /**
+     * Starts the process of creating a CloudParameters object.
+     *
+     * @param provider The cloud platform being invoked, e.g. aws_lambda, azure_function, gcp_cloud_run.
+     * @return the next step of the builder
+     * @since 8.14.0
+     */
+    public static ResourceIdParameter provider(String provider) {
+        return new Builder(provider);
+    }
+
+    private static class Builder implements ResourceIdParameter, Build {
+        private final String platform;
+        private String resourceId;
+
+        private Builder(String platform) {
+            this.platform = platform;
+        }
+
+        public Build resourceId(String resourceId) {
+            this.resourceId = resourceId;
+            return this;
+        }
+
+        public CloudParameters build() {
+            return new CloudParameters(this);
+        }
+    }
+
+    public interface ResourceIdParameter extends Build {
+        /**
+         * @param resourceId the cloud provider's unique identifier for the service instance. This should be an ARN on AWS,
+         *                   a fully qualified resource ID on Azure, or a full resource name on GCP.
+         * @return the object that can be used to build the CloudParameters
+         *
+         * @since 8.14.0
+         */
+        Build resourceId(String resourceId);
+    }
+
+    public interface Build {
+        /**
+         * Builds the CloudParameters object.
+         *
+         * @return the CloudParameters object with the specified parameters.
+         *
+         * @since 8.14.0
+         */
+        CloudParameters build();
+    }
+
+}
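Editor's note: a hedged usage sketch of the new API. The builder methods are taken from the file above and reportAsExternal is the existing TracedMethod API; the traced method and the ARN below are made-up examples.

// Sketch: reporting an AWS Lambda invocation against the current traced method.
@Trace
public void invokeLambda() {
    CloudParameters params = CloudParameters.provider("aws_lambda")
            .resourceId("arn:aws:lambda:us-east-1:123456789012:function:processor") // example ARN
            .build();
    NewRelic.getAgent().getTracedMethod().reportAsExternal(params);
    // ... perform the actual call ...
}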
diff --git a/newrelic-api/src/main/java/com/newrelic/api/agent/Trace.java b/newrelic-api/src/main/java/com/newrelic/api/agent/Trace.java
index ab2d0465a8..53f9c58e8c 100644
--- a/newrelic-api/src/main/java/com/newrelic/api/agent/Trace.java
+++ b/newrelic-api/src/main/java/com/newrelic/api/agent/Trace.java
@@ -47,6 +47,10 @@
     /**
      * Sets the metric name for this tracer. If unspecified, the class / method name will be used.
+     * <p>
+     * If both nameTransaction and metricName are set, nameTransaction takes precedence: the transaction
+     * is named from the underlying tracer (typically class / method) rather than from the custom name
+     * set by metricName. Prefer one or the other, depending on the desired naming.
      *
      * @return The metric name for this tracer.
      * @since 1.3.0
      */
@@ -75,6 +79,10 @@
     /**
      * Names the current transaction using this tracer's metric name.
+     * <p>
+     * If both nameTransaction and metricName are set, nameTransaction takes precedence: the transaction
+     * is named from the underlying tracer (typically class / method) rather than from the custom name
+     * set by metricName. Prefer one or the other, depending on the desired naming.
      *
      * @return True if this traced method should be used to name the transaction, else false.
      * @since 3.1.0
      */
diff --git a/settings.gradle b/settings.gradle
index 7b29465680..57a9c25fa1 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -70,13 +70,17 @@ include 'instrumentation:activemq-client-5.8.0'
 include 'instrumentation:anorm-2.3'
 include 'instrumentation:anorm-2.4'
 include 'instrumentation:aws-bedrock-runtime-2.20'
+include 'instrumentation:aws-java-sdk-dynamodb-1.11.106'
+include 'instrumentation:aws-java-sdk-dynamodb-2.15.34'
+include 'instrumentation:aws-java-sdk-kinesis-1.11.106'
+include 'instrumentation:aws-java-sdk-kinesis-2.0.6'
+include 'instrumentation:aws-java-sdk-lambda-1.11.280'
+include 'instrumentation:aws-java-sdk-lambda-2.1'
 include 'instrumentation:aws-java-sdk-sqs-1.10.44'
 include 'instrumentation:aws-java-sdk-s3-1.2.13'
 include 'instrumentation:aws-java-sdk-s3-2.0'
 include 'instrumentation:aws-java-sdk-sns-1.11.12'
 include 'instrumentation:aws-java-sdk-sqs-2.1.0'
-include 'instrumentation:aws-java-sdk-dynamodb-1.11.106'
-include 'instrumentation:aws-java-sdk-dynamodb-2.15.34'
 include 'instrumentation:aws-java-sdk-sns-2.0'
 include 'instrumentation:aws-wrap-0.7.0'
 include 'instrumentation:akka-2.2'
@@ -108,9 +112,11 @@ include 'instrumentation:cassandra-datastax-4.0.0'
 include 'instrumentation:cxf-2.7'
 include 'instrumentation:ejb-3.0'
 include 'instrumentation:ejb-4.0'
+include 'instrumentation:flyway-core-8.0.0'
 include 'instrumentation:glassfish-3'
 include 'instrumentation:glassfish-6'
 include 'instrumentation:glassfish-jmx'
+include 'instrumentation:glassfish-jul-extension-7'
 include 'instrumentation:grails-1.3'
 include 'instrumentation:grails-2'
 include 'instrumentation:grails-async-2.3'
@@ -236,7 +242,9 @@ include 'instrumentation:kafka-clients-heartbeat-2.1.0'
 include 'instrumentation:kafka-clients-metrics-0.10.0.0'
 include 'instrumentation:kafka-clients-metrics-2.0.0'
 include 'instrumentation:kafka-clients-metrics-3.0.0'
+include 'instrumentation:kafka-clients-metrics-3.7.0'
 include 'instrumentation:kafka-clients-node-metrics-1.0.0'
+include 'instrumentation:kafka-clients-node-metrics-3.7.0'
 include 'instrumentation:kafka-clients-spans-0.11.0.0'
 include 'instrumentation:kafka-connect-metrics-1.0.0'
 include 'instrumentation:kafka-connect-spans-2.0.0'
@@ -335,6 +343,7 @@ include 'instrumentation:servlet-user-5.0'
 include 'instrumentation:slick-3.0.0'
 include 'instrumentation:slick-2.11_3.2.0'
 include 'instrumentation:slick-2.12_3.2.0'
+include 'instrumentation:slick-2.12_3.5.0'
 include 'instrumentation:solr-4.0.0'
 include 'instrumentation:solr-5.0.0'
 include 'instrumentation:solr-5.1.0'
@@ -398,6 +407,7 @@ include 'instrumentation:vertx-core-4.1.0'
 include 'instrumentation:vertx-core-4.3.2'
 include 'instrumentation:vertx-core-4.5.0'
 include 'instrumentation:vertx-core-4.5.1'
+include 'instrumentation:vertx-postgres-sqlclient-4.4.2'
 include 'instrumentation:vertx-web-3.2.0'
 include 'instrumentation:vertx-web-3.3.0'
 include 'instrumentation:vertx-web-3.5.0'