Skip to content

Commit

Permalink
test(uat): add test for upload speed (#208)
Browse files Browse the repository at this point in the history
  • Loading branch information
MikeDombo authored Jun 19, 2023
1 parent e4d432c commit b23ade6
Show file tree
Hide file tree
Showing 3 changed files with 88 additions and 9 deletions.
1 change: 1 addition & 0 deletions pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -357,6 +357,7 @@
</goals>
<configuration>
<dataFile>${project.build.directory}/jacoco.exec</dataFile>
<skip>${skipTests}</skip>
<rules>
<rule>
<element>BUNDLE</element>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,20 +19,25 @@
import software.amazon.awssdk.core.pagination.sync.SdkIterable;
import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient;
import software.amazon.awssdk.services.cloudwatchlogs.model.GetLogEventsRequest;
import software.amazon.awssdk.services.cloudwatchlogs.model.GetLogEventsResponse;
import software.amazon.awssdk.services.cloudwatchlogs.model.LogStream;
import software.amazon.awssdk.services.cloudwatchlogs.model.OutputLogEvent;
import software.amazon.awssdk.services.cloudwatchlogs.model.ResourceNotFoundException;
import software.amazon.awssdk.services.cloudwatchlogs.model.ServiceUnavailableException;
import software.amazon.awssdk.services.cloudwatchlogs.paginators.GetLogEventsIterable;

import java.nio.charset.StandardCharsets;
import java.text.SimpleDateFormat;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.Date;
import java.util.List;
import java.util.Locale;
import java.util.TimeZone;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

import static org.junit.jupiter.api.Assertions.assertTrue;


@ScenarioScoped
Expand Down Expand Up @@ -137,31 +142,61 @@ public void verifyLogs(int numberOfLogLines, String componentName, String compon
}
}

@Then("I verify that logs for {word} of type {word} uploaded with a rate greater than {double} MBps")
public void verifyLogRate(String componentName, String componentType, double desiredMBps) throws
        Exception {
    // Computes the average upload throughput (MBps) of all events in the component's
    // CloudWatch log stream, measured over the ingestion-time span between the first
    // and last event, and asserts it meets the desired minimum rate.
    //
    // Logs written by the log generator append a sequence number per log line along with the component name
    GetLogEventsRequest request = GetLogEventsRequest.builder()
            .logGroupName(getLogGroupName(componentType, componentName))
            .logStreamName(getLogStreamName())
            .startFromHead(true)
            .endTime(Instant.now().toEpochMilli())
            .limit(10_000) // limit of 10000 logs per page; the paginator follows nextToken for the rest
            .build();
    GetLogEventsIterable response = cwClient.getLogEventsPaginator(request);
    List<OutputLogEvent> events = response.events().stream().collect(Collectors.toCollection(ArrayList::new));

    // Guard: with fewer than 2 events there is no time span to measure a rate over.
    // Without this, an empty stream would throw IndexOutOfBoundsException below with a
    // confusing stack trace instead of a clear assertion failure.
    assertTrue(events.size() >= 2,
            String.format("Expected at least 2 uploaded log events to compute an upload rate, but found %d",
                    events.size()));

    events.sort(Comparator.comparingLong(OutputLogEvent::ingestionTime));
    OutputLogEvent first = events.get(0);
    OutputLogEvent last = events.get(events.size() - 1);

    long totalSizeBytes = events.stream().mapToLong(i -> i.message().getBytes(StandardCharsets.UTF_8).length).sum();
    // Clamp the span to at least 1ms: if every event shares the same ingestion timestamp,
    // dividing by zero would yield Infinity and make the rate check pass vacuously.
    long ingestionTimeSpanMs = Math.max(last.ingestionTime() - first.ingestionTime(), 1L);

    double bytesPerSecond = totalSizeBytes / (ingestionTimeSpanMs / 1000.0);

    double gotMBps = bytesPerSecond / (1024.0 * 1024.0);
    LOGGER.info("Found {} logs uploaded at an average rate of {} MBps", events.size(), gotMBps);
    assertTrue(gotMBps >= desiredMBps,
            String.format("Got %f MBps uploaded to CloudWatch, but wanted at least %f", gotMBps, desiredMBps));
}


private boolean haveAllLogsBeenUploaded(int numberOfLogLines, String componentName, String componentType) {
// Logs written by the log generator append a sequence number per log line along with the component name
GetLogEventsRequest request = GetLogEventsRequest.builder()
.logGroupName(getLogGroupName(componentType, componentName))
.logStreamName(getLogStreamName())
.limit(numberOfLogLines) // limit of 10000 logs (this method could be optimized
.startFromHead(true)
.endTime(Instant.now().toEpochMilli())
.limit(10_000) // limit of 10000 logs
.build();

try {
// The OTF watch steps check every 100ms; this sleep avoids hammering the API. Ideally OTF
// would allow us to configure the check interval rate
TimeUnit.SECONDS.sleep(5L);
GetLogEventsResponse response = cwClient.getLogEvents(request);
List<OutputLogEvent> events = response.events();
GetLogEventsIterable response = cwClient.getLogEventsPaginator(request);
List<OutputLogEvent> events = response.events().stream().collect(Collectors.toCollection(ArrayList::new));

if (events.size() != numberOfLogLines) {
this.lastReceivedCloudWatchEvents = events;
LOGGER.info("Found {} events, not the expected {}", events.size(), numberOfLogLines);
return false;
}

// events is an unmodifiable list
List<OutputLogEvent> copy = new ArrayList<>(events);
copy.sort(Comparator.comparingLong(OutputLogEvent::timestamp));
return wereThereDuplicatesOrMisses(numberOfLogLines, componentName, copy);
events.sort(Comparator.comparingLong(OutputLogEvent::timestamp));
return wereThereDuplicatesOrMisses(numberOfLogLines, componentName, events);
} catch (ServiceUnavailableException | InterruptedException e) {
return false;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -281,4 +281,47 @@ Feature: Greengrass V2 LogManager
And the local Greengrass deployment is SUCCEEDED on the device after 180 seconds
Then I verify that it created a log group of type GreengrassSystemComponent for component System, with streams within 300 seconds in CloudWatch
And I verify that it created a log group of type UserComponent for component UserComponentW, with streams within 300 seconds in CloudWatch
And I verify 10000 logs for UserComponentW of type UserComponent have been uploaded to Cloudwatch within 120 seconds
And I verify 10000 logs for UserComponentW of type UserComponent have been uploaded to Cloudwatch within 120 seconds

Scenario: LogManager-1-T6: Log manager can upload 200,000 logs quickly
Given I create a log directory for component called UserComponentWLogDirectory
And I create a Greengrass deployment with components
| aws.greengrass.Cli | LATEST |
| aws.greengrass.LogManager | classpath:/greengrass/recipes/recipe.yaml |
When I update my Greengrass deployment configuration, setting the component aws.greengrass.LogManager configuration to:
"""
{
"MERGE": {
"logsUploaderConfiguration": {
"componentLogsConfiguration": [
{
"logFileRegex": "UserComponentW(.*).log",
"logFileDirectoryPath": "${UserComponentWLogDirectory}",
"componentName": "UserComponentW"
}
]
},
"periodicUploadIntervalSec": "0.001",
"deprecatedVersionSupport": "false"
}
}
"""
And I deploy the Greengrass deployment configuration
And the Greengrass deployment is COMPLETED on the device after 3 minutes
And I install the component LogGenerator from local store with configuration
"""
{
"MERGE":{
"LogFileName": "UserComponentW",
"WriteFrequencyMs": "0",
"LogsDirectory": "${UserComponentWLogDirectory}",
"NumberOfLogLines": "200000"
}
}
"""
And the local Greengrass deployment is SUCCEEDED on the device after 180 seconds
And I verify that it created a log group of type UserComponent for component UserComponentW, with streams within 300 seconds in CloudWatch
And I verify 200000 logs for UserComponentW of type UserComponent have been uploaded to Cloudwatch within 120 seconds
# Can achieve ~1.4MBps running on codebuild, presumably due to high upload speed and low ping time.
# This test may fail when run on your own computer as your ping time to CloudWatch will be higher.
And I verify that logs for UserComponentW of type UserComponent uploaded with a rate greater than 1.0 MBps

0 comments on commit b23ade6

Please sign in to comment.