diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/deserialization/SimpleRecordDeserializer.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/deserialization/SimpleRecordDeserializer.java
index 419090f33a2..1a4eacc7f3e 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/deserialization/SimpleRecordDeserializer.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/deserialization/SimpleRecordDeserializer.java
@@ -10,6 +10,10 @@ public class SimpleRecordDeserializer implements RecordDeserializer {
 
     @Override
     public Object deserialize(ConsumerRecord record) {
-        return stringDeserializer.deserialize(record.topic(), record.value().get());
+        if (record.value()!=null) {
+            return stringDeserializer.deserialize(record.topic(), record.value().get());
+        } else {
+            return "empty";
+        }
     }
 }
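A note on the change above: a Kafka tombstone record carries a null value, so the old code would throw a NullPointerException on `record.value().get()`. Below is a minimal standalone sketch of the guarded behaviour, not code from this PR; the `Bytes` key/value types and the class name are assumptions, since the generic parameters are not visible in the diff.

```java
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.utils.Bytes;

// Hypothetical illustration of the null guard: tombstone records (null value)
// are mapped to the "empty" placeholder instead of throwing on record.value().get().
public class TombstoneSafeDeserializer {

    private final StringDeserializer stringDeserializer = new StringDeserializer();

    public Object deserialize(ConsumerRecord<Bytes, Bytes> record) {
        if (record.value() != null) {
            return stringDeserializer.deserialize(record.topic(), record.value().get());
        }
        return "empty"; // same placeholder the patch returns for null values
    }
}
```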
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/mapper/ClusterMapper.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/mapper/ClusterMapper.java
index 8414fe42f7d..7e963c64e46 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/mapper/ClusterMapper.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/mapper/ClusterMapper.java
@@ -1,10 +1,7 @@
 package com.provectus.kafka.ui.cluster.mapper;
 
 import com.provectus.kafka.ui.cluster.config.ClustersProperties;
-import com.provectus.kafka.ui.cluster.model.InternalClusterMetrics;
-import com.provectus.kafka.ui.cluster.model.InternalTopic;
-import com.provectus.kafka.ui.cluster.model.InternalTopicConfig;
-import com.provectus.kafka.ui.cluster.model.KafkaCluster;
+import com.provectus.kafka.ui.cluster.model.*;
 import com.provectus.kafka.ui.model.*;
 import org.mapstruct.Mapper;
 import org.mapstruct.Mapping;
@@ -19,8 +16,10 @@ public interface ClusterMapper {
 
     Cluster toCluster(KafkaCluster cluster);
     KafkaCluster toKafkaCluster(ClustersProperties.Cluster clusterProperties);
-    BrokersMetrics toBrokerMetrics(InternalClusterMetrics metrics);
+    ClusterMetrics toClusterMetrics(InternalClusterMetrics metrics);
+    BrokerMetrics toBrokerMetrics(InternalBrokerMetrics metrics);
     Topic toTopic(InternalTopic topic);
     TopicDetails toTopicDetails(InternalTopic topic);
     TopicConfig toTopicConfig(InternalTopicConfig topic);
+    Replica toReplica(InternalReplica replica);
 }
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/model/InternalBrokerMetrics.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/model/InternalBrokerMetrics.java
index 5350aa3c24d..662cf0ddf56 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/model/InternalBrokerMetrics.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/model/InternalBrokerMetrics.java
@@ -10,5 +10,5 @@
 @Builder(toBuilder = true)
 public class InternalBrokerMetrics {
     private final Long segmentSize;
-    private final List jmxMetrics;
+    private final List metrics;
 }
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/model/InternalPartition.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/model/InternalPartition.java
index 3ca03533b93..21502f216a4 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/model/InternalPartition.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/model/InternalPartition.java
@@ -6,11 +6,13 @@
 import java.util.List;
 
 @Data
-@Builder
+@Builder(toBuilder = true)
 public class InternalPartition {
     private final int partition;
     private final Integer leader;
     private final List replicas;
     private final int inSyncReplicasCount;
     private final int replicasCount;
+    private final long offsetMin;
+    private final long offsetMax;
 }
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/model/InternalTopic.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/model/InternalTopic.java
index 40deb0d6d9b..164c1073250 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/model/InternalTopic.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/model/InternalTopic.java
@@ -1,6 +1,5 @@
 package com.provectus.kafka.ui.cluster.model;
 
-import com.provectus.kafka.ui.model.TopicPartitionDto;
 import lombok.Builder;
 import lombok.Data;
 import org.apache.kafka.common.TopicPartition;
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/model/KafkaCluster.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/model/KafkaCluster.java
index daa1da98f57..a4d8ac5c5c0 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/model/KafkaCluster.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/model/KafkaCluster.java
@@ -11,7 +11,7 @@ public class KafkaCluster {
 
     private final String name;
-    private final int jmxPort;
+    private final Integer jmxPort;
     private final String bootstrapServers;
     private final String zookeeper;
     private final String schemaRegistry;
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/service/ClusterService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/service/ClusterService.java
index 508247ef8e2..78b7c99b4f3 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/service/ClusterService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/service/ClusterService.java
@@ -38,17 +38,22 @@ public List getClusters() {
                 .collect(Collectors.toList());
     }
 
-    public Mono getBrokersMetrics(String name, Integer id) {
+    public Mono getBrokerMetrics(String name, Integer id) {
         return Mono.justOrEmpty(clustersStorage.getClusterByName(name)
-                .map(KafkaCluster::getMetrics)
-                .map(s -> {
-                    var brokerMetrics = clusterMapper.toBrokerMetrics(s);
-                    brokerMetrics.setMetrics(s.getInternalBrokerMetrics().get(id).getJmxMetrics());
-                    brokerMetrics.setSegmentZise(Long.valueOf(s.getSegmentSize()).intValue());
-                    return brokerMetrics;
-                }));
+                .map( c -> c.getMetrics().getInternalBrokerMetrics())
+                .map( m -> m.get(id))
+                .map(clusterMapper::toBrokerMetrics));
     }
 
+    public Mono getClusterMetrics(String name) {
+        return Mono.justOrEmpty(
+                clustersStorage.getClusterByName(name)
+                        .map(KafkaCluster::getMetrics)
+                        .map(clusterMapper::toClusterMetrics)
+        );
+    }
+
+
     public List getTopics(String name) {
         return clustersStorage.getClusterByName(name)
                 .map(c ->
@@ -60,12 +65,15 @@ public List getTopics(String name) {
 
     public Optional getTopicDetails(String name, String topicName) {
         return clustersStorage.getClusterByName(name)
-                .map(c -> {
-                    var topic = c.getTopics().get(topicName);
-                    return clusterMapper
-                            .toTopicDetails(topic)
-                            .partitions(kafkaService.partitionDtoList(topic, c));
-                });
+                .flatMap( c ->
+                        Optional.ofNullable(
+                                c.getTopics().get(topicName)
+                        ).map(
+                                t -> t.toBuilder().partitions(
+                                        kafkaService.getTopicPartitions(c, t)
+                                ).build()
+                        ).map(clusterMapper::toTopicDetails)
+                );
     }
 
     public Optional> getTopicConfigs(String name, String topicName) {
@@ -143,6 +151,7 @@ public Mono updateTopic(String clusterName, String topicName, Mono topicFormData
                         .flatMap(t -> kafkaService.updateTopic(cl, topicName, t))
+                        .map(clusterMapper::toTopic)
                         .flatMap(t -> updateCluster(t, clusterName, cl))
                 )
                 .orElse(Mono.empty());
@@ -161,4 +170,5 @@ public Flux getMessages(String clusterName, String topicName, Cons
                 .map(c -> consumingService.loadMessages(c, topicName, consumerPosition, query, limit))
                 .orElse(Flux.empty());
     }
+
 }
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/util/ClusterUtil.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/util/ClusterUtil.java
index fc593330f10..59d18f622ba 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/util/ClusterUtil.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/util/ClusterUtil.java
@@ -68,10 +68,12 @@ public static List convertToConsumerTopicPartition
     ) {
         return consumer.assignment().topicPartitions().stream()
                 .map(tp -> {
-                    Long currentOffset = groupOffsets.get(tp).offset();
-                    Long endOffset = endOffsets.get(tp);
+                    Long currentOffset = Optional.ofNullable(
+                            groupOffsets.get(tp)).map(o -> o.offset()).orElse(0L);
+                    Long endOffset = Optional.ofNullable(endOffsets.get(tp)).orElse(0L);
                     ConsumerTopicPartitionDetail cd = new ConsumerTopicPartitionDetail();
                     cd.setConsumerId(consumer.consumerId());
+                    cd.setHost(consumer.host());
                     cd.setTopic(tp.topic());
                     cd.setPartition(tp.partition());
                     cd.setCurrentOffset(currentOffset);
@@ -116,7 +118,7 @@ public static InternalTopic mapToInternalTopic(TopicDescription topicDescription
 
         int urpCount = partitions.stream()
                 .flatMap(partition -> partition.getReplicas().stream())
-                .filter(InternalReplica::isInSync).mapToInt(e -> 1)
+                .filter(p -> !p.isInSync()).mapToInt(e -> 1)
                 .sum();
 
         int inSyncReplicasCount = partitions.stream()
@@ -199,6 +201,10 @@ private static ExtendedAdminClient.SupportedFeature getSupportedUpdateFeature(Ma
                 .filter(entry -> entry.name().contains(CLUSTER_VERSION_PARAM_KEY))
                 .findFirst().orElseThrow().value();
         try {
+            final String[] parts = version.split("\\.");
+            if (parts.length>2) {
+                version = parts[0] + "." + parts[1];
+            }
             return Float.parseFloat(version.split("-")[0]) <= 2.3f ?
                     ExtendedAdminClient.SupportedFeature.ALTER_CONFIGS : ExtendedAdminClient.SupportedFeature.INCREMENTAL_ALTER_CONFIGS;
         } catch (Exception e) {
@@ -207,24 +213,6 @@ private static ExtendedAdminClient.SupportedFeature getSupportedUpdateFeature(Ma
         }
     }
 
-    public static Topic convertToTopic(InternalTopic internalTopic) {
-        Topic topic = new Topic();
-        topic.setName(internalTopic.getName());
-        List partitions = internalTopic.getPartitions().stream().flatMap(s -> {
-            Partition partition = new Partition();
-            partition.setPartition(s.getPartition());
-            partition.setLeader(s.getLeader());
-            partition.setReplicas(s.getReplicas().stream().flatMap(r -> {
-                Replica replica = new Replica();
-                replica.setBroker(r.getBroker());
-                return Stream.of(replica);
-            }).collect(Collectors.toList()));
-            return Stream.of(partition);
-        }).collect(Collectors.toList());
-        topic.setPartitions(partitions);
-        return topic;
-    }
-
     public static Map toSingleMap (Stream> streamOfMaps) {
         return streamOfMaps.reduce((map1, map2) -> Stream.concat(map1.entrySet().stream(), map2.entrySet().stream())
                 .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))).orElseThrow();
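The version-normalisation block added to `getSupportedUpdateFeature` exists because `Float.parseFloat("2.3.1")` throws a `NumberFormatException`, while two-component values such as `2.4-IV1` already parsed via the trailing `split("-")`. A standalone sketch of the parsing behaviour (class and method names are illustrative, not from the PR):

```java
// Hypothetical illustration of the major.minor normalisation added in ClusterUtil.
public class KafkaVersionParsing {

    static float parseMajorMinor(String version) {
        final String[] parts = version.split("\\.");
        if (parts.length > 2) {
            version = parts[0] + "." + parts[1]; // "2.3.1" -> "2.3"
        }
        return Float.parseFloat(version.split("-")[0]); // "2.4-IV1" -> 2.4
    }

    public static void main(String[] args) {
        System.out.println(parseMajorMinor("2.3.1"));   // 2.3
        System.out.println(parseMajorMinor("2.4-IV1")); // 2.4
    }
}
```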
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/util/JmxClusterUtil.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/util/JmxClusterUtil.java
index 8554a712c61..f45f9688533 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/util/JmxClusterUtil.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/util/JmxClusterUtil.java
@@ -100,7 +100,7 @@ private void closeConnectionExceptionally(String url, JMXConnector srv) {
     public List convertToMetricDto(InternalClusterMetrics internalClusterMetrics) {
         return internalClusterMetrics.getInternalBrokerMetrics().values().stream()
                 .map(c ->
-                        c.getJmxMetrics().stream()
+                        c.getMetrics().stream()
                                 .filter(j -> isSameMetric(j.getCanonicalName()))
                                 .map(j -> j.getValue().entrySet().stream()
                                         .map(e -> new MetricDto(j.getCanonicalName(), e.getKey(), e.getValue()))))
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/kafka/KafkaService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/kafka/KafkaService.java
index a3d466bcaf9..b5572bd41fe 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/kafka/KafkaService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/kafka/KafkaService.java
@@ -265,7 +265,7 @@ private Mono createTopic(AdminClient adminClient, NewTopic newTopic) {
     }
 
     @SneakyThrows
-    public Mono updateTopic(KafkaCluster cluster, String topicName, TopicFormData topicFormData) {
+    public Mono updateTopic(KafkaCluster cluster, String topicName, TopicFormData topicFormData) {
         ConfigResource topicCR = new ConfigResource(ConfigResource.Type.TOPIC, topicName);
         return getOrCreateAdminClient(cluster)
                 .flatMap(ac -> {
@@ -281,11 +281,10 @@ public Mono updateTopic(KafkaCluster cluster, String topicName, TopicForm
 
-    private Mono getUpdatedTopic (ExtendedAdminClient ac, String topicName) {
+    private Mono getUpdatedTopic (ExtendedAdminClient ac, String topicName) {
         return getTopicsData(ac.getAdminClient())
                 .map(s -> s.stream()
-                        .filter(t -> t.getName().equals(topicName)).findFirst().orElseThrow())
-                .map(ClusterUtil::convertToTopic);
+                        .filter(t -> t.getName().equals(topicName)).findFirst().orElseThrow());
     }
 
     private Mono incrementalAlterConfig(TopicFormData topicFormData, ConfigResource topicCR, ExtendedAdminClient ac) {
@@ -346,6 +345,8 @@ private Mono updateSegmentMetrics(AdminClient ac, Intern
 
     public List getJmxMetric(String clusterName, Node node) {
         return clustersStorage.getClusterByName(clusterName)
+                .filter( c -> c.getJmxPort() != null)
+                .filter( c -> c.getJmxPort() > 0)
                 .map(c -> jmxClusterUtil.getJmxMetrics(c.getJmxPort(), node.host())).orElse(Collections.emptyList());
     }
 
@@ -357,7 +358,7 @@ private Mono fillBrokerMetrics(InternalClusterMetrics in
         return ClusterUtil.toMono(ac.describeCluster().nodes())
                 .flatMapIterable(nodes -> nodes)
                 .map(broker -> Map.of(broker.id(), InternalBrokerMetrics.builder().
-                        jmxMetrics(getJmxMetric(clusterName, broker)).build()))
+                        metrics(getJmxMetric(clusterName, broker)).build()))
                 .collectList()
                 .map(s -> internalClusterMetrics.toBuilder().internalBrokerMetrics(ClusterUtil.toSingleMap(s.stream())).build());
     }
@@ -377,22 +378,25 @@ private InternalClusterMetrics calculateClusterMetrics(InternalClusterMetrics in
                 .collect(Collectors.toList())).build();
     }
 
-    public List partitionDtoList (InternalTopic topic, KafkaCluster cluster) {
-        var topicPartitions = topic.getPartitions().stream().map(t -> new TopicPartition(topic.getName(), t.getPartition())).collect(Collectors.toList());
-        return getTopicPartitionOffset(cluster, topicPartitions);
-    }
+    public List getTopicPartitions(KafkaCluster c, InternalTopic topic ) {
+        var tps = topic.getPartitions().stream()
+                .map(t -> new TopicPartition(topic.getName(), t.getPartition()))
+                .collect(Collectors.toList());
+        Map partitions =
+                topic.getPartitions().stream().collect(Collectors.toMap(
+                        InternalPartition::getPartition,
+                        tp -> tp
+                ));
 
-    private List getTopicPartitionOffset(KafkaCluster c, List topicPartitions ) {
         try (var consumer = createConsumer(c)) {
-            final Map earliest = consumer.beginningOffsets(topicPartitions);
-            final Map latest = consumer.endOffsets(topicPartitions);
+            final Map earliest = consumer.beginningOffsets(tps);
+            final Map latest = consumer.endOffsets(tps);
 
-            return topicPartitions.stream()
-                    .map( tp -> new TopicPartitionDto()
-                            .topic(tp.topic())
-                            .partition(tp.partition())
+            return tps.stream()
+                    .map( tp -> partitions.get(tp.partition()).toBuilder()
                             .offsetMin(Optional.ofNullable(earliest.get(tp)).orElse(0L))
                             .offsetMax(Optional.ofNullable(latest.get(tp)).orElse(0L))
+                            .build()
                     ).collect(Collectors.toList());
         } catch (Exception e) {
            return Collections.emptyList();
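`getTopicPartitions` above derives `offsetMin`/`offsetMax` from a short-lived consumer via `beginningOffsets`/`endOffsets`. A self-contained sketch of that lookup outside the service follows; the broker address, topic name and group id are assumptions for illustration only.

```java
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.BytesDeserializer;

// Hypothetical standalone equivalent of the offset lookup used by getTopicPartitions.
public class PartitionOffsetsProbe {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumption
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "kafka-ui-offsets-probe");  // assumption
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);

        try (KafkaConsumer<?, ?> consumer = new KafkaConsumer<>(props)) {
            List<TopicPartition> tps = List.of(new TopicPartition("my-topic", 0)); // assumption
            Map<TopicPartition, Long> earliest = consumer.beginningOffsets(tps);   // -> offsetMin
            Map<TopicPartition, Long> latest = consumer.endOffsets(tps);           // -> offsetMax
            tps.forEach(tp ->
                System.out.printf("%s min=%d max=%d%n", tp, earliest.get(tp), latest.get(tp)));
        }
    }
}
```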
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/rest/MetricsRestController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/rest/MetricsRestController.java
index f7308fdc69e..ce65342b28f 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/rest/MetricsRestController.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/rest/MetricsRestController.java
@@ -30,12 +30,19 @@ public Mono>> getClusters(ServerWebExchange exchang
     }
 
     @Override
-    public Mono> getBrokersMetrics(String clusterName, Integer id, ServerWebExchange exchange) {
-        return clusterService.getBrokersMetrics(clusterName, id)
+    public Mono> getBrokersMetrics(String clusterName, Integer id, ServerWebExchange exchange) {
+        return clusterService.getBrokerMetrics(clusterName, id)
                 .map(ResponseEntity::ok)
                 .onErrorReturn(ResponseEntity.notFound().build());
     }
 
+    @Override
+    public Mono> getClusterMetrics(String clusterName, ServerWebExchange exchange) {
+        return clusterService.getClusterMetrics(clusterName)
+                .map(ResponseEntity::ok)
+                .onErrorReturn(ResponseEntity.notFound().build());
+    }
+
     @Override
     public Mono>> getTopics(String clusterName, ServerWebExchange exchange) {
         return Mono.just(ResponseEntity.ok(Flux.fromIterable(clusterService.getTopics(clusterName))));
diff --git a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
index 8e2212b7b11..2cc6d8a830a 100644
--- a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
+++ b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
@@ -52,6 +52,26 @@ paths:
                 items:
                   $ref: '#/components/schemas/Broker'
 
+  /api/clusters/{clusterName}/metrics:
+    get:
+      tags:
+        - /api/clusters
+      summary: getClusterMetrics
+      operationId: getClusterMetrics
+      parameters:
+        - name: clusterName
+          in: path
+          required: true
+          schema:
+            type: string
+      responses:
+        200:
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/ClusterMetrics'
+
   /api/clusters/{clusterName}/metrics/broker/{id}:
     get:
       tags:
@@ -75,7 +95,7 @@ paths:
           content:
             application/json:
               schema:
-                $ref: '#/components/schemas/BrokersMetrics'
+                $ref: '#/components/schemas/BrokerMetrics'
 
   /api/clusters/{clusterName}/topics:
     get:
@@ -327,16 +347,16 @@ components:
         - online
        - offline
 
-    BrokersMetrics:
+    ClusterMetrics:
      type: object
       properties:
+        brokerCount:
+          type: integer
         zooKeeperStatus:
           type: integer
         activeControllers:
           type: integer
-        uncleanLeaderElectionCount:
-          type: integer
-        underReplicatedPartitionCount:
+        onlinePartitionCount:
           type: integer
         offlinePartitionCount:
           type: integer
@@ -344,6 +364,25 @@ components:
           type: integer
         outOfSyncReplicasCount:
           type: integer
+        underReplicatedPartitionCount:
+          type: integer
+        diskUsage:
+          type: array
+          items:
+            $ref: '#/components/schemas/BrokerDiskUsage'
+
+    BrokerDiskUsage:
+      type: object
+      properties:
+        brokerId:
+          type: integer
+        segmentSize:
+          type: integer
+          format: int64
+
+    BrokerMetrics:
+      type: object
+      properties:
         segmentZise:
           type: integer
         metrics:
@@ -358,22 +397,24 @@ components:
           type: string
         internal:
           type: boolean
-        partitions:
-          type: array
-          items:
-            $ref: '#/components/schemas/Partition'
-
-    Partition:
-      type: object
-      properties:
-        partition:
+        partitionCount:
           type: integer
-        leader:
+        replicationFactor:
           type: integer
         replicas:
+          type: integer
+        inSyncReplicas:
+          type: integer
+        segmentSize:
+          type: integer
+        segmentCount:
+          type: integer
+        underReplicatedPartitions:
+          type: integer
+        partitions:
           type: array
           items:
-            $ref: '#/components/schemas/Replica'
+            $ref: "#/components/schemas/Partition"
 
     Replica:
       type: object
@@ -388,10 +429,14 @@ components:
     TopicDetails:
       type: object
       properties:
+        name:
+          type: string
+        internal:
+          type: boolean
         partitions:
           type: array
           items:
-            $ref: "#/components/schemas/TopicPartitionDto"
+            $ref: "#/components/schemas/Partition"
         partitionCount:
           type: integer
         replicationFactor:
@@ -490,13 +535,17 @@ components:
         - OFFSET
         - TIMESTAMP
 
-    TopicPartitionDto:
+    Partition:
       type: object
       properties:
-        topic:
-          type: string
         partition:
           type: integer
+        leader:
+          type: integer
+        replicas:
+          type: array
+          items:
+            $ref: '#/components/schemas/Replica'
         offsetMax:
           type: integer
           format: int64
@@ -516,6 +565,8 @@ components:
           type: string
         topic:
           type: string
+        host:
+          type: string
         partition:
           type: integer
         currentOffset:
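Given the contract addition above, the new endpoint should be reachable at `GET /api/clusters/{clusterName}/metrics`. A hedged smoke-test sketch using Spring's `WebClient`; the base URL and the cluster name `local` are assumptions, not part of this change.

```java
import org.springframework.web.reactive.function.client.WebClient;

// Hypothetical manual check of the cluster metrics endpoint defined in kafka-ui-api.yaml.
public class ClusterMetricsSmokeTest {

    public static void main(String[] args) {
        WebClient client = WebClient.create("http://localhost:8080"); // assumed local instance
        String body = client.get()
                .uri("/api/clusters/{clusterName}/metrics", "local")  // "local" is an assumed cluster name
                .retrieve()
                .bodyToMono(String.class)
                .block();
        System.out.println(body); // expected: JSON matching the ClusterMetrics schema
    }
}
```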
diff --git a/kafka-ui-react-app/src/components/ConsumerGroups/Details/Details.tsx b/kafka-ui-react-app/src/components/ConsumerGroups/Details/Details.tsx
index 39780bb6e84..7109212d698 100644
--- a/kafka-ui-react-app/src/components/ConsumerGroups/Details/Details.tsx
+++ b/kafka-ui-react-app/src/components/ConsumerGroups/Details/Details.tsx
@@ -58,6 +58,7 @@ const Details: React.FC = ({
               Consumer ID
+              Host
               Topic
               Partition
               Messages behind
diff --git a/kafka-ui-react-app/src/components/ConsumerGroups/Details/ListItem.tsx b/kafka-ui-react-app/src/components/ConsumerGroups/Details/ListItem.tsx
index 35a7b02a820..1061d2241ae 100644
--- a/kafka-ui-react-app/src/components/ConsumerGroups/Details/ListItem.tsx
+++ b/kafka-ui-react-app/src/components/ConsumerGroups/Details/ListItem.tsx
@@ -12,6 +12,7 @@ const ListItem: React.FC = ({ clusterName, consumer }) => {
   return (
       {consumer.consumerId}
+      {consumer.host}
 = ({
   });
 };
 
-  const getTimestampDate = (timestamp: number) => {
-    return format(new Date(timestamp * 1000), 'MM.dd.yyyy HH:mm:ss');
+  const getTimestampDate = (timestamp: string) => {
+    return format(Date.parse(timestamp), 'yyyy-MM-dd HH:mm:ss');
   };
 
   const getMessageContentHeaders = React.useMemo(() => {
diff --git a/kafka-ui-react-app/src/components/Topics/Details/Overview/Overview.tsx b/kafka-ui-react-app/src/components/Topics/Details/Overview/Overview.tsx
index 69ee5e7a497..41cc5971982 100644
--- a/kafka-ui-react-app/src/components/Topics/Details/Overview/Overview.tsx
+++ b/kafka-ui-react-app/src/components/Topics/Details/Overview/Overview.tsx
@@ -23,10 +23,9 @@ const Overview: React.FC = ({
   replicationFactor,
   fetchTopicDetails,
 }) => {
-  React.useEffect(
-    () => { fetchTopicDetails(clusterName, topicName); },
-    [fetchTopicDetails, clusterName, topicName],
-  );
+  React.useEffect(() => {
+    fetchTopicDetails(clusterName, topicName);
+  }, [fetchTopicDetails, clusterName, topicName]);
 
   if (!isFetched) {
     return null;
@@ -35,18 +34,18 @@ const Overview: React.FC = ({
   return (
     <>
-
-        {partitionCount}
-
-
-        {replicationFactor}
-
+        {partitionCount}
+        {replicationFactor}
         {underReplicatedPartitions}
         {inSyncReplicas}
-          of {replicas}
+
+          {' '}
+          of
+          {replicas}
+
@@ -60,15 +59,20 @@ const Overview: React.FC = ({
             Partition ID
             Broker leader
+            Min offset
+            Max offset
-          {partitions && partitions.map(({ partition, leader }) => (
-              {partition}
-              {leader}
-          ))}
+          {partitions &&
+            partitions.map(({ partition, leader, offsetMin, offsetMax }) => (
+                {partition}
+                {leader}
+                {offsetMin}
+                {offsetMax}
+            ))}
diff --git a/kafka-ui-react-app/src/redux/api/brokers.ts b/kafka-ui-react-app/src/redux/api/brokers.ts
index 2b4216fb27c..3e5adea205d 100644
--- a/kafka-ui-react-app/src/redux/api/brokers.ts
+++ b/kafka-ui-react-app/src/redux/api/brokers.ts
@@ -1,17 +1,14 @@
-import {
-  Broker,
-  ClusterName,
-  BrokerMetrics,
-} from 'redux/interfaces';
-import {
-  BASE_URL,
-  BASE_PARAMS,
-} from 'lib/constants';
+import { Broker, ClusterName, BrokerMetrics } from 'redux/interfaces';
+import { BASE_URL, BASE_PARAMS } from 'lib/constants';
 
 export const getBrokers = (clusterName: ClusterName): Promise =>
-  fetch(`${BASE_URL}/clusters/${clusterName}/brokers`, { ...BASE_PARAMS })
-    .then(res => res.json());
+  fetch(`${BASE_URL}/clusters/${clusterName}/brokers`, {
+    ...BASE_PARAMS,
+  }).then((res) => res.json());
 
-export const getBrokerMetrics = (clusterName: ClusterName): Promise =>
-  fetch(`${BASE_URL}/clusters/${clusterName}/metrics/broker`, { ...BASE_PARAMS })
-    .then(res => res.json());
+export const getBrokerMetrics = (
+  clusterName: ClusterName
+): Promise =>
+  fetch(`${BASE_URL}/clusters/${clusterName}/metrics`, {
+    ...BASE_PARAMS,
+  }).then((res) => res.json());
diff --git a/kafka-ui-react-app/src/redux/interfaces/consumerGroup.ts b/kafka-ui-react-app/src/redux/interfaces/consumerGroup.ts
index b762ebbba83..349b4ba9f62 100644
--- a/kafka-ui-react-app/src/redux/interfaces/consumerGroup.ts
+++ b/kafka-ui-react-app/src/redux/interfaces/consumerGroup.ts
@@ -16,16 +16,18 @@ export interface ConsumerGroupDetails {
 export interface Consumer {
   consumerId: string;
   topic: string;
+  host: string;
   partition: number;
   messagesBehind: number;
   currentOffset: number;
   endOffset: number;
 }
 
-export interface ConsumerGroupDetailedInfo extends ConsumerGroup, ConsumerGroupDetails {
-}
+export interface ConsumerGroupDetailedInfo
+  extends ConsumerGroup,
+    ConsumerGroupDetails {}
 
 export interface ConsumerGroupsState {
-  byID: { [consumerGroupID: string]: ConsumerGroupDetailedInfo },
-  allIDs: string[]
-}
\ No newline at end of file
+  byID: { [consumerGroupID: string]: ConsumerGroupDetailedInfo };
+  allIDs: string[];
+}
diff --git a/kafka-ui-react-app/src/redux/interfaces/topic.ts b/kafka-ui-react-app/src/redux/interfaces/topic.ts
index 1c98bb20e97..6c5b8a7af7b 100644
--- a/kafka-ui-react-app/src/redux/interfaces/topic.ts
+++ b/kafka-ui-react-app/src/redux/interfaces/topic.ts
@@ -26,6 +26,8 @@ export interface TopicReplica {
 export interface TopicPartition {
   partition: number;
   leader: number;
+  offsetMin: number;
+  offsetMax: number;
   replicas: TopicReplica[];
 }
 
@@ -35,25 +37,26 @@ export interface TopicCustomParamOption {
 }
 
 export interface TopicDetails {
+  partitions: TopicPartition[];
+}
+
+export interface Topic {
+  name: TopicName;
+  internal: boolean;
   partitionCount?: number;
   replicationFactor?: number;
   replicas?: number;
-  segmentSize?: number;
   inSyncReplicas?: number;
+  segmentSize?: number;
   segmentCount?: number;
   underReplicatedPartitions?: number;
-}
-
-export interface Topic {
-  name: TopicName;
-  internal: boolean;
   partitions: TopicPartition[];
 }
 
 export interface TopicMessage {
   partition: number;
   offset: number;
-  timestamp: number;
+  timestamp: string;
   timestampType: string;
   key: string;
   headers: Record;