feat: Close clients gracefully (#56)

* update

* update

* update

* update

* update

* update
jiangmichaellll committed Feb 5, 2021
1 parent b91ecb2 commit ac48f12
Showing 4 changed files with 16 additions and 18 deletions.
@@ -98,8 +98,8 @@ public void commit(Offset end) {
 
   @Override
   public void stop() {
-    cursorClient.shutdown();
     committer.close();
+    cursorClient.close();
   }
 
   @Override

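A note on the switch from shutdown() to close() (my inference; the commit itself does not explain it): these Pub/Sub Lite clients follow the gax BackgroundResource contract, where shutdown() only initiates an orderly release while close() is the AutoCloseable entry point, so a client's lifetime can be tied to a lexical scope. A minimal sketch of that idiom, with makeCursorClient() as a hypothetical stand-in for PslDataSourceOptions.newCursorClient():

    // Sketch only; makeCursorClient() is hypothetical.
    try (CursorClient cursorClient = makeCursorClient()) {
      // ... fetch committed cursors ...
    } // close() runs here even if the body throws
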
src/main/java/com/google/cloud/pubsublite/spark/PslDataSource.java (12 additions & 12 deletions)

@@ -16,13 +16,14 @@
 
 package com.google.cloud.pubsublite.spark;
 
+import static com.google.cloud.pubsublite.internal.ExtractStatus.toCanonical;
+
 import com.github.benmanes.caffeine.cache.Ticker;
 import com.google.auto.service.AutoService;
 import com.google.cloud.pubsublite.AdminClient;
 import com.google.cloud.pubsublite.PartitionLookupUtils;
 import com.google.cloud.pubsublite.SubscriptionPath;
 import com.google.cloud.pubsublite.TopicPath;
-import com.google.cloud.pubsublite.internal.CursorClient;
 import java.util.Objects;
 import java.util.Optional;
 import org.apache.spark.sql.sources.DataSourceRegister;

@@ -53,12 +54,13 @@ public ContinuousReader createContinuousReader(
 
     PslDataSourceOptions pslDataSourceOptions =
         PslDataSourceOptions.fromSparkDataSourceOptions(options);
-    CursorClient cursorClient = pslDataSourceOptions.newCursorClient();
-    AdminClient adminClient = pslDataSourceOptions.newAdminClient();
     SubscriptionPath subscriptionPath = pslDataSourceOptions.subscriptionPath();
-    long topicPartitionCount = PartitionLookupUtils.numPartitions(subscriptionPath, adminClient);
+    long topicPartitionCount;
+    try (AdminClient adminClient = pslDataSourceOptions.newAdminClient()) {
+      topicPartitionCount = PartitionLookupUtils.numPartitions(subscriptionPath, adminClient);
+    }
     return new PslContinuousReader(
-        cursorClient,
+        pslDataSourceOptions.newCursorClient(),
        pslDataSourceOptions.newMultiPartitionCommitter(topicPartitionCount),
        pslDataSourceOptions.getSubscriberFactory(),
        subscriptionPath,
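
Scoping the AdminClient with try-with-resources guarantees it is released even when the partition lookup fails. Roughly (ignoring the suppressed-exception bookkeeping the compiler adds), the new block desugars to:

    AdminClient adminClient = pslDataSourceOptions.newAdminClient();
    try {
      topicPartitionCount = PartitionLookupUtils.numPartitions(subscriptionPath, adminClient);
    } finally {
      adminClient.close(); // always runs, even if numPartitions throws
    }

The cursor client, by contrast, must outlive this method, so it is handed straight to PslContinuousReader, whose stop() now closes it.
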
@@ -76,19 +78,17 @@ public MicroBatchReader createMicroBatchReader(
 
     PslDataSourceOptions pslDataSourceOptions =
         PslDataSourceOptions.fromSparkDataSourceOptions(options);
-    CursorClient cursorClient = pslDataSourceOptions.newCursorClient();
-    AdminClient adminClient = pslDataSourceOptions.newAdminClient();
     SubscriptionPath subscriptionPath = pslDataSourceOptions.subscriptionPath();
     TopicPath topicPath;
-    try {
+    long topicPartitionCount;
+    try (AdminClient adminClient = pslDataSourceOptions.newAdminClient()) {
       topicPath = TopicPath.parse(adminClient.getSubscription(subscriptionPath).get().getTopic());
+      topicPartitionCount = PartitionLookupUtils.numPartitions(topicPath, adminClient);
     } catch (Throwable t) {
-      throw new IllegalStateException(
-          "Unable to get topic for subscription " + subscriptionPath, t);
+      throw toCanonical(t).underlying;
     }
-    long topicPartitionCount = PartitionLookupUtils.numPartitions(topicPath, adminClient);
     return new PslMicroBatchReader(
-        cursorClient,
+        pslDataSourceOptions.newCursorClient(),
        pslDataSourceOptions.newMultiPartitionCommitter(topicPartitionCount),
        pslDataSourceOptions.getSubscriberFactory(),
        new LimitingHeadOffsetReader(
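
Besides sharing the admin-client scope with the topic lookup, the error path changed: failures are rethrown via toCanonical instead of being wrapped in an IllegalStateException. My reading of the pubsublite internals (worth verifying against ExtractStatus): toCanonical(Throwable) yields a CheckedApiException with a canonical status code, and its underlying field holds the unchecked ApiException, so callers see a conventional gax error. A compacted sketch of the resulting idiom, using only calls that appear in this diff (lookupPartitionCount is a hypothetical helper, and the file's imports are assumed):

    static long lookupPartitionCount(PslDataSourceOptions options, SubscriptionPath path) {
      try (AdminClient adminClient = options.newAdminClient()) {
        // A catch attached to try-with-resources also sees exceptions
        // thrown by the implicit close().
        return PartitionLookupUtils.numPartitions(path, adminClient);
      } catch (Throwable t) {
        throw toCanonical(t).underlying;
      }
    }
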
@@ -97,10 +97,6 @@ public InternalRow get() {
 
   @Override
   public void close() {
-    try {
-      subscriber.close();
-    } catch (Exception e) {
-      log.atWarning().log("Subscriber failed to close.");
-    }
+    subscriber.close();
   }
 }
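
The try/catch that logged and swallowed close() failures is gone: a subscriber that fails to shut down now propagates the error to the caller instead of being reduced to a WARNING log line. The surviving method, annotated:

    @Override
    public void close() {
      // Previously wrapped in try/catch and only logged at WARNING;
      // failures now surface to the caller.
      subscriber.close();
    }
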
@@ -115,6 +115,8 @@ public void commit(Offset end) {
   @Override
   public void stop() {
     committer.close();
+    cursorClient.close();
+    headOffsetReader.close();
   }
 
   @Override
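
stop() now tears down every resource the micro-batch reader owns, not just the committer. A plausible reading of the ordering (my assumption, not stated in the commit): close the committer first so no new commits are issued, then the clients behind it. Annotated:

    @Override
    public void stop() {
      committer.close();        // stop issuing commits first
      cursorClient.close();     // then the cursor RPC client
      headOffsetReader.close(); // finally the head-offset reader
    }
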
