Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,148 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.beam.sdk.io.gcp.bigquery;

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.cache.Cache;
import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.cache.CacheBuilder;
import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.cache.RemovalNotification;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
import org.joda.time.Duration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Encapsulates the cache of {@link AppendClientInfo} objects and the synchronization protocol
 * required to use them safely. The Guava cache object is thread-safe. However our protocol
 * requires that clients pin the StreamAppendClient after looking up the cache, and we must ensure
 * that the cache is not accessed in between the lookup and the pin (any access of the cache could
 * trigger element expiration).
 */
class AppendClientCache<KeyT extends @NonNull Object> {
  private static final Logger LOG = LoggerFactory.getLogger(AppendClientCache.class);

  // Closing a StreamAppendClient may block, so closes are always handed off to this pool.
  private final ExecutorService closeWriterExecutor = Executors.newCachedThreadPool();

  private final Cache<KeyT, AppendClientInfo> appendCache;

  @SuppressWarnings({"FutureReturnValueIgnored"})
  AppendClientCache(Duration expireAfterAccess) {
    this.appendCache =
        CacheBuilder.newBuilder()
            .expireAfterAccess(expireAfterAccess.getMillis(), TimeUnit.MILLISECONDS)
            .removalListener(
                (RemovalNotification<KeyT, AppendClientInfo> removal) -> {
                  LOG.info("Expiring append client for {}", removal.getKey());
                  final @Nullable AppendClientInfo appendClientInfo = removal.getValue();
                  if (appendClientInfo != null) {
                    // Remove the pin owned by the cache itself. Since the client has not been
                    // marked as closed, unpin can be called on this thread without blocking.
                    appendClientInfo.unpinAppendClient(null);
                    // Close the client on a separate thread to avoid blocking this one.
                    closeWriterExecutor.submit(appendClientInfo::close);
                  }
                })
            .build();
  }

  // The cache itself always owns one pin on the object. This Callable wrapper ensures that the
  // pin is added before the loaded value becomes visible in the cache.
  private static Callable<AppendClientInfo> wrapWithPin(Callable<AppendClientInfo> loader) {
    return () -> {
      AppendClientInfo client = loader.call();
      client.pinAppendClient();
      return client;
    };
  }

  /**
   * Atomically get an append client from the cache and add a pin. This pin is owned by the client,
   * which has the responsibility of removing it. If the client is not in the cache, loader will be
   * used to load the client; in this case an additional pin will be added owned by the cache,
   * removed when the item is evicted.
   */
  public AppendClientInfo getAndPin(KeyT key, Callable<AppendClientInfo> loader) throws Exception {
    synchronized (this) {
      AppendClientInfo info = appendCache.get(key, wrapWithPin(loader));
      info.pinAppendClient();
      return info;
    }
  }

  /**
   * Get an append client from the cache without adding a caller-owned pin, loading it with {@code
   * loader} (plus the cache-owned pin) if absent.
   *
   * <p>NOTE(review): unlike {@link #getAndPin}, this is not synchronized — confirm callers do not
   * rely on the lookup being atomic with respect to pinning.
   */
  public AppendClientInfo get(KeyT key, Callable<AppendClientInfo> loader) throws Exception {
    return appendCache.get(key, wrapWithPin(loader));
  }

  /**
   * Atomically load a new client, insert it (replacing any existing entry), and add a
   * caller-owned pin in addition to the cache-owned pin.
   */
  public AppendClientInfo putAndPin(KeyT key, Callable<AppendClientInfo> loader) throws Exception {
    synchronized (this) {
      AppendClientInfo info = wrapWithPin(loader).call();
      appendCache.put(key, info);
      info.pinAppendClient();
      return info;
    }
  }

  /** Load a new client and insert it with only the cache-owned pin. */
  public AppendClientInfo put(KeyT key, Callable<AppendClientInfo> loader) throws Exception {
    AppendClientInfo info = wrapWithPin(loader).call();
    appendCache.put(key, info);
    return info;
  }

  /**
   * Invalidate {@code key}, but only if the currently cached value is still the exact instance
   * {@code expectedClient}.
   */
  public void invalidate(KeyT key, AppendClientInfo expectedClient) {
    // The default stream is cached across multiple different DoFns. If they all try to
    // invalidate, then we can get races between threads invalidating and recreating streams. For
    // this reason, we check that the cache still contains the object we created before
    // invalidating (in case another thread has already invalidated and recreated the stream).
    synchronized (this) {
      AppendClientInfo cachedAppendClient = appendCache.getIfPresent(key);
      // Use reference equality for the same-instance check: comparing
      // System.identityHashCode values can produce false positives on hash collision.
      if (cachedAppendClient == expectedClient && cachedAppendClient != null) {
        appendCache.invalidate(key);
      }
    }
  }

  /** Unconditionally invalidate {@code key}, evicting and eventually closing its client. */
  public void invalidate(KeyT key) {
    synchronized (this) {
      appendCache.invalidate(key);
    }
  }

  /** Touch {@code key} to refresh its expire-after-access timer; the value is discarded. */
  public void tickle(KeyT key) {
    appendCache.getIfPresent(key);
  }

  /** Evict every entry; each eviction releases the cache-owned pin and closes asynchronously. */
  public void clear() {
    synchronized (this) {
      appendCache.invalidateAll();
    }
  }

  /** Remove a caller-owned pin, performing any resulting close on the background executor. */
  public void unpinAsync(AppendClientInfo appendClientInfo) {
    appendClientInfo.unpinAppendClient(closeWriterExecutor);
  }
}
Original file line number Diff line number Diff line change
Expand Up @@ -27,12 +27,19 @@
import com.google.protobuf.Descriptors;
import com.google.protobuf.DynamicMessage;
import com.google.protobuf.Message;
import java.util.function.Consumer;
import java.util.Arrays;
import java.util.concurrent.ExecutorService;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import org.apache.beam.sdk.function.ThrowingConsumer;
import org.apache.beam.sdk.function.ThrowingRunnable;
import org.apache.beam.sdk.metrics.Counter;
import org.apache.beam.sdk.metrics.Metrics;
import org.apache.beam.sdk.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* Container class used by {@link StorageApiWritesShardedRecords} and {@link
Expand All @@ -41,14 +48,16 @@
*/
@AutoValue
abstract class AppendClientInfo {
private static final Logger LOG = LoggerFactory.getLogger(AppendClientInfo.class);

private final Counter activeStreamAppendClients =
Metrics.counter(AppendClientInfo.class, "activeStreamAppendClients");

abstract @Nullable BigQueryServices.StreamAppendClient getStreamAppendClient();

abstract TableSchema getTableSchema();

abstract Consumer<BigQueryServices.StreamAppendClient> getCloseAppendClient();
abstract ThrowingConsumer<Exception, BigQueryServices.StreamAppendClient> getCloseAppendClient();

abstract com.google.api.services.bigquery.model.TableSchema getJsonTableSchema();

Expand All @@ -64,7 +73,8 @@ abstract static class Builder {

abstract Builder setTableSchema(TableSchema value);

abstract Builder setCloseAppendClient(Consumer<BigQueryServices.StreamAppendClient> value);
abstract Builder setCloseAppendClient(
ThrowingConsumer<Exception, BigQueryServices.StreamAppendClient> value);

abstract Builder setJsonTableSchema(com.google.api.services.bigquery.model.TableSchema value);

Expand All @@ -82,7 +92,7 @@ abstract static class Builder {
static AppendClientInfo of(
TableSchema tableSchema,
DescriptorProtos.DescriptorProto descriptor,
Consumer<BigQueryServices.StreamAppendClient> closeAppendClient)
ThrowingConsumer<Exception, BigQueryServices.StreamAppendClient> closeAppendClient)
throws Exception {
return new AutoValue_AppendClientInfo.Builder()
.setTableSchema(tableSchema)
Expand All @@ -96,7 +106,7 @@ static AppendClientInfo of(

static AppendClientInfo of(
TableSchema tableSchema,
Consumer<BigQueryServices.StreamAppendClient> closeAppendClient,
ThrowingConsumer<Exception, BigQueryServices.StreamAppendClient> closeAppendClient,
boolean includeCdcColumns)
throws Exception {
return of(
Expand Down Expand Up @@ -133,27 +143,45 @@ public AppendClientInfo withAppendClient(
/**
 * Closes the underlying StreamAppendClient (if any) via the configured close callback and
 * decrements the active-client counter. Exceptions raised while closing are logged and ignored.
 */
public void close() {
  BigQueryServices.StreamAppendClient client = getStreamAppendClient();
  if (client != null) {
    try {
      getCloseAppendClient().accept(client);
    } catch (Exception e) {
      // We ignore errors when closing clients. Pass the throwable to SLF4J so it renders the
      // stack trace; no need to join StackTraceElements by hand.
      LOG.warn("Caught exception while trying to close append client. Ignoring.", e);
    }
    activeStreamAppendClients.dec();
  }
}

// Hash of this client's TableSchema; @Memoized caches the result so the hash is
// computed at most once per instance.
@Memoized
public byte[] getTableSchemaHash() {
return TableRowToStorageApiProto.tableSchemaHash(getTableSchema());
}

// Detects schema changes by comparing hashCode() of the two TableSchema objects.
// NOTE(review): a hashCode collision between two different schemas would make this return
// false for a genuine change — confirm hash-based (rather than equals-based) comparison is
// an intentional performance trade-off.
boolean hasSchemaChanged(TableSchema updatedTableSchema) {
return updatedTableSchema.hashCode() != getTableSchema().hashCode();
}

/**
 * Converts the given {@code unknown} TableRow into a proto message using the
 * ignore-required descriptor and returns its serialized bytes.
 *
 * @param unknown the row of unknown fields to encode
 * @param ignoreUnknownValues whether unknown values within the row are tolerated
 * @throws TableRowToStorageApiProto.SchemaConversionException if the row cannot be converted
 */
public ByteString encodeUnknownFields(TableRow unknown, boolean ignoreUnknownValues)
    throws TableRowToStorageApiProto.SchemaConversionException {
  // checkArgumentNotNull returns its (non-null) argument, so msg is safe to use below.
  Message msg =
      Preconditions.checkArgumentNotNull(
          TableRowToStorageApiProto.messageFromTableRow(
              getSchemaInformation(),
              getDescriptorIgnoreRequired(),
              unknown,
              ignoreUnknownValues,
              true,
              null,
              null,
              null,
              TableRowToStorageApiProto.ErrorCollector.DONT_COLLECT));
  return msg.toByteString();
}

Expand Down Expand Up @@ -191,4 +219,38 @@ public TableRow toTableRow(ByteString protoBytes, Predicate<String> includeField
throw new RuntimeException(e);
}
}

/** Adds one pin to the underlying stream append client; fails if no client is attached. */
public void pinAppendClient() {
  Preconditions.checkStateNotNull(getStreamAppendClient()).pin();
}

/**
 * Removes one pin from the underlying stream append client; fails if no client is attached.
 * When {@code executor} is supplied, the unpin runs asynchronously with failures swallowed;
 * otherwise it runs synchronously on the calling thread.
 */
public void unpinAppendClient(@Nullable ExecutorService executor) {
  BigQueryServices.StreamAppendClient client =
      Preconditions.checkStateNotNull(getStreamAppendClient());
  if (executor == null) {
    client.unpin();
  } else {
    runAsyncIgnoreFailure(executor, client::unpin);
  }
}

/**
 * Submits {@code task} to {@code executor}, logging and swallowing any throwable it raises.
 * Used for best-effort cleanup (e.g. unpinning clients) that must never propagate failures.
 */
@SuppressWarnings({"FutureReturnValueIgnored"})
private static void runAsyncIgnoreFailure(ExecutorService executor, ThrowingRunnable task) {
  executor.submit(
      () -> {
        try {
          task.run();
        } catch (Throwable e) {
          // Use the class's SLF4J logger rather than System.err for consistency with the rest
          // of this file; SLF4J renders the stack trace from the throwable argument.
          LOG.warn("Exception happened while executing async task. Ignoring.", e);
        }
      });
}
}
Loading
Loading