samples: load table clustered (#157)
* feat: sample - query with positional params

* sample: add new - load clustered table

* fix build error
stephaniewang526 committed Feb 6, 2020
1 parent 95e4ff0 commit 2795172
Showing 2 changed files with 167 additions and 0 deletions.
95 changes: 95 additions & 0 deletions samples/src/main/java/com/example/bigquery/LoadTableClustered.java
@@ -0,0 +1,95 @@
/*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package com.example.bigquery;

// [START bigquery_load_table_clustered]
import com.google.cloud.bigquery.BigQuery;
import com.google.cloud.bigquery.BigQueryException;
import com.google.cloud.bigquery.BigQueryOptions;
import com.google.cloud.bigquery.Clustering;
import com.google.cloud.bigquery.Field;
import com.google.cloud.bigquery.FormatOptions;
import com.google.cloud.bigquery.Job;
import com.google.cloud.bigquery.JobInfo;
import com.google.cloud.bigquery.LoadJobConfiguration;
import com.google.cloud.bigquery.Schema;
import com.google.cloud.bigquery.StandardSQLTypeName;
import com.google.cloud.bigquery.TableId;
import com.google.cloud.bigquery.TimePartitioning;
import com.google.common.collect.ImmutableList;

public class LoadTableClustered {

public static void runLoadTableClustered() throws Exception {
// TODO(developer): Replace these variables before running the sample.
String datasetName = "MY_DATASET_NAME";
String tableName = "MY_TABLE_NAME";
    // Load jobs read from Cloud Storage, so the source must be a gs:// URI.
    String sourceUri = "gs://your-bucket/path/file.csv";
loadTableClustered(datasetName, tableName, sourceUri);
}

public static void loadTableClustered(String datasetName, String tableName, String sourceUri)
throws Exception {
try {
// Initialize client that will be used to send requests. This client only needs to be created
// once, and can be reused for multiple requests.
BigQuery bigquery = BigQueryOptions.getDefaultInstance().getService();

TableId tableId = TableId.of(datasetName, tableName);

Schema schema =
Schema.of(
Field.of("name", StandardSQLTypeName.STRING),
Field.of("post_abbr", StandardSQLTypeName.STRING),
Field.of("date", StandardSQLTypeName.DATE));

TimePartitioning partitioning = TimePartitioning.of(TimePartitioning.Type.DAY);

Clustering clustering =
Clustering.newBuilder().setFields(ImmutableList.of("name", "post_abbr")).build();

LoadJobConfiguration loadJobConfig =
LoadJobConfiguration.builder(tableId, sourceUri)
.setFormatOptions(FormatOptions.csv())
.setSchema(schema)
.setTimePartitioning(partitioning)
.setClustering(clustering)
.build();

Job loadJob = bigquery.create(JobInfo.newBuilder(loadJobConfig).build());

      // Load data from a GCS CSV file into the table.
// Blocks until this load table job completes its execution, either failing or succeeding.
Job completedJob = loadJob.waitFor();

// Check for errors
if (completedJob == null) {
throw new Exception("Job not executed since it no longer exists.");
      } else if (completedJob.getStatus().getError() != null) {
        // You can also look at completedJob.getStatus().getExecutionErrors() for all
        // errors, not just the latest one.
        throw new Exception(
            "BigQuery was unable to load into the table due to an error: \n"
                + completedJob.getStatus().getError());
}
System.out.println("Data successfully loaded into clustered table during load job");
} catch (BigQueryException | InterruptedException e) {
System.out.println("Data not loaded into clustered table during load job \n" + e.toString());
}
}
}
// [END bigquery_load_table_clustered]
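
For a quick sanity check that the load produced a clustered, partitioned table, the definition can be read back with the same client library. This is a minimal sketch, not part of the commit; the class name VerifyClustering and the dataset/table placeholders are illustrative.

package com.example.bigquery;

import com.google.cloud.bigquery.BigQuery;
import com.google.cloud.bigquery.BigQueryOptions;
import com.google.cloud.bigquery.StandardTableDefinition;
import com.google.cloud.bigquery.TableId;

public class VerifyClustering {

  public static void main(String[] args) {
    BigQuery bigquery = BigQueryOptions.getDefaultInstance().getService();
    // Placeholders; use the dataset and table passed to loadTableClustered above.
    StandardTableDefinition definition =
        bigquery.getTable(TableId.of("MY_DATASET_NAME", "MY_TABLE_NAME")).getDefinition();
    // Expected to print the clustering fields [name, post_abbr] and partitioning type DAY.
    System.out.println("Clustering fields: " + definition.getClustering().getFields());
    System.out.println("Partitioning type: " + definition.getTimePartitioning().getType());
  }
}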
72 changes: 72 additions & 0 deletions samples/src/test/java/com/example/bigquery/LoadTableClusteredIT.java
@@ -0,0 +1,72 @@
/*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package com.example.bigquery;

import static com.google.common.truth.Truth.assertThat;
import static junit.framework.TestCase.assertNotNull;

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;

public class LoadTableClusteredIT {
  private ByteArrayOutputStream bout;
  private PrintStream out;
  private PrintStream originalPrintStream;

private static final String BIGQUERY_DATASET_NAME = System.getenv("BIGQUERY_DATASET_NAME");

private static void requireEnvVar(String varName) {
assertNotNull(
"Environment variable " + varName + " is required to perform these tests.",
System.getenv(varName));
}

@BeforeClass
public static void checkRequirements() {
requireEnvVar("BIGQUERY_DATASET_NAME");
}

  @Before
  public void setUp() {
    bout = new ByteArrayOutputStream();
    out = new PrintStream(bout);
    originalPrintStream = System.out;
    System.setOut(out);
  }

  @After
  public void tearDown() {
    // Restore stdout so output from later tests is not swallowed.
    System.setOut(originalPrintStream);
  }

@Test
public void loadTableClustered() throws Exception {
String sourceUri = "gs://cloud-samples-data/bigquery/us-states/us-states-by-date-no-header.csv";

String tableName = "LOAD_CLUSTERED_TABLE_TEST";

LoadTableClustered.loadTableClustered(BIGQUERY_DATASET_NAME, tableName, sourceUri);

assertThat(bout.toString())
.contains("Data successfully loaded into clustered table during load job");

// Clean up
DeleteTable.deleteTable(BIGQUERY_DATASET_NAME, tableName);
}
}
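
The cleanup step above calls DeleteTable.deleteTable, a helper that lives elsewhere in the samples module and is not part of this diff. A minimal sketch of such a helper, assuming only the signature visible in the call above and the same client library:

package com.example.bigquery;

import com.google.cloud.bigquery.BigQuery;
import com.google.cloud.bigquery.BigQueryOptions;
import com.google.cloud.bigquery.TableId;

public class DeleteTable {

  public static void deleteTable(String datasetName, String tableName) {
    BigQuery bigquery = BigQueryOptions.getDefaultInstance().getService();
    // delete() returns true if the table was removed, false if it did not exist.
    boolean deleted = bigquery.delete(TableId.of(datasetName, tableName));
    System.out.println(deleted ? "Table deleted successfully" : "Table was not found");
  }
}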
