chore: run code formatter (#425)
stephaniewang526 committed Jun 8, 2020
1 parent dff4e5f commit 342573a
Showing 29 changed files with 124 additions and 121 deletions.
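The changes are mechanical: each hunk pairs removed lines (prefixed "-" below) with re-formatted replacement lines (prefixed "+") that carry the same statements, and one hunk additionally drops an unused import, so behavior is unchanged. The resulting style matches google-java-format's Google style (100-column limit; a signature or call that does not fit is broken after its opening parenthesis, one argument per continuation line), although the exact formatter is an assumption since the commit message says only "code formatter". A minimal sketch of the before/after pattern, reusing BigQuery calls that already appear in the hunks below; the class and method names are hypothetical:

import com.google.cloud.bigquery.BigQuery;
import com.google.cloud.bigquery.BigQueryOptions;
import com.google.cloud.bigquery.QueryJobConfiguration;
import com.google.cloud.bigquery.TableResult;

// Hypothetical, self-contained illustration; not one of the 29 changed files.
public class FormatterStyleExample {

  public static void runFormattedQuery(String query) throws InterruptedException {
    // Initialize client that will be used to send requests.
    BigQuery bigquery = BigQueryOptions.getDefaultInstance().getService();

    // Pre-format style (removed "-" lines in the hunks below) wrapped this by hand:
    //   QueryJobConfiguration queryConfig =
    //       QueryJobConfiguration.newBuilder(query).build();
    // Post-format style (added "+" lines) keeps it on one line because it fits within
    // the 100-column limit; statements that do not fit are instead broken after the
    // opening parenthesis with one argument per continuation line.
    QueryJobConfiguration queryConfig = QueryJobConfiguration.newBuilder(query).build();

    TableResult result = bigquery.query(queryConfig);
    System.out.println("Rows returned: " + result.getTotalRows());
  }
}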
@@ -47,14 +47,15 @@ public static void runAddColumnLoadAppend() throws Exception {
// 'REQUIRED' fields cannot be added to an existing schema, so the additional column must be
// 'NULLABLE'.
Schema schema =
- Schema.of(
- Field.newBuilder("name", LegacySQLTypeName.STRING)
+ Schema.of(
+ Field.newBuilder("name", LegacySQLTypeName.STRING)
.setMode(Field.Mode.REQUIRED)
.build());

List<Field> fields = schema.getFields();
// Adding below additional column during the load job
Field newField = Field.newBuilder("post_abbr", LegacySQLTypeName.STRING)
Field newField =
Field.newBuilder("post_abbr", LegacySQLTypeName.STRING)
.setMode(Field.Mode.NULLABLE)
.build();
List<Field> newFields = new ArrayList<>(fields);
@@ -63,8 +64,8 @@ public static void runAddColumnLoadAppend() throws Exception {
addColumnLoadAppend(datasetName, tableName, sourceUri, newSchema);
}

- public static void addColumnLoadAppend(String datasetName, String tableName,
- String sourceUri, Schema newSchema) throws Exception {
+ public static void addColumnLoadAppend(
+ String datasetName, String tableName, String sourceUri, Schema newSchema) throws Exception {
try {
// Initialize client that will be used to send requests. This client only needs to be created
// once, and can be reused for multiple requests.
@@ -44,16 +44,16 @@ public static void browseTable(String dataset, String table) {
TableId tableId = TableId.of(dataset, table);

// Page over 100 records. If you don't need pagination, remove the pageSize parameter.
- TableResult result =
- bigquery.listTableData(tableId, TableDataListOption.pageSize(100));
+ TableResult result = bigquery.listTableData(tableId, TableDataListOption.pageSize(100));

// Print the records
- result.iterateAll().forEach(row -> {
- row.forEach(fieldValue ->
- System.out.print(fieldValue.toString() + ", ")
- );
- System.out.println();
- });
+ result
+ .iterateAll()
+ .forEach(
+ row -> {
+ row.forEach(fieldValue -> System.out.print(fieldValue.toString() + ", "));
+ System.out.println();
+ });

System.out.println("Query ran successfully");
} catch (BigQueryException e) {
@@ -37,8 +37,11 @@ public static void runCopyTable() {
copyTable(sourceDatasetName, sourceTableId, destinationDatasetName, destinationTableId);
}

- public static void copyTable(String sourceDatasetName, String sourceTableId,
- String destinationDatasetName, String destinationTableId) {
+ public static void copyTable(
+ String sourceDatasetName,
+ String sourceTableId,
+ String destinationDatasetName,
+ String destinationTableId) {
try {
// Initialize client that will be used to send requests. This client only needs to be created
// once, and can be reused for multiple requests.
@@ -50,10 +53,7 @@ public static void copyTable(String sourceDatasetName, String sourceTableId,
// For more information on CopyJobConfiguration see:
// https://googleapis.dev/java/google-cloud-clients/latest/com/google/cloud/bigquery/JobConfiguration.html
CopyJobConfiguration configuration =
- CopyJobConfiguration.newBuilder(
- destinationTable,
- sourceTable
- ).build();
+ CopyJobConfiguration.newBuilder(destinationTable, sourceTable).build();

// For more information on Job see:
// https://googleapis.dev/java/google-cloud-clients/latest/index.html?com/google/cloud/bigquery/package-summary.html
@@ -37,17 +37,15 @@ public static void runCreateClusteredTable() {
String datasetName = "MY_DATASET_NAME";
String tableName = "MY_TABLE_NAME";
Schema schema =
- Schema.of(
- Field.of("name", StandardSQLTypeName.STRING),
- Field.of("post_abbr", StandardSQLTypeName.STRING),
- Field.of("date", StandardSQLTypeName.DATE));
- createClusteredTable(datasetName, tableName,
- schema, ImmutableList.of("name", "post_abbr"));
+ Schema.of(
+ Field.of("name", StandardSQLTypeName.STRING),
+ Field.of("post_abbr", StandardSQLTypeName.STRING),
+ Field.of("date", StandardSQLTypeName.DATE));
+ createClusteredTable(datasetName, tableName, schema, ImmutableList.of("name", "post_abbr"));
}

public static void createClusteredTable(
- String datasetName, String tableName,
- Schema schema, List<String> clusteringFields) {
+ String datasetName, String tableName, Schema schema, List<String> clusteringFields) {
try {
// Initialize client that will be used to send requests. This client only needs to be created
// once, and can be reused for multiple requests.
@@ -58,8 +56,7 @@ public static void createClusteredTable(
TimePartitioning partitioning = TimePartitioning.of(TimePartitioning.Type.DAY);
// Clustering fields will be consisted of fields mentioned in the schema.
// As of now, another condition is that the table should be partitioned.
- Clustering clustering =
- Clustering.newBuilder().setFields(clusteringFields).build();
+ Clustering clustering = Clustering.newBuilder().setFields(clusteringFields).build();

StandardTableDefinition tableDefinition =
StandardTableDefinition.newBuilder()
@@ -35,10 +35,10 @@ public static void runCreatePartitionedTable() {
String datasetName = "MY_DATASET_NAME";
String tableName = "MY_TABLE_NAME";
Schema schema =
- Schema.of(
- Field.of("stringField", StandardSQLTypeName.STRING),
- Field.of("booleanField", StandardSQLTypeName.BOOL),
- Field.of("dateField", StandardSQLTypeName.DATE));
+ Schema.of(
+ Field.of("stringField", StandardSQLTypeName.STRING),
+ Field.of("booleanField", StandardSQLTypeName.BOOL),
+ Field.of("dateField", StandardSQLTypeName.DATE));
createPartitionedTable(datasetName, tableName, schema);
}

@@ -46,7 +46,10 @@ public static void runExtractTableToCsv() {

// Exports datasetName:tableName to destinationUri as raw CSV
public static void extractTableToCsv(
- String projectId, String datasetName, String tableName, String destinationUri,
+ String projectId,
+ String datasetName,
+ String tableName,
+ String destinationUri,
String dataFormat) {
try {
// Initialize client that will be used to send requests. This client only needs to be created
@@ -71,8 +74,8 @@ public static void extractTableToCsv(
"BigQuery was unable to extract due to an error: \n" + job.getStatus().getError());
return;
}
System.out.println("Table export successful. Check in GCS bucket for the " +
dataFormat + " file.");
System.out.println(
"Table export successful. Check in GCS bucket for the " + dataFormat + " file.");
} catch (BigQueryException | InterruptedException e) {
System.out.println("Table extraction job was interrupted. \n" + e.toString());
}
@@ -50,7 +50,10 @@ public static void runExtractTableToJson() {

// Exports datasetName:tableName to destinationUri as a JSON file
public static void extractTableToJson(
- String projectId, String datasetName, String tableName, String destinationUri,
+ String projectId,
+ String datasetName,
+ String tableName,
+ String destinationUri,
String dataFormat) {
try {
// Initialize client that will be used to send requests. This client only needs to be created
@@ -75,8 +78,8 @@ public static void extractTableToJson(
"BigQuery was unable to extract due to an error: \n" + job.getStatus().getError());
return;
}
System.out.println("Table export successful. Check in GCS bucket for the " +
dataFormat + " file.");
System.out.println(
"Table export successful. Check in GCS bucket for the " + dataFormat + " file.");
} catch (BigQueryException | InterruptedException e) {
System.out.println("Table extraction job was interrupted. \n" + e.toString());
}
@@ -44,8 +44,8 @@ public static void runLoadLocalFile() throws IOException, InterruptedException {
loadLocalFile(datasetName, tableName, csvPath, FormatOptions.csv());
}

- public static void loadLocalFile(String datasetName, String tableName, Path csvPath,
- FormatOptions formatOptions)
+ public static void loadLocalFile(
+ String datasetName, String tableName, Path csvPath, FormatOptions formatOptions)
throws IOException, InterruptedException {
try {
// Initialize client that will be used to send requests. This client only needs to be created
@@ -54,9 +54,7 @@ public static void loadLocalFile(String datasetName, String tableName, Path csvP
TableId tableId = TableId.of(datasetName, tableName);

WriteChannelConfiguration writeChannelConfiguration =
- WriteChannelConfiguration.newBuilder(tableId)
- .setFormatOptions(formatOptions)
- .build();
+ WriteChannelConfiguration.newBuilder(tableId).setFormatOptions(formatOptions).build();

// The location and JobName must be specified; other fields can be auto-detected.
String jobName = "jobId_" + UUID.randomUUID().toString();
@@ -39,8 +39,8 @@ public static void runLoadParquetReplaceTable() {
loadParquetReplaceTable(datasetName, tableName, sourceUri);
}

- public static void loadParquetReplaceTable(String datasetName, String tableName,
- String sourceUri) {
+ public static void loadParquetReplaceTable(
+ String datasetName, String tableName, String sourceUri) {
try {
// Initialize client that will be used to send requests. This client only needs to be created
// once, and can be reused for multiple requests.
@@ -41,16 +41,20 @@ public static void runLoadTableClustered() throws Exception {
String tableName = "MY_TABLE_NAME";
String sourceUri = "/path/to/file.csv";
Schema schema =
- Schema.of(
- Field.of("name", StandardSQLTypeName.STRING),
- Field.of("post_abbr", StandardSQLTypeName.STRING),
- Field.of("date", StandardSQLTypeName.DATE));
- loadTableClustered(datasetName, tableName, sourceUri,
- schema, ImmutableList.of("name", "post_abbr"));
+ Schema.of(
+ Field.of("name", StandardSQLTypeName.STRING),
+ Field.of("post_abbr", StandardSQLTypeName.STRING),
+ Field.of("date", StandardSQLTypeName.DATE));
+ loadTableClustered(
+ datasetName, tableName, sourceUri, schema, ImmutableList.of("name", "post_abbr"));
}

- public static void loadTableClustered(String datasetName, String tableName, String sourceUri,
- Schema schema, List<String> clusteringFields)
+ public static void loadTableClustered(
+ String datasetName,
+ String tableName,
+ String sourceUri,
+ Schema schema,
+ List<String> clusteringFields)
throws Exception {
try {
// Initialize client that will be used to send requests. This client only needs to be created
@@ -62,8 +66,7 @@ public static void loadTableClustered(String datasetName, String tableName, Stri
TimePartitioning partitioning = TimePartitioning.of(TimePartitioning.Type.DAY);
// Clustering fields will be consisted of fields mentioned in the schema.
// As of now, another condition is that the table should be partitioned.
- Clustering clustering =
- Clustering.newBuilder().setFields(clusteringFields).build();
+ Clustering clustering = Clustering.newBuilder().setFields(clusteringFields).build();

LoadJobConfiguration loadJobConfig =
LoadJobConfiguration.builder(tableId, sourceUri)
@@ -27,16 +27,15 @@ public class SaveQueryToTable {

public static void runSaveQueryToTable() {
// TODO(developer): Replace these variables before running the sample.
- String query =
- "SELECT corpus FROM `bigquery-public-data.samples.shakespeare` GROUP BY corpus;";
+ String query = "SELECT corpus FROM `bigquery-public-data.samples.shakespeare` GROUP BY corpus;";
String destinationTable = "MY_TABLE";
String destinationDataset = "MY_DATASET";

saveQueryToTable(destinationDataset, destinationTable, query);
}

- public static void saveQueryToTable(String destinationDataset,
- String destinationTableId, String query) {
+ public static void saveQueryToTable(
+ String destinationDataset, String destinationTableId, String query) {
try {
// Initialize client that will be used to send requests. This client only needs to be created
// once, and can be reused for multiple requests.
@@ -38,8 +38,7 @@ public static void simpleQuery(String query) {
BigQuery bigquery = BigQueryOptions.getDefaultInstance().getService();

// Create the query job.
- QueryJobConfiguration queryConfig =
- QueryJobConfiguration.newBuilder(query).build();
+ QueryJobConfiguration queryConfig = QueryJobConfiguration.newBuilder(query).build();

// Execute the query.
TableResult result = bigquery.query(queryConfig);
@@ -42,8 +42,8 @@ public static void runTableInsertRows() {
tableInsertRows(datasetName, tableName, rowContent);
}

- public static void tableInsertRows(String datasetName, String tableName,
- Map<String, Object> rowContent) {
+ public static void tableInsertRows(
+ String datasetName, String tableName, Map<String, Object> rowContent) {
try {
// Initialize client that will be used to send requests. This client only needs to be created
// once, and can be reused for multiple requests.
@@ -24,7 +24,6 @@
import com.google.cloud.bigquery.Job;
import com.google.cloud.bigquery.JobId;
import com.google.cloud.bigquery.QueryJobConfiguration;
- import com.google.cloud.bigquery.QueryParameterValue;
import com.google.cloud.bigquery.TableDataWriteChannel;
import com.google.cloud.bigquery.TableId;
import com.google.cloud.bigquery.TableResult;
@@ -63,7 +62,8 @@ public static void updateTableDML(String datasetName, String tableName)
.build();

// Imports a local JSON file into a table.
- Path jsonPath = FileSystems.getDefault().getPath("src/test/resources", "userSessionsData.json");
+ Path jsonPath =
+ FileSystems.getDefault().getPath("src/test/resources", "userSessionsData.json");

// The location and JobName must be specified; other fields can be auto-detected.
String jobName = "jobId_" + UUID.randomUUID().toString();
@@ -87,16 +87,19 @@ public static void updateTableDML(String datasetName, String tableName)
return;
}

- System.out.println(job.getStatistics().toString() + " userSessionsData json uploaded successfully");
+ System.out.println(
+ job.getStatistics().toString() + " userSessionsData json uploaded successfully");

// Write a DML query to modify UserSessions table
// To create DML query job to mask the last octet in every row's ip_address column
String dmlQuery = String.format("UPDATE `%s.%s` \n"
+ "SET ip_address = REGEXP_REPLACE(ip_address, r\"(\\.[0-9]+)$\", \".0\")\n"
+ "WHERE TRUE", datasetName, tableName);
String dmlQuery =
String.format(
"UPDATE `%s.%s` \n"
+ "SET ip_address = REGEXP_REPLACE(ip_address, r\"(\\.[0-9]+)$\", \".0\")\n"
+ "WHERE TRUE",
datasetName, tableName);

- QueryJobConfiguration dmlQueryConfig =
- QueryJobConfiguration.newBuilder(dmlQuery).build();
+ QueryJobConfiguration dmlQueryConfig = QueryJobConfiguration.newBuilder(dmlQuery).build();

// Execute the query.
TableResult result = bigquery.query(dmlQueryConfig);
@@ -110,4 +113,4 @@ public static void updateTableDML(String datasetName, String tableName)
}
}
}
- // [END bigquery_update_with_dml]
+ // [END bigquery_update_with_dml]
@@ -32,8 +32,8 @@ public static void runUpdateTableDescription() {
updateTableDescription(datasetName, tableName, newDescription);
}

- public static void updateTableDescription(String datasetName, String tableName,
- String newDescription) {
+ public static void updateTableDescription(
+ String datasetName, String tableName, String newDescription) {
try {
// Initialize client that will be used to send requests. This client only needs to be created
// once, and can be reused for multiple requests.
@@ -34,8 +34,8 @@ public static void runUpdateTableExpiration() {
updateTableExpiration(datasetName, tableName, newExpiration);
}

- public static void updateTableExpiration(String datasetName, String tableName,
- Long newExpiration) {
+ public static void updateTableExpiration(
+ String datasetName, String tableName, Long newExpiration) {
try {
// Initialize client that will be used to send requests. This client only needs to be created
// once, and can be reused for multiple requests.
@@ -75,7 +75,8 @@ public void testAddColumnLoadAppend() throws Exception {

List<Field> fields = originalSchema.getFields();
// Adding below additional column during the load job
Field newField = Field.newBuilder("post_abbr", LegacySQLTypeName.STRING)
Field newField =
Field.newBuilder("post_abbr", LegacySQLTypeName.STRING)
.setMode(Field.Mode.NULLABLE)
.build();
List<Field> newFields = new ArrayList<>(fields);
@@ -76,6 +76,5 @@ public void testBrowseTable() {

// Clean up
DeleteTable.deleteTable(BIGQUERY_DATASET_NAME, tableName);
-
}
}
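None of the re-wrapped methods change their signatures, so existing callers and tests compile unchanged. As a sanity check, a minimal caller of the simpleQuery method shown above behaves identically before and after this commit; the class name SimpleQuery and the package com.example.bigquery are assumptions, since neither appears in the visible hunks:

// Hypothetical driver; the imported class and package are assumptions noted above.
import com.example.bigquery.SimpleQuery;

public class FormatterSanityCheck {
  public static void main(String[] args) {
    // Same public API before and after the re-format; only whitespace changed.
    String query = "SELECT corpus FROM `bigquery-public-data.samples.shakespeare` GROUP BY corpus;";
    SimpleQuery.simpleQuery(query);
  }
}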
