Skip to content

Commit

Permalink
fix: update pyarrow references that are warning (#31)
Browse files Browse the repository at this point in the history
As of version 0.17, pyarrow began emitting FutureWarnings for
functionality that has been changed and/or relocated. Since we already
bumped the minimum pyarrow version while fixing tests to work around
breakages from the 0.15 transition, it seems reasonable to update these
references as well.
  • Loading branch information
shollyman committed Jun 4, 2020
1 parent 56d1b1f commit 5302481
Show file tree
Hide file tree
Showing 4 changed files with 16 additions and 16 deletions.
4 changes: 2 additions & 2 deletions google/cloud/bigquery_storage_v1/reader.py
Expand Up @@ -649,7 +649,7 @@ def to_dataframe(self, message, dtypes=None):
def _parse_arrow_message(self, message):
    """Deserialize a single Arrow record-batch message.

    Args:
        message: a ReadRowsResponse-style message whose
            ``arrow_record_batch.serialized_record_batch`` holds the
            Arrow IPC-serialized batch bytes.

    Returns:
        pyarrow.RecordBatch: the deserialized batch, using the session
        schema cached by ``_parse_arrow_schema``.
    """
    # Ensure the (lazily parsed) session schema is available first.
    self._parse_arrow_schema()

    # pyarrow 0.17 moved read_record_batch under pyarrow.ipc; the
    # top-level alias emits a FutureWarning, so call the new location.
    return pyarrow.ipc.read_record_batch(
        pyarrow.py_buffer(message.arrow_record_batch.serialized_record_batch),
        self._schema,
    )
def _parse_arrow_schema(self):
    """Lazily parse and cache the Arrow schema from the read session.

    No-op if ``self._schema`` is already populated. Otherwise
    deserializes ``self._read_session.arrow_schema.serialized_schema``
    and caches both the schema and the column names.
    """
    if self._schema:
        return

    # pyarrow 0.17 relocated read_schema to pyarrow.ipc; the top-level
    # alias emits a FutureWarning, so call the new location.
    self._schema = pyarrow.ipc.read_schema(
        pyarrow.py_buffer(self._read_session.arrow_schema.serialized_schema)
    )
    self._column_names = [field.name for field in self._schema]
4 changes: 2 additions & 2 deletions google/cloud/bigquery_storage_v1beta1/reader.py
Expand Up @@ -607,7 +607,7 @@ def to_dataframe(self, message, dtypes=None):
def _parse_arrow_message(self, message):
    """Deserialize a single Arrow record-batch message (v1beta1).

    Args:
        message: a ReadRowsResponse-style message whose
            ``arrow_record_batch.serialized_record_batch`` holds the
            Arrow IPC-serialized batch bytes.

    Returns:
        pyarrow.RecordBatch: the deserialized batch, using the session
        schema cached by ``_parse_arrow_schema``.
    """
    # Ensure the (lazily parsed) session schema is available first.
    self._parse_arrow_schema()

    # pyarrow 0.17 moved read_record_batch under pyarrow.ipc; the
    # top-level alias emits a FutureWarning, so call the new location.
    return pyarrow.ipc.read_record_batch(
        pyarrow.py_buffer(message.arrow_record_batch.serialized_record_batch),
        self._schema,
    )
def _parse_arrow_schema(self):
    """Lazily parse and cache the Arrow schema from the read session (v1beta1).

    No-op if ``self._schema`` is already populated. Otherwise
    deserializes ``self._read_session.arrow_schema.serialized_schema``
    and caches both the schema and the column names.
    """
    if self._schema:
        return

    # pyarrow 0.17 relocated read_schema to pyarrow.ipc; the top-level
    # alias emits a FutureWarning, so call the new location.
    self._schema = pyarrow.ipc.read_schema(
        pyarrow.py_buffer(self._read_session.arrow_schema.serialized_schema)
    )
    self._column_names = [field.name for field in self._schema]
Expand Down
12 changes: 6 additions & 6 deletions tests/system/v1/test_reader_dataframe_v1.py
Expand Up @@ -46,12 +46,12 @@ def test_read_v1(client, project_id):

assert tbl.num_columns == 4
schema = tbl.schema
# Use field with a name specifier as there may be ordering differences
# when selected_fields is used
assert pyarrow.types.is_int64(schema.field("station_id").type)
assert pyarrow.types.is_float64(schema.field("latitude").type)
assert pyarrow.types.is_float64(schema.field("longitude").type)
assert pyarrow.types.is_string(schema.field("name").type)


@pytest.mark.parametrize(
Expand Down
12 changes: 6 additions & 6 deletions tests/system/v1beta1/test_reader_dataframe_v1beta1.py
Expand Up @@ -48,12 +48,12 @@ def test_read_rows_to_arrow(client, project_id):

assert tbl.num_columns == 4
schema = tbl.schema
# Use field with a name specifier as there may be ordering differences
# when selected_fields is used
assert pyarrow.types.is_int64(schema.field("station_id").type)
assert pyarrow.types.is_float64(schema.field("latitude").type)
assert pyarrow.types.is_float64(schema.field("longitude").type)
assert pyarrow.types.is_string(schema.field("name").type)


@pytest.mark.parametrize(
Expand Down

0 comments on commit 5302481

Please sign in to comment.