Skip to content
This repository was archived by the owner on Apr 1, 2026. It is now read-only.

Commit 33e6412

Browse files
committed
fix
1 parent cdfa759 commit 33e6412

3 files changed

Lines changed: 9 additions & 16 deletions

File tree

bigframes/bigquery/_operations/io.py

Lines changed: 5 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,6 @@
1616

1717
from typing import Mapping, Optional, Union
1818

19-
import bigframes_vendored.constants
2019
import google.cloud.bigquery
2120
import pandas as pd
2221

@@ -39,7 +38,7 @@ def _get_table_metadata(
3938
def load_data(
4039
table_name: str,
4140
*,
42-
replace: bool = False,
41+
write_disposition: str = "INTO",
4342
columns: Optional[Mapping[str, str]] = None,
4443
partition_by: Optional[list[str]] = None,
4544
cluster_by: Optional[list[str]] = None,
@@ -57,12 +56,12 @@ def load_data(
5756
Args:
5857
table_name (str):
5958
The name of the table in BigQuery.
60-
replace (bool, default False):
61-
Whether to replace the table if it already exists.
59+
write_disposition (str, default "INTO"):
60+
Whether to replace the table if it already exists ("OVERWRITE") or append to it ("INTO").
6261
columns (Mapping[str, str], optional):
6362
The table's schema.
6463
partition_by (list[str], optional):
65-
A list of columns to partition the table by.
64+
A list of partition expressions to partition the table by. See https://cloud.google.com/bigquery/docs/reference/standard-sql/load-statements#partition_expression.
6665
cluster_by (list[str], optional):
6766
A list of columns to cluster the table by.
6867
table_options (Mapping[str, Union[str, int, float, bool, list]], optional):
@@ -86,7 +85,7 @@ def load_data(
8685

8786
sql = bigframes.core.sql.io.load_data_ddl(
8887
table_name=table_name,
89-
replace=replace,
88+
write_disposition=write_disposition,
9089
columns=columns,
9190
partition_by=partition_by,
9291
cluster_by=cluster_by,
@@ -99,9 +98,6 @@ def load_data(
9998
if session is None:
10099
bpd.read_gbq_query(sql)
101100
session = bpd.get_global_session()
102-
assert (
103-
session is not None
104-
), f"Missing connection to BigQuery. Please report how you encountered this error at {bigframes_vendored.constants.FEEDBACK_LINK}."
105101
else:
106102
session.read_gbq_query(sql)
107103

bigframes/core/sql/io.py

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020
def load_data_ddl(
2121
table_name: str,
2222
*,
23-
replace: bool = False,
23+
write_disposition: str = "INTO",
2424
columns: Optional[Mapping[str, str]] = None,
2525
partition_by: Optional[list[str]] = None,
2626
cluster_by: Optional[list[str]] = None,
@@ -31,10 +31,7 @@ def load_data_ddl(
3131
) -> str:
3232
"""Generates the LOAD DATA DDL statement."""
3333
statement = ["LOAD DATA"]
34-
if replace:
35-
statement.append("OVERWRITE")
36-
else:
37-
statement.append("INTO")
34+
statement.append(write_disposition)
3835
statement.append(table_name)
3936

4037
if columns:

tests/unit/core/sql/test_io.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,10 +24,10 @@ def test_load_data_ddl():
2424
assert sql == expected
2525

2626

27-
def test_load_data_ddl_replace():
27+
def test_load_data_ddl_overwrite():
2828
sql = bigframes.core.sql.io.load_data_ddl(
2929
"my-project.my_dataset.my_table",
30-
replace=True,
30+
write_disposition="OVERWRITE",
3131
columns={"col1": "INT64", "col2": "STRING"},
3232
from_files_options={"format": "CSV", "uris": ["gs://bucket/path*"]},
3333
)

0 commit comments

Comments
 (0)