@@ -3432,18 +3432,6 @@ def unpivot(
34323432 array_value, type="cross"
34333433 )
34343434 new_passthrough_cols = [column_mapping[col] for col in passthrough_columns]
3435- # Last column is offsets
3436- if not labels_array.column_ids:
3437- # Handle empty column_ids case for multimodal DataFrames
3438- # When no index columns exist, return original array_value with identity mappings
3439- value_cols = [
3440- col for col in array_value.column_ids if col not in passthrough_columns
3441- ]
3442- return array_value, (
3443- tuple(),
3444- tuple(value_cols),
3445- tuple(passthrough_columns),
3446- )
34473435 index_col_ids = [labels_mapping[col] for col in labels_array.column_ids[:-1]]
34483436 explode_offsets_id = labels_mapping[labels_array.column_ids[-1]]
34493437
@@ -3453,6 +3441,10 @@ def unpivot(
34533441 for input_ids in unpivot_columns:
34543442 # row explode offset used to choose the input column
34553443 # we use offset instead of label as labels are not necessarily unique
3444+ if not input_ids:
3445+ unpivot_exprs.append(ex.const(None))
3446+ continue
3447+
34563448 cases = itertools.chain(
34573449 *(
34583450 (
@@ -3482,19 +3474,31 @@ def _pd_index_to_array_value(
34823474 Create an ArrayValue from a list of label tuples.
34833475 The last column will be row offsets.
34843476 """
3477+ id_gen = bigframes.core.identifiers.standard_id_strings()
3478+ index_ids = [next(id_gen) for _ in range(index.nlevels)]
3479+ offset_id = next(id_gen)
34853480
34863481 rows = []
34873482 labels_as_tuples = utils.index_as_tuples(index)
34883483 for row_offset in range(len(index)):
3489- id_gen = bigframes.core.identifiers.standard_id_strings()
34903484 row_label = labels_as_tuples[row_offset]
3491- row_label = (row_label,) if not isinstance(row_label, tuple) else row_label
3492- row = {}
3493- for label_part, id in zip(row_label, id_gen):
3494- row[id] = label_part if pd.notnull(label_part) else None
3495- row[next(id_gen)] = row_offset
3485+ row = {
3486+ id: (val if pd.notnull(val) else None)
3487+ for id, val in zip(index_ids, row_label)
3488+ }
3489+ row[offset_id] = row_offset
34963490 rows.append(row)

3492+ if not rows:
3493+ # Create empty table with correct columns
3494+ schema = pa.schema(
3495+ [pa.field(id, pa.null()) for id in index_ids]
3496+ + [pa.field(offset_id, pa.int64())]
3497+ )
3498+ return core.ArrayValue.from_pyarrow(
3499+ pa.Table.from_batches([], schema=schema), session=session
3500+ )
3501+
34983502 return core.ArrayValue.from_pyarrow(pa.Table.from_pylist(rows), session=session)
34993503
35003504
0 commit comments