scripts/jobs/planning/tascomi_create_daily_snapshot.py (28 changes: 27 additions & 1 deletion)
@@ -82,6 +82,29 @@ def loadIncrementsSinceDate(increment_table_name, name_space, date):
    return increment_df


def purge_today_partition(
    glueContext: GlueContext, target_destination: str, retentionPeriod: int = 0
) -> None:
    """
    Purges (deletes) only today's partition under the given target destination.

    Parameters:
        glueContext: GlueContext instance.
        target_destination: Base S3 path (e.g., "s3://your-bucket/path").
        retentionPeriod: Retention period in hours (default 0, meaning delete
            all files immediately).

    Returns:
        None.
    """
    now = datetime.now()
    snapshot_year = str(now.year)
    snapshot_month = str(now.month).zfill(2)
    snapshot_day = str(now.day).zfill(2)
    snapshot_date = snapshot_year + snapshot_month + snapshot_day

    partition_path = f"{target_destination}/snapshot_year={snapshot_year}/snapshot_month={snapshot_month}/snapshot_day={snapshot_day}/snapshot_date={snapshot_date}"

    glueContext.purge_s3_path(partition_path, {"retentionPeriod": retentionPeriod})
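
# Example of the path this builds (illustrative only; bucket and table names
# are hypothetical, not from the diff): with
# target_destination = "s3://my-bucket/tascomi/appeals" on 5 March 2024, the
# purged path is
# "s3://my-bucket/tascomi/appeals/snapshot_year=2024/snapshot_month=03/snapshot_day=05/snapshot_date=20240305".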


# dict containing parameters for DQ checks
dq_params = {
"appeals": {"unique": ["id"]},
@@ -189,7 +212,7 @@ def loadIncrementsSinceDate(increment_table_name, name_space, date):
# snapshot table in glue catalogue
else:
    pushDownPredicate = create_pushdown_predicate(
-        partitionDateColumn="snapshot_date", daysBuffer=3
+        partitionDateColumn="snapshot_date", daysBuffer=30
    )
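    # Assumption, not stated in the diff: create_pushdown_predicate appears to
    # build a partition filter along the lines of
    # snapshot_date >= (today - daysBuffer), so raising daysBuffer from 3 to 30
    # lets the job fall back to a snapshot up to 30 days old rather than only 3.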
    # load latest snapshot
    snapshot_ddf = glueContext.create_dynamic_frame.from_catalog(
Expand Down Expand Up @@ -318,6 +341,9 @@ def loadIncrementsSinceDate(increment_table_name, name_space, date):
    snapshot_df, glueContext, "resultDataFrame"
)
target_destination = s3_bucket_target + table_name

# Clean up today's partition before writing
purge_today_partition(glueContext, target_destination)
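# Purging first makes same-day re-runs idempotent: files left by an earlier run
# today are deleted, so the write below cannot mix stale and fresh output in
# the same snapshot_date partition.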
parquetData = glueContext.write_dynamic_frame.from_options(
    frame=resultDataFrame,
    connection_type="s3",