23 changes: 23 additions & 0 deletions sqlalchemy_firebird/firebird.py
@@ -10,6 +10,8 @@
 This driver uses new Firebird OO API provided by fbclient library.
 """ # noqa
 
+import sys
+
 from datetime import datetime
 from datetime import time
 from math import modf
@@ -140,8 +142,29 @@ def do_execute(self, cursor, statement, parameters, context=None):
         # Firebird-driver needs special time zone handling.
         # https://github.com/FirebirdSQL/python3-driver/issues/19#issuecomment-1523045743
         adapted_parameters = [self.adapt_timezone(p) for p in parameters]
+        self._disable_blob_streaming(cursor)
         super().do_execute(cursor, statement, adapted_parameters, context)
+
+    def do_executemany(self, cursor, statement, parameters, context=None):
+        self._disable_blob_streaming(cursor)
+        super().do_executemany(cursor, statement, parameters, context)
+
+    def do_execute_no_params(self, cursor, statement, context=None):
+        self._disable_blob_streaming(cursor)
+        super().do_execute_no_params(cursor, statement, context)
+
+    @staticmethod
+    def _disable_blob_streaming(cursor):
+        # SQLAlchemy fully consumes a cursor and then closes it before its
+        # result processors run. firebird-driver closes any BlobReader
+        # objects together with the cursor, which would make BLOB columns
+        # unreadable from within SQLAlchemy result rows (issue #58). Force
+        # all BLOBs to be returned as fully materialized bytes/str by
+        # raising the per-cursor streaming threshold past any practical
+        # size; this leaves driver_config.stream_blob_threshold untouched.
+        if hasattr(cursor, "stream_blob_threshold"):
+            cursor.stream_blob_threshold = sys.maxsize
 
 
 def remove_keys(d, keys):
     return {x: d[x] for x in d if x not in keys}
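A minimal standalone sketch of the driver behavior this hunk works around, assuming firebird-driver's documented connect()/cursor API; the database path, credentials, and table name are illustrative, not taken from this PR:

import sys

from firebird.driver import connect

# Illustrative DSN and credentials only.
with connect("localhost:test.fdb", user="sysdba", password="masterkey") as con:
    cur = con.cursor()
    # The same per-cursor override the dialect applies in
    # _disable_blob_streaming():
    cur.stream_blob_threshold = sys.maxsize
    cur.execute("SELECT the_value FROM large_blob")
    value = cur.fetchone()[0]
    # With the default threshold (64 KiB) a large BLOB would come back as a
    # BlobReader bound to the cursor's lifetime; with the override it is
    # returned as fully materialized bytes/str.

Scoping the override per cursor keeps other firebird-driver users in the same process on the default streaming behavior; the global knob, driver_config.stream_blob_threshold, is deliberately left untouched.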
40 changes: 39 additions & 1 deletion test/test_issues.py
@@ -1,4 +1,4 @@
-from sqlalchemy import Column, select
+from sqlalchemy import Column, Integer, select
 from sqlalchemy import Table
 from sqlalchemy import testing
 from sqlalchemy.testing import eq_
@@ -9,6 +9,12 @@
 TEST_UNICODE = "próf-áêïôù-🗄️.fdb"
 TEST_BINARY = TEST_UNICODE.encode("utf-8")
 
+# firebird-driver returns BlobReader objects for BLOBs larger than
+# stream_blob_threshold (default 64 KiB). Use 200 KiB to trigger the
+# streaming path that issue #58 was about.
+LARGE_BINARY = b"\xa5" * (200 * 1024)
+LARGE_TEXT = ("loremipsum-" * 20_000)[: 200 * 1024]
+
 
 class IssuesTest(fixtures.TestBase):
     @testing.provide_metadata
@@ -36,3 +42,35 @@ def test_issue_76(self, connection, type_, expected):
             connection.execute(select(the_blob.c.the_value)).scalar(),
             expected,
         )
+
+    @testing.provide_metadata
+    @testing.combinations(
+        (fb_types.FBBLOB, LARGE_BINARY),
+        (fb_types.FBTEXT, LARGE_TEXT),
+        argnames="type_, expected",
+        id_="na",
+    )
+    def test_issue_58_large_blob(self, connection, type_, expected):
+        # Regression test for issue #58: BLOBs above firebird-driver's
+        # stream_blob_threshold were returned as BlobReader objects that
+        # SQLAlchemy could not consume (and which got closed alongside
+        # the cursor before the result processors ran).
+        metadata = self.metadata
+
+        large_blob = Table(
+            "large_blob",
+            metadata,
+            Column("id", Integer, primary_key=True),
+            Column("the_value", type_),
+        )
+        metadata.create_all(testing.db)
+
+        connection.execute(
+            large_blob.insert().values(id=1, the_value=expected)
+        )
+
+        result = connection.execute(select(large_blob.c.the_value)).scalar()
+
+        eq_(type(result), type(expected))
+        eq_(len(result), len(expected))
+        eq_(result, expected)
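For completeness, a hedged end-to-end sketch of the application-level behavior this test protects; the connection URL is illustrative only:

from sqlalchemy import create_engine, text

# Illustrative URL; substitute your own database and credentials.
engine = create_engine("firebird+firebird://sysdba:masterkey@localhost/test.fdb")

with engine.connect() as conn:
    value = conn.execute(text("SELECT the_value FROM large_blob")).scalar()
    # Before the fix, a BLOB over 64 KiB could surface here as an already
    # closed BlobReader; with it, value is plain bytes/str of full length.
    assert isinstance(value, (bytes, str))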