Skip to content
9 changes: 8 additions & 1 deletion cinder/common/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,14 @@
help='The full class name of the consistencygroup API class'),
cfg.BoolOpt('split_loggers',
default=False,
help='Log requests to multiple loggers.')
help='Log requests to multiple loggers.'),
cfg.BoolOpt('volume_history_enabled',
default=True,
help='Enable volume history tracking. When enabled, all '
'mutations to volume DB rows are recorded in the '
'volume_history table for auditing purposes. Disabling '
'this can reduce DB overhead in high-throughput '
'environments.'),
]

auth_opts = [
Expand Down
5 changes: 5 additions & 0 deletions cinder/db/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -501,6 +501,11 @@ def volume_update(context, volume_id, values):
return IMPL.volume_update(context, volume_id, values)


def volume_history_get_all_by_volume(context, volume_id):
    """Get all history records for a volume.

    Thin dispatch wrapper: delegates to the configured backend
    implementation (IMPL), matching the other functions in this module.

    :param context: the request context to query under
    :param volume_id: UUID of the volume whose history records to fetch
    :returns: whatever IMPL.volume_history_get_all_by_volume returns
        (a list of volume_history records for the given volume)
    """
    return IMPL.volume_history_get_all_by_volume(context, volume_id)


def volumes_update(context, values_list):
"""Set the given properties on a list of volumes and update them.

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Add volume_history table

Revision ID: 633b14d87cec
Revises: 9c74c1c6971f
Create Date: 2026-03-11
"""

from alembic import op
import sqlalchemy as sa

# Alembic revision identifiers: this migration ('633b14d87cec') applies
# directly on top of '9c74c1c6971f'; it has no branch labels and no
# cross-branch dependencies.
revision = '633b14d87cec'
down_revision = '9c74c1c6971f'
branch_labels = None
depends_on = None


def upgrade():
    """Create the volume_history audit table and its volume_id index."""
    # Standard soft-delete bookkeeping columns, followed by the
    # history-specific columns.
    history_columns = [
        sa.Column('created_at', sa.DateTime),
        sa.Column('updated_at', sa.DateTime),
        sa.Column('deleted_at', sa.DateTime),
        sa.Column('deleted', sa.Boolean, default=False),
        sa.Column('id', sa.String(36), primary_key=True, nullable=False),
        sa.Column('volume_id', sa.String(36), sa.ForeignKey('volumes.id'),
                  nullable=False),
        sa.Column('project_id', sa.String(255)),
        sa.Column('user_id', sa.String(255)),
        sa.Column('request_id', sa.String(255)),
        sa.Column('action', sa.String(64), nullable=False),
        sa.Column('changes', sa.Text),
    ]
    op.create_table(
        'volume_history',
        *history_columns,
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )
    # History is always queried per-volume, so index the foreign key.
    op.create_index('volume_history_volume_id_idx', 'volume_history',
                    ['volume_id'])
133 changes: 133 additions & 0 deletions cinder/db/sqlalchemy/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@
from oslo_db import options
from oslo_db.sqlalchemy import enginefacade
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
Expand Down Expand Up @@ -1990,13 +1991,60 @@ def volume_attached(
del updated_values['updated_at']

volume_ref = _volume_get(context, volume_attachment_ref['volume_id'])
# Capture old values before update for history
old_status = volume_ref['status']
old_attach_status = volume_ref['attach_status']

volume_ref['status'] = volume_status
volume_ref['attach_status'] = attach_status
volume_ref.save(context.session)

# Record attachment in volume history
attach_changes = {}
if old_status != volume_status:
attach_changes['status'] = [old_status, volume_status]
if old_attach_status != str(attach_status):
attach_changes['attach_status'] = [old_attach_status,
str(attach_status)]
if attach_changes:
_record_volume_history(context, volume_attachment_ref['volume_id'],
'attach', attach_changes)

return volume_ref, updated_values


def _record_volume_history(context, volume_id, action, changes,
                           project_id=None, user_id=None):
    """Record a volume state change in the volume_history table.

    :param context: the request context; supplies the DB session and the
        default project/user/request identity
    :param volume_id: UUID of the volume
    :param action: type of action (create, update, destroy, attach, detach)
    :param changes: dict of changes, where each key is a field name and
        the value is ``[old_value, new_value]``
    :param project_id: override project_id (defaults to context.project_id)
    :param user_id: override user_id (defaults to context.user_id)

    This function respects the ``CONF.volume_history_enabled`` config
    option: when disabled, or when ``changes`` is empty, no history row
    is recorded.
    """
    if not CONF.volume_history_enabled:
        return
    if not changes:
        return
    history = models.VolumeHistory()
    # Use uuidutils (imported at module level) instead of the raw ``uuid``
    # module, which this module does not import — ``str(uuid.uuid4())``
    # would raise NameError here.
    history.id = uuidutils.generate_uuid()
    history.volume_id = volume_id
    # getattr guards: some contexts (e.g. plain admin contexts in tests)
    # may lack project/user/request identifiers.
    history.project_id = project_id or getattr(context, 'project_id', None)
    history.user_id = user_id or getattr(context, 'user_id', None)
    history.request_id = getattr(context, 'request_id', None)
    history.action = action
    # Store the delta as JSON text; readers must jsonutils.loads() it.
    history.changes = jsonutils.dumps(changes)
    context.session.add(history)


@handle_db_data_error
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
Expand All @@ -2021,6 +2069,12 @@ def volume_create(context, values):

context.session.add(volume_ref)

# Record creation in volume history
create_changes = {k: [None, v] for k, v in values.items()
if k not in ('metadata', 'admin_metadata',
'volume_metadata', 'volume_admin_metadata')}
_record_volume_history(context, values['id'], 'create', create_changes)

return _volume_get(context, values['id'])


Expand Down Expand Up @@ -2114,6 +2168,7 @@ def volume_data_get_for_project(context, project_id, host=None):
models.Transfer,
models.VolumeGlanceMetadata,
models.VolumeAttachment,
models.VolumeHistory,
]
)

Expand All @@ -2130,6 +2185,23 @@ def volume_destroy(context, volume_id):
'migration_status': None,
}
query = model_query(context, models.Volume).filter_by(id=volume_id)

# Record destruction in volume history before updating
volume_ref = query.first()
if volume_ref and hasattr(volume_ref, 'status'):
try:
destroy_changes = {
'status': [volume_ref.status, 'deleted'],
'deleted': [False, True],
}
_record_volume_history(context, volume_id, 'destroy',
destroy_changes)
except (TypeError, AttributeError):
# Handle case where volume_ref is mocked in unit tests
pass

# Re-fetch query since .first() consumed it
query = model_query(context, models.Volume).filter_by(id=volume_id)
entity = query.column_descriptions[0]['entity']
updated_values['updated_at'] = entity.updated_at
query.update(updated_values)
Expand Down Expand Up @@ -2259,6 +2331,10 @@ def volume_detached(context, volume_id, attachment_id):
for_update=True,
)

# Capture old values for history before any changes
old_status = volume['status']
old_attach_status = volume['attach_status']

try:
attachment = _attachment_get(context, attachment_id)
attachment_updates = attachment.delete(context.session)
Expand All @@ -2285,6 +2361,18 @@ def volume_detached(context, volume_id, attachment_id):
volume.save(context.session)
del volume_updates['updated_at']

# Record detachment in volume history
detach_changes = {}
new_status = volume_updates.get('status')
new_attach_status = volume_updates.get('attach_status')
if new_status and old_status != new_status:
detach_changes['status'] = [old_status, new_status]
if new_attach_status and old_attach_status != new_attach_status:
detach_changes['attach_status'] = [old_attach_status,
new_attach_status]
if detach_changes:
_record_volume_history(context, volume_id, 'detach', detach_changes)

return volume_updates, attachment_updates


Expand Down Expand Up @@ -3233,6 +3321,22 @@ def process_sort_params(
@require_context
@main_context_manager.writer
def volume_update(context, volume_id, values):
# Fetch current state for history tracking BEFORE the update.
# We capture the old values in a dict because after the SQLAlchemy
# update(), the ORM model object will reflect the new values.
old_values = None
if CONF.volume_history_enabled:
old_volume = _volume_get(context, volume_id, joined_load=False)
# Copy the values we need before the update modifies them
old_values = {}
for key in values.keys():
if key not in ('metadata', 'admin_metadata'):
old_val = getattr(old_volume, key, None)
# Handle datetime serialization
if hasattr(old_val, 'isoformat'):
old_val = old_val.isoformat() if old_val else None
old_values[key] = old_val

metadata = values.get('metadata')
if metadata is not None:
_volume_user_metadata_update(
Expand All @@ -3256,6 +3360,35 @@ def volume_update(context, volume_id, values):
if not result:
raise exception.VolumeNotFound(volume_id=volume_id)

# Record history for volume update
if CONF.volume_history_enabled and old_values:
changes = {}
for key, new_val in values.items():
old_val = old_values.get(key)
# Handle datetime serialization for new value
if hasattr(new_val, 'isoformat'):
new_val = new_val.isoformat() if new_val else None
if old_val != new_val:
changes[key] = [old_val, new_val]
if changes:
_record_volume_history(context, volume_id, 'update', changes)


@require_context
@main_context_manager.reader
def volume_history_get_all_by_volume(context, volume_id):
    """Return every history record for a volume, oldest first.

    Soft-deleted rows are included (``read_deleted="yes"``) so that
    history for deleted volumes remains available for auditing.
    """
    query = model_query(context, models.VolumeHistory, read_deleted="yes")
    query = query.filter_by(volume_id=volume_id)
    query = query.order_by(models.VolumeHistory.created_at)
    return query.all()


@handle_db_data_error
@require_context
Expand Down
21 changes: 21 additions & 0 deletions cinder/db/sqlalchemy/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -1281,3 +1281,24 @@ class AttachmentSpecs(BASE, CinderBase):
'AttachmentSpecs.attachment_id == VolumeAttachment.id,'
'AttachmentSpecs.deleted == False)',
)


class VolumeHistory(BASE, CinderBase):
    """Represents a historical record of changes to a volume.

    Each record captures a JSON delta of changed fields (old/new values)
    along with contextual metadata (user, project, request_id).
    Timestamp and soft-delete columns (created_at, updated_at, deleted_at,
    deleted) are inherited from CinderBase.
    """
    __tablename__ = 'volume_history'
    __table_args__ = (
        # History is always looked up per-volume; mirror the index created
        # by the '633b14d87cec' migration.
        sa.Index('volume_history_volume_id_idx', 'volume_id'),
    )

    # Primary key: UUID string generated at insert time.
    id = sa.Column(sa.String(36), primary_key=True)
    # FK to the volume this record describes.
    volume_id = sa.Column(
        sa.String(36), sa.ForeignKey('volumes.id'), nullable=False)
    # Identity of the request that caused the change.
    project_id = sa.Column(sa.String(255))
    user_id = sa.Column(sa.String(255))
    request_id = sa.Column(sa.String(255))
    # Action type, e.g. create/update/destroy/attach/detach.
    action = sa.Column(sa.String(64), nullable=False)
    # JSON-encoded dict of {field: [old_value, new_value]}.
    changes = sa.Column(sa.Text)
16 changes: 16 additions & 0 deletions cinder/tests/unit/db/test_migrations.py
Original file line number Diff line number Diff line change
Expand Up @@ -396,6 +396,22 @@ def _check_9c74c1c6971f(self, connection):
self.assertEqual({'backups', 'backup_gigabytes'},
{r[0] for r in res})

def _check_633b14d87cec(self, connection):
    """Verify the volume_history table and its volume_id index exist."""
    table = db_utils.get_table(connection, 'volume_history')
    # 'updated_at' added to the expected set: the migration creates it
    # alongside the other CinderBase bookkeeping columns.
    expected_columns = ('id', 'volume_id', 'project_id', 'user_id',
                        'request_id', 'action', 'changes', 'created_at',
                        'updated_at', 'deleted_at', 'deleted')
    for column in expected_columns:
        self.assertIn(column, table.c)
    # BUG FIX: index_exists() returns a boolean; the original call
    # discarded the result, so a missing index could never fail the
    # test.  Assert on the return value.
    self.assertTrue(db_utils.index_exists(
        connection, 'volume_history', 'volume_history_volume_id_idx'))

# TODO: (D Release) Uncomment method _check_afd7494d43b7 and create a
# migration with hash afd7494d43b7 using the following command:
# $ tox -e venv -- alembic -c cinder/db/alembic.ini revision \
Expand Down
Loading