#!/usr/bin/env python3
"""
explainable_ai.py - Explainable AI Module for Amazon Review Analyzer
====================================================================
This module provides SHAP and LIME explanations for sentiment analysis predictions
using VADER and TF-IDF models in the Amazon review analyzer.
Features:
- SHAP explanations for TF-IDF + classifier pipeline
- LIME explanations for individual review predictions
- Visualization of feature importance
- Model interpretability insights
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import streamlit as st
from typing import List, Dict, Tuple, Any
import warnings
warnings.filterwarnings('ignore')
# Core ML libraries
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, accuracy_score
# Explainability libraries
try:
    import shap
    SHAP_AVAILABLE = True
except ImportError:
    SHAP_AVAILABLE = False

try:
    import lime
    from lime.lime_text import LimeTextExplainer
    LIME_AVAILABLE = True
except ImportError:
    LIME_AVAILABLE = False

# NLP utilities
import nltk
from nltk.corpus import stopwords
from nltk.sentiment import SentimentIntensityAnalyzer

# SentimentIntensityAnalyzer needs the VADER lexicon; fetch it once if missing.
try:
    nltk.data.find('sentiment/vader_lexicon.zip')
except LookupError:
    nltk.download('vader_lexicon', quiet=True)


class ExplainableVaderClassifier:
    """
    A wrapper around VADER that creates an explainable classifier
    for sentiment analysis with SHAP and LIME support.
    """

    def __init__(self):
        self.vader = SentimentIntensityAnalyzer()
        self.tfidf_pipeline = None
        self.feature_names = None
        self.is_trained = False

    def vader_predict_proba(self, texts: List[str]) -> np.ndarray:
        """Convert VADER scores to probability-like format"""
        probabilities = []
        for text in texts:
            scores = self.vader.polarity_scores(text)
            compound = scores['compound']
            # Convert compound score to pseudo-probabilities
            if compound >= 0.05:
                # Positive
                pos_prob = 0.5 + (compound * 0.5)
                neg_prob = (1 - pos_prob) * 0.3
                neu_prob = 1 - pos_prob - neg_prob
            elif compound <= -0.05:
                # Negative
                neg_prob = 0.5 + (abs(compound) * 0.5)
                pos_prob = (1 - neg_prob) * 0.3
                neu_prob = 1 - pos_prob - neg_prob
            else:
                # Neutral
                neu_prob = 0.6
                pos_prob = 0.2
                neg_prob = 0.2
            probabilities.append([neg_prob, neu_prob, pos_prob])
        return np.array(probabilities)

    def fit_surrogate_model(self, texts: List[str], sentiments: List[str]) -> Pipeline:
        """
        Train a surrogate TF-IDF + Logistic Regression model
        that mimics VADER's behavior for explainability.
        """
        # Convert sentiment labels to numerical classes
        label_map = {'Negative': 0, 'Neutral': 1, 'Positive': 2}
        y = [label_map[s] for s in sentiments]

        # Create TF-IDF + Logistic Regression pipeline
        pipeline = Pipeline([
            ('tfidf', TfidfVectorizer(
                max_features=1000,
                ngram_range=(1, 2),
                stop_words='english',
                min_df=2,
                max_df=0.8
            )),
            ('classifier', LogisticRegression(
                random_state=42,
                class_weight='balanced',
                max_iter=1000
            ))
        ])

        # Train the pipeline
        pipeline.fit(texts, y)

        # Store for later use
        self.tfidf_pipeline = pipeline
        self.feature_names = pipeline.named_steps['tfidf'].get_feature_names_out()
        self.is_trained = True
        return pipeline

    def get_surrogate_predictions(self, texts: List[str]) -> Tuple[np.ndarray, np.ndarray]:
        """Get predictions from the surrogate model"""
        if not self.is_trained:
            raise ValueError("Surrogate model not trained. Call fit_surrogate_model first.")
        predictions = self.tfidf_pipeline.predict(texts)
        probabilities = self.tfidf_pipeline.predict_proba(texts)
        return predictions, probabilities
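

# Example usage (a minimal sketch; `reviews` is a hypothetical list of review
# strings, and the labels come from VADER itself so the surrogate imitates
# VADER rather than ground truth):
#
#     clf = ExplainableVaderClassifier()
#     labels = []
#     for text in reviews:
#         c = clf.vader.polarity_scores(text)['compound']
#         labels.append('Positive' if c >= 0.05 else
#                       'Negative' if c <= -0.05 else 'Neutral')
#     clf.fit_surrogate_model(reviews, labels)
#     preds, probas = clf.get_surrogate_predictions(reviews[:5])
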
class SHAPExplainer:
    """SHAP-based explanations for sentiment analysis"""

    def __init__(self, classifier: ExplainableVaderClassifier):
        self.classifier = classifier
        self.explainer = None

    def create_explainer(self, background_texts: List[str]) -> None:
        """Create a SHAP explainer using a text-aware approach"""
        if not SHAP_AVAILABLE:
            raise ImportError("SHAP not available. Install with: pip install shap")
        if not self.classifier.is_trained:
            raise ValueError("Classifier not trained")

        # Wrapper so SHAP can call the pipeline on raw text
        def predict_wrapper(texts):
            """Wrapper function for SHAP that handles text input properly"""
            if isinstance(texts, str):
                texts = [texts]
            return self.classifier.tfidf_pipeline.predict_proba(texts)

        # Use SHAP's text masker, which perturbs words directly; the Text
        # masker needs no background data, so background_texts is kept only
        # for interface compatibility.
        try:
            self.explainer = shap.Explainer(
                predict_wrapper,
                masker=shap.maskers.Text(tokenizer=r"\W+"),  # word-based tokenization
                algorithm='auto'
            )
        except Exception as e:
            # Fallback: use the coefficient-based explanation instead
            print(f"Warning: Advanced SHAP failed ({e}), using coefficient-based explanation")
            self.explainer = None

    def explain_prediction(self, text: str) -> Dict[str, Any]:
        """Generate an explanation for a single prediction"""
        # Surrogate prediction; predict() returns labels in the 3-class
        # scheme (0=Negative, 1=Neutral, 2=Positive) used at training time
        pred, proba = self.classifier.get_surrogate_predictions([text])
        pred_class = int(pred[0])

        # Ensure 3-class probabilities even when the surrogate saw only
        # two classes during training
        expanded_proba = np.zeros(3)
        if proba.shape[1] == 2:
            # Place each probability at its trained class index
            classes = self.classifier.tfidf_pipeline.named_steps['classifier'].classes_
            for cls, p in zip(classes, proba[0]):
                expanded_proba[int(cls)] = p
        else:
            expanded_proba = proba[0]

        # Coefficient-based feature importance, used both as the per-word
        # signal below and as the fallback explanation
        feature_importance = self._get_coefficient_explanation(text, pred_class)

        if self.explainer is not None:
            try:
                tfidf = self.classifier.tfidf_pipeline.named_steps['tfidf']
                # Split text into words, maintaining order
                words = text.lower().split()
                # Require at least one word in the TF-IDF vocabulary
                if not any(w in tfidf.vocabulary_ for w in words):
                    raise ValueError("No features found in vocabulary")

                # Score each word from the classifier coefficients for the
                # predicted class, falling back to the single row of a
                # binary model
                coef = self.classifier.tfidf_pipeline.named_steps['classifier'].coef_
                if coef.ndim > 1 and pred_class < coef.shape[0]:
                    class_coef = coef[pred_class]
                else:
                    class_coef = coef.reshape(-1)
                word_importance = {}
                for word in words:
                    if word in tfidf.vocabulary_:
                        word_importance[word] = float(class_coef[tfidf.vocabulary_[word]])
                    else:
                        word_importance[word] = 0.0

                return {
                    'text': text,
                    'prediction': pred_class,
                    'probabilities': expanded_proba,
                    'shap_values': word_importance,  # word -> importance
                    'words': words,                  # original word order
                    'feature_importance': feature_importance,
                    'method': 'shap_text',
                    'base_value': 0.0
                }
            except Exception as e:
                print(f"SHAP explanation failed: {e}")
                # Fall through to the coefficient-based explanation

        # Fallback: coefficient-based explanation (similar in spirit to LIME,
        # but read directly from the surrogate model's coefficients)
        return {
            'text': text,
            'prediction': pred_class,
            'probabilities': expanded_proba,
            'feature_importance': feature_importance,
            'method': 'coefficient_based',
            'note': 'Using coefficient-based explanation (SHAP unavailable for this text)'
        }

    def _get_coefficient_explanation(self, text: str, predicted_class: int) -> List[Tuple[str, float]]:
        """Fallback explanation using model coefficients"""
        # Transform text to TF-IDF space
        tfidf_vector = self.classifier.tfidf_pipeline.named_steps['tfidf'].transform([text])
        feature_names = self.classifier.feature_names

        # Coefficients may be 1D (binary) or 2D (multi-class)
        coef = self.classifier.tfidf_pipeline.named_steps['classifier'].coef_
        if len(coef.shape) == 1:
            # Binary classification - single coefficient vector
            coefficients = coef
        elif predicted_class >= coef.shape[0]:
            # Multi-class but predicted_class is out of bounds - use first class
            coefficients = coef[0]
        else:
            coefficients = coef[predicted_class]

        # Contribution of each non-zero feature = TF-IDF weight * coefficient
        feature_contributions = []
        nonzero_indices = tfidf_vector.nonzero()[1]
        for idx in nonzero_indices:
            feature_name = feature_names[idx]
            tfidf_score = tfidf_vector[0, idx]
            coefficient = coefficients[idx]
            contribution = tfidf_score * coefficient
            # For binary classification, invert the contribution when
            # explaining the negative class
            if len(coef.shape) == 1 and predicted_class == 0:
                contribution = -contribution
            feature_contributions.append((feature_name, float(contribution)))

        # Sort by absolute contribution and keep the top 10 features
        feature_contributions.sort(key=lambda x: abs(x[1]), reverse=True)
        return feature_contributions[:10]

    def plot_explanation(self, explanation: Dict[str, Any], max_display: int = 15) -> plt.Figure:
        """Create an explanation plot (SHAP-style or coefficient-based)"""
        fig, ax = plt.subplots(figsize=(12, 8))

        if explanation.get('method') == 'shap_text' and 'shap_values' in explanation and 'words' in explanation:
            try:
                # Word importance scores plus the original text for context
                words = explanation['words']
                word_importance = explanation['shap_values']
                original_text = explanation.get('text', '').lower().split()

                # Pair each word with its score and whether it occurs in the review
                word_scores = []
                for word in words:
                    score = word_importance.get(word, 0.0)
                    in_review = word in original_text
                    word_scores.append((word, score, in_review))

                # Sort by absolute importance and take the top words
                word_scores.sort(key=lambda x: abs(x[1]), reverse=True)
                word_scores = word_scores[:max_display]

                # Prepare plot data
                plot_words = [w[0] for w in word_scores]
                plot_importance = [w[1] for w in word_scores]
                in_review = [w[2] for w in word_scores]

                # Horizontal bars: green for positive impact, red for negative
                colors = ['darkgreen' if imp > 0 else 'darkred' for imp in plot_importance]
                y_pos = np.arange(len(plot_words))
                ax.barh(y_pos, plot_importance, align='center', color=colors, alpha=0.6)

                ax.set_yticks(y_pos)
                # Mark words that actually appear in the review with an arrow
                word_labels = [f'→ {w}' if in_rev else w
                               for w, in_rev in zip(plot_words, in_review)]
                ax.set_yticklabels(word_labels)
                # Style labels for words that occur in the review
                for label, in_rev in zip(ax.get_yticklabels(), in_review):
                    if in_rev:
                        label.set_weight('bold')
                        label.set_style('italic')

                ax.set_xlabel('Impact on Prediction', fontsize=10)
                ax.set_title(f'Feature Importance - {explanation.get("prediction_label", "Prediction")}',
                             fontsize=12, pad=20)
                ax.grid(True, axis='x', alpha=0.2)

                # Value labels next to each bar
                for i, value in enumerate(plot_importance):
                    label_pos = value + (0.01 if value >= 0 else -0.01)
                    ax.text(label_pos, i, f'{value:.3f}',
                            ha='left' if value >= 0 else 'right',
                            va='center', fontsize=9)

                # Legend for the arrow marker
                ax.text(0.98, -0.15, '→ indicates word appears in the actual review',
                        transform=ax.transAxes, ha='right', fontsize=8,
                        style='italic', color='gray')

                # Most important word at the top
                ax.invert_yaxis()

                # Shade rows for words that occur in the review
                for i, in_rev in enumerate(in_review):
                    if in_rev:
                        ax.axhspan(i - 0.4, i + 0.4, color='gray', alpha=0.1)

                plt.tight_layout()
                return fig
            except Exception as e:
                print(f"Error creating SHAP plot: {e}")
                # The helper draws onto ax and returns None, so fall through
                # to return the figure itself
                self._plot_coefficient_explanation(ax, explanation, max_display)
        else:
            # Fallback to the coefficient-based plot
            self._plot_coefficient_explanation(ax, explanation, max_display)

        plt.tight_layout()
        return fig

    def _plot_coefficient_explanation(self, ax, explanation: Dict[str, Any], max_display: int = 15):
        """Draw a coefficient-based explanation onto the given axes"""
        if 'feature_importance' not in explanation:
            ax.text(0.5, 0.5, 'No feature importance data available',
                    ha='center', va='center', transform=ax.transAxes)
            return

        # Top features by contribution
        features = explanation['feature_importance'][:max_display]
        if not features:
            ax.text(0.5, 0.5, 'No significant features found',
                    ha='center', va='center', transform=ax.transAxes)
            return

        # Extract names and values
        feature_names = [f[0] for f in features]
        feature_values = [f[1] for f in features]

        # Horizontal bar plot: green for positive, red for negative
        colors = ['green' if v > 0 else 'red' for v in feature_values]
        ax.barh(range(len(feature_names)), feature_values, color=colors, alpha=0.7)

        ax.set_yticks(range(len(feature_names)))
        ax.set_yticklabels(feature_names)
        ax.set_xlabel('Feature Contribution')
        ax.set_title(f'Feature Importance - Prediction: Class {explanation["prediction"]}')
        ax.grid(axis='x', alpha=0.3)

        # Value labels next to each bar
        for i, value in enumerate(feature_values):
            ax.text(value + (0.001 if value > 0 else -0.001), i, f'{value:.3f}',
                    ha='left' if value > 0 else 'right', va='center', fontsize=9)

        # Most important features at the top
        ax.invert_yaxis()
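

# Example usage (a minimal sketch; assumes `clf` is an already-trained
# ExplainableVaderClassifier and `reviews` a hypothetical list of strings):
#
#     shap_explainer = SHAPExplainer(clf)
#     shap_explainer.create_explainer(background_texts=reviews)
#     result = shap_explainer.explain_prediction("great battery but poor screen")
#     fig = shap_explainer.plot_explanation(result)
#     fig.savefig("shap_explanation.png")
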
class LIMEExplainer:
    """LIME-based explanations for sentiment analysis"""

    def __init__(self, classifier: ExplainableVaderClassifier):
        self.classifier = classifier
        self.explainer = None
        self.class_names = ['Negative', 'Neutral', 'Positive']

    def create_explainer(self) -> None:
        """Create the LIME text explainer"""
        if not LIME_AVAILABLE:
            raise ImportError("LIME not available. Install with: pip install lime")
        self.explainer = LimeTextExplainer(
            class_names=self.class_names,
            feature_selection='auto',
            split_expression=r'\s+',
            bow=True
        )

    def explain_prediction(self, text: str, num_features: int = 10) -> Dict[str, Any]:
        """Generate a LIME explanation for a single prediction"""
        if self.explainer is None:
            self.create_explainer()
        if not self.classifier.is_trained:
            raise ValueError("Classifier not trained")

        # Prediction function for LIME with a guaranteed 3-class output
        def predict_fn(texts):
            probas = self.classifier.tfidf_pipeline.predict_proba(texts)
            expanded = np.zeros((len(texts), 3))
            if probas.shape[1] == 2:  # Binary classification case
                expanded[:, 0] = probas[:, 0]  # Negative
                expanded[:, 2] = probas[:, 1]  # Positive
                # Neutral probability is whatever mass remains
                expanded[:, 1] = np.maximum(0, 1 - expanded[:, 0] - expanded[:, 2])
            elif probas.shape[1] == 3:  # Already 3-class
                expanded = probas
            else:
                raise ValueError(f"Unexpected probability shape: {probas.shape}")
            return expanded

        try:
            # Initial prediction via the same function LIME will perturb
            initial_probs = predict_fn([text])[0]
            pred_class = int(np.argmax(initial_probs))

            # Generate the LIME explanation for all three classes
            explanation = self.explainer.explain_instance(
                text,
                predict_fn,
                num_features=num_features,
                labels=[0, 1, 2]
            )

            # Return the complete explanation with consistent data
            return {
                'text': text,
                'prediction': pred_class,
                'prediction_label': self.class_names[pred_class],
                'probabilities': initial_probs,
                'explanation': explanation,
                'lime_list': explanation.as_list(label=pred_class),
                'lime_map': explanation.as_map()[pred_class]  # predicted class only
            }
        except Exception as e:
            raise ValueError(f"LIME explanation failed: {str(e)}")

    def plot_explanation(self, explanation: Dict[str, Any]) -> plt.Figure:
        """Create a LIME explanation plot"""
        fig = explanation['explanation'].as_pyplot_figure()
        plt.title(f"LIME Explanation - Predicted: {explanation['prediction_label']}")
        plt.tight_layout()
        return fig
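

# Example usage (a minimal sketch; assumes `clf` is an already-trained
# ExplainableVaderClassifier):
#
#     lime_explainer = LIMEExplainer(clf)
#     result = lime_explainer.explain_prediction("arrived late and broken",
#                                                num_features=8)
#     print(result['prediction_label'], result['lime_list'])
#     fig = lime_explainer.plot_explanation(result)
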
def analyze_feature_importance(classifier: ExplainableVaderClassifier,
                               texts: List[str],
                               top_k: int = 20) -> pd.DataFrame:
    """Analyze overall feature importance across all reviews"""
    if not classifier.is_trained:
        raise ValueError("Classifier not trained")

    # Feature coefficients from the logistic regression step
    coefficients = classifier.tfidf_pipeline.named_steps['classifier'].coef_
    feature_names = classifier.feature_names

    # Feature importance rows, one per (feature, class) pair
    importance_data = []

    # Check if we have binary or multi-class coefficients
    is_binary = len(coefficients.shape) == 1 or coefficients.shape[0] == 1
    if is_binary:
        # For binary classification there is only one set of coefficients;
        # the negative class is the inverse of the positive-class coefficients
        coef = coefficients.reshape(-1)  # Ensure 1D array

        # Process negative class (inverted coefficients)
        top_neg_indices = np.argsort(coef)[:top_k]
        for idx in top_neg_indices:
            importance_data.append({
                'feature': feature_names[idx],
                'importance': -coef[idx],  # Invert for negative class
                'class': 'Negative',
                'type': 'positive'
            })

        # Process positive class
        top_pos_indices = np.argsort(coef)[-top_k:][::-1]
        for idx in top_pos_indices:
            importance_data.append({
                'feature': feature_names[idx],
                'importance': coef[idx],
                'class': 'Positive',
                'type': 'positive'
            })

        # Add neutral class with dampened importance
        for idx in np.argsort(np.abs(coef))[-top_k:][::-1]:
            importance_data.append({
                'feature': feature_names[idx],
                'importance': coef[idx] * 0.5,
                'class': 'Neutral',
                'type': 'neutral'
            })
    else:
        # Multi-class case - process each class separately
        for class_idx, class_name in enumerate(['Negative', 'Neutral', 'Positive']):
            if class_idx >= coefficients.shape[0]:
                continue  # Skip if we don't have coefficients for this class
            class_coef = coefficients[class_idx]

            # Get top positive and negative features
            top_pos_indices = np.argsort(class_coef)[-top_k:][::-1]
            top_neg_indices = np.argsort(class_coef)[:top_k]
            for idx in top_pos_indices:
                importance_data.append({
                    'feature': feature_names[idx],
                    'importance': class_coef[idx],
                    'class': class_name,
                    'type': 'positive'
                })
            for idx in top_neg_indices:
                importance_data.append({
                    'feature': feature_names[idx],
                    'importance': class_coef[idx],
                    'class': class_name,
                    'type': 'negative'
                })

    df = pd.DataFrame(importance_data)

    # Handle the empty dataframe case with placeholder data
    if df.empty:
        df = pd.DataFrame({
            'feature': ['no_features_found'],
            'importance': [0.0],
            'class': ['Neutral'],
            'type': ['neutral']
        })
    return df
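

# Example (a minimal sketch): compute global importances for a trained
# classifier and hand them to the plot helper defined below.
#
#     importance_df = analyze_feature_importance(clf, reviews, top_k=15)
#     fig = create_global_explanation_plot(importance_df, class_name='Negative')
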
def create_global_explanation_plot(importance_df: pd.DataFrame,
                                   class_name: str = 'Positive') -> plt.Figure:
    """Create a global feature importance visualization"""
    class_data = importance_df[importance_df['class'] == class_name].copy()
    class_data = class_data.nlargest(20, 'importance')

    fig, ax = plt.subplots(figsize=(12, 8))
    colors = ['red' if imp < 0 else 'green' for imp in class_data['importance']]
    ax.barh(range(len(class_data)), class_data['importance'], color=colors, alpha=0.7)

    ax.set_yticks(range(len(class_data)))
    ax.set_yticklabels(class_data['feature'])
    ax.set_xlabel('Feature Importance (Coefficient Value)')
    ax.set_title(f'Global Feature Importance - {class_name} Sentiment')
    ax.grid(axis='x', alpha=0.3)

    # Add value labels on bars
    for i, value in enumerate(class_data['importance']):
        ax.text(value + (0.001 if value > 0 else -0.001), i, f'{value:.3f}',
                ha='left' if value > 0 else 'right', va='center', fontsize=9)

    plt.tight_layout()
    return fig

def compare_explanations(shap_explanation: Dict[str, Any],
                         lime_explanation: Dict[str, Any]) -> pd.DataFrame:
    """Compare SHAP and LIME explanations side by side"""
    # Extract LIME features and scores
    lime_features = dict(lime_explanation['lime_list'])

    # SHAP word importances are stored as a word -> score dictionary
    # (empty when only the coefficient-based fallback was available)
    shap_values = shap_explanation.get('shap_values', {}) or {}

    # Note: This is a simplified comparison - SHAP and LIME may use
    # different tokenization, so direct comparison is approximate
    comparison_data = []
    for feature, lime_score in lime_features.items():
        comparison_data.append({
            'feature': feature,
            'lime_score': lime_score,
            'shap_score': float(shap_values.get(feature.lower(), 0.0)),
            'method': 'LIME'
        })

    return pd.DataFrame(comparison_data)

# Utility functions for Streamlit integration
def check_explainability_availability() -> Dict[str, bool]:
    """Check which explainability tools are available"""
    return {
        'shap': SHAP_AVAILABLE,
        'lime': LIME_AVAILABLE
    }


def create_explanation_summary(explanations: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Create summary statistics from multiple explanations"""
    if not explanations:
        return {}

    # Aggregate feature importance across explanations
    feature_counts = {}
    total_explanations = len(explanations)
    for exp in explanations:
        if 'lime_list' in exp:
            for feature, score in exp['lime_list']:
                if feature not in feature_counts:
                    feature_counts[feature] = {'count': 0, 'total_score': 0.0}
                feature_counts[feature]['count'] += 1
                feature_counts[feature]['total_score'] += abs(score)

    # Calculate average importance and frequency per feature
    summary_features = []
    for feature, stats in feature_counts.items():
        avg_importance = stats['total_score'] / stats['count']
        frequency = stats['count'] / total_explanations
        summary_features.append({
            'feature': feature,
            'avg_importance': avg_importance,
            'frequency': frequency,
            'appearances': stats['count']
        })

    summary_df = pd.DataFrame(summary_features)
    # Guard against an empty frame, which has no columns to sort on
    if not summary_df.empty:
        summary_df = summary_df.sort_values('avg_importance', ascending=False)

    return {
        'total_explanations': total_explanations,
        'feature_summary': summary_df,
        'avg_confidence': np.mean([exp.get('probabilities', [0])[exp.get('prediction', 0)]
                                   for exp in explanations])
    }