# cooccurrence.py
import spacy
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
from collections import Counter
import networkx as nx
from tqdm import tqdm
import re
import math
import requests
import time
from typing import List, Dict, Tuple
matplotlib.rcParams['font.sans-serif'] = ['SimHei']  # set the default font so CJK characters render correctly
matplotlib.rcParams['axes.unicode_minus'] = False  # keep the minus sign from rendering as a box when figures are saved
def load_and_analyze_text(file_path, min_words=4):  # min_words: minimum sentence length to keep
"""
Read the file, parse date stamps (e.g. 'Jan 25th 2018'), group text by date,
and then split each date's text into labeled sentences.
Parameters:
-----------
file_path : str
Path to input text file
min_words : int
Minimum number of words required for a sentence to be included
"""
# 1) Read the file
with open(file_path, 'r', encoding='utf-8') as file:
full_text = file.read()
# 2) Match date stamps and split
pattern = r'[A-Z][a-z]{2}\s+\d{1,2}(?:st|nd|rd|th)?\s+\d{4}'
sections = re.split(f'({pattern})', full_text)
# sections now holds alternating [before_date, date, text, next_date, text, ...]
# Dictionary to store date -> list of sentences
date_sentences = {}
current_date = None
total_sentences = 0 # Track total sentences before filtering
filtered_sentences = 0 # Track filtered sentences
for i in range(len(sections)):
segment = sections[i].strip()
if re.match(pattern, segment):
current_date = segment
date_sentences[current_date] = []
else:
# If segment is text and we have a valid date
if current_date:
# 3) Split into sentences with improved logic
# First split on clear sentence boundaries
raw_sents = []
# Keep track of potential sentence fragments
current_sent = []
# Split on sentence boundaries but preserve splits for inspection
segments = re.split(r'([.!?]+)', segment)
for j, seg in enumerate(segments):  # separate index so the outer loop variable i is not shadowed
if re.match(r'[.!?]+', seg): # If it's a boundary marker
if j+1 < len(segments):
next_seg = segments[j+1].strip()
# Don't split if next segment starts with lowercase or number
if not next_seg or re.match(r'^[a-z0-9]', next_seg):
current_sent.append(seg)
continue
# End of sentence reached
current_sent.append(seg)
full_sent = ''.join(current_sent).strip()
if full_sent:
raw_sents.append(full_sent)
current_sent = []
else:
current_sent.append(seg)
# Add any remaining sentence
if current_sent:
full_sent = ''.join(current_sent).strip()
if full_sent:
raw_sents.append(full_sent)
# Filter out short sentences before adding to date_sentences
long_sentences = []
for sent in raw_sents:
total_sentences += 1
if len(sent.split()) >= min_words:
long_sentences.append(sent)
else:
filtered_sentences += 1
date_sentences[current_date].extend(long_sentences)
# 4) Flatten all sentences and compute lengths in words
all_sentences = [sent for sublist in date_sentences.values() for sent in sublist]
# Change to count words instead of characters
lengths = [len(sent.split()) for sent in all_sentences]
# Create DataFrame with sentences and their word counts
sentences_df = pd.DataFrame({
'sentence': all_sentences,
'word_count': lengths,
'date': [date for date, sents in date_sentences.items() for _ in sents]
})
# Save to CSV
output_file = './output/sentences_analysis.csv'
sentences_df.to_csv(output_file, index=False, encoding='utf-8')
print(f"Sentences analysis saved to {output_file}")
# Update plot title and labels to reflect word counts
plt.figure(figsize=(10, 6))
sns.histplot(lengths, bins=50)
plt.title('Distribution of Sentence Lengths')
plt.xlabel('Sentence Length (words)')
plt.ylabel('Count')
plt.show()
print(f"Total number of date stamps: {len(date_sentences)}")
print(f"Total number of sentences: {len(all_sentences)}")
if lengths:
print(f"Average sentence length: {np.mean(lengths):.2f} words")
print(f"Median sentence length: {np.median(lengths):.2f} words")
print(f"Total sentences before filtering: {total_sentences}")
print(f"Sentences removed (too short): {filtered_sentences}")
print(f"Sentences retained: {len(all_sentences)}")
print(f"Filtering removed {filtered_sentences/total_sentences:.1%} of sentences")
# Return dictionary of date -> sentences
return date_sentences, all_sentences
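# Illustrative sketch (not part of the original pipeline): the date-stamp pattern above matches
# strings such as "Jan 25th 2018", and splitting with a capturing group keeps the stamps in the
# result so the loop can pair each date with the text that follows it. For a toy input:
#   re.split(r'([A-Z][a-z]{2}\s+\d{1,2}(?:st|nd|rd|th)?\s+\d{4})',
#            "intro Jan 25th 2018 First note. Feb 2nd 2018 Second note.")
#   -> ['intro ', 'Jan 25th 2018', ' First note. ', 'Feb 2nd 2018', ' Second note.']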
def load_synonym_mapping(file_path='synonyms.csv'):
"""
Load synonym mapping from a CSV file
Expected CSV format:
standard_form,synonym1,synonym2,synonym3,...
china,chinese,chinas
america,american,americans,usa,united states
"""
synonym_mapping = {}
try:
with open(file_path, 'r', encoding='utf-8') as file:
for line in file:
# Skip empty lines and comments
if not line.strip() or line.strip().startswith('#'):
continue
# Split the line by comma and remove whitespace
terms = [term.strip().lower() for term in line.strip().split(',')]
if len(terms) < 2: # Skip lines with no synonyms
continue
# First term is the standard form
standard_form = terms[0]
# Map all synonyms to the standard form
for synonym in terms[1:]:
if synonym: # Skip empty terms
synonym_mapping[synonym] = standard_form
# Also map standard form to itself
synonym_mapping[standard_form] = standard_form
except FileNotFoundError:
print(f"Warning: Synonym file {file_path} not found. Proceeding without synonym mapping.")
return {}
print(f"Loaded {len(synonym_mapping)} synonym mappings from {file_path}")
return synonym_mapping
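# Illustrative example (hypothetical synonyms.csv line, following the format documented above):
#   america,american,americans,usa
# would add {'american': 'america', 'americans': 'america', 'usa': 'america', 'america': 'america'}
# to the mapping, so every variant can later be collapsed with synonym_map.get(token, token).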
def load_compound_words(file_path='compound_words.txt'):
"""
Load compound words from a text file
Returns a set of compound words and a dict of first words to possible compounds
"""
compound_words = set()
first_word_dict = {}
try:
with open(file_path, 'r', encoding='utf-8') as file:
for line in file:
# Skip empty lines and comments
line = line.strip()
if line and not line.startswith('#'):
# Add to compound words set
compound_words.add(line.lower())
# Add first word to dictionary
first_word = line.split()[0].lower()
if first_word not in first_word_dict:
first_word_dict[first_word] = set()
first_word_dict[first_word].add(line.lower())
print(f"Loaded {len(compound_words)} compound words from {file_path}")
except FileNotFoundError:
print(f"Warning: Compound words file {file_path} not found.")
return set(), {}
return compound_words, first_word_dict
def merge_compound_words(tokens, compound_words, first_word_dict):
"""
Merge compound words in a list of tokens
"""
if not compound_words:
return tokens
merged_tokens = []
i = 0
while i < len(tokens):
current_word = tokens[i].lower()
# Check if current word could start a compound
if current_word in first_word_dict and i + 1 < len(tokens):
# Try to find the longest possible compound
max_compound_len = max(len(comp.split())
for comp in first_word_dict[current_word])
# Look ahead to find compounds
for j in range(min(max_compound_len, len(tokens) - i)):
possible_compound = ' '.join(tokens[i:i+j+1]).lower()
if possible_compound in compound_words:
# Found a compound, add it and skip its tokens
merged_tokens.append(possible_compound)
i += j + 1
break
else:
# No compound found, add current token
merged_tokens.append(current_word)
i += 1
else:
# Not a potential compound start, add current token
merged_tokens.append(current_word)
i += 1
return merged_tokens
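# Illustrative example (hypothetical inputs): with compound_words = {'united states', 'trade war'}
# and first_word_dict = {'united': {'united states'}, 'trade': {'trade war'}}, the call
#   merge_compound_words(['the', 'united', 'states', 'start', 'a', 'trade', 'war'],
#                        compound_words, first_word_dict)
# returns ['the', 'united states', 'start', 'a', 'trade war'].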
def load_custom_stopwords(file_path='./data/custom_stopwords.txt'):
"""
Load custom stopwords from a text file
Parameters:
-----------
file_path : str
Path to custom stopwords file, one word per line
Returns:
--------
set
Set of custom stopwords
"""
custom_stopwords = set()
try:
with open(file_path, 'r', encoding='utf-8') as file:
for line in file:
# Skip empty lines and comments
line = line.strip()
if line and not line.startswith('#'):
custom_stopwords.add(line.lower())
print(f"Loaded {len(custom_stopwords)} custom stopwords from {file_path}")
except FileNotFoundError:
print(f"Warning: Custom stopwords file {file_path} not found.")
return set()
return custom_stopwords
def preprocess_text(sentences, synonym_file='./data/synonyms.csv',
compound_file='./data/compound_words.txt',
custom_stopwords_file='./data/custom_stopwords.txt'):
"""
Preprocess text using spaCy for lemmatization and stopword removal,
with compound word merging and synonym normalization
"""
# Load English language model
nlp = spacy.load('en_core_web_sm')
# Get custom stopwords
custom_stopwords = load_custom_stopwords(custom_stopwords_file)
# Get synonym mapping
synonym_map = load_synonym_mapping(synonym_file)
# Get compound words
compound_words, first_word_dict = load_compound_words(compound_file)
# Process sentences
processed_docs = []
for sentence in tqdm(sentences, desc="Processing sentences"):
doc = nlp(sentence.lower())
# Keep only non-stopword tokens that are alphabetic
tokens = []
for token in doc:
if (not token.is_stop
and token.text.lower() not in custom_stopwords # Check custom stopwords
and token.is_alpha
and len(token.text) > 2 # Remove very short words
and not token.like_num): # Remove numbers
# Get lemmatized form
lemma = token.lemma_
tokens.append(lemma)
# Merge compound words
tokens = merge_compound_words(tokens, compound_words, first_word_dict)
# Apply synonym mapping
tokens = [synonym_map.get(token, token) for token in tokens]
processed_docs.append(tokens)
return processed_docs
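# Usage sketch (hypothetical sentence; the exact tokens depend on the en_core_web_sm model and on
# the synonym/compound/stopword files actually supplied):
#   preprocess_text(["The Chinese companies are buying American soybeans."])
# would yield roughly [['china', 'company', 'buy', 'america', 'soybean']] if the synonym file maps
# 'chinese' -> 'china' and 'american' -> 'america'.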
def analyze_word_frequency(processed_docs):
"""
Analyze word frequency and create visualization
"""
# Flatten list of tokens
all_tokens = [token for doc in processed_docs for token in doc]
# Count frequencies
word_freq = Counter(all_tokens)
# Create DataFrame for top 20 words
top_20 = pd.DataFrame(word_freq.most_common(20), columns=['word', 'frequency'])
# Plot
plt.figure(figsize=(12, 6))
sns.barplot(data=top_20, x='word', y='frequency')
plt.xticks(rotation=45, ha='right')
plt.title('Top 20 Most Frequent Words')
plt.tight_layout()
plt.show(block=False)
# Save vocabulary to CSV
vocab_df = pd.DataFrame(word_freq.most_common(), columns=['word', 'frequency'])
vocab_df.to_csv('./output/vocabulary.csv', index=False)
return word_freq
def load_candidate_words(file_path='candidate_words.txt'):
"""
Load candidate words from a text file
"""
candidate_words = set()
try:
with open(file_path, 'r', encoding='utf-8') as file:
for line in file:
# Skip empty lines, comments and whitespace
line = line.strip()
if line and not line.startswith('#'):
candidate_words.add(line.lower())
print(f"Loaded {len(candidate_words)} candidate words from {file_path}")
except FileNotFoundError:
print(f"Warning: Candidate words file {file_path} not found.")
return set()
return candidate_words
class ConceptNetQuerier:
def __init__(self, base_url: str = "http://api.conceptnet.io"):
self.base_url = base_url
self.request_delay = 0.5
def get_relations(self, word1: str, word2: str) -> List[Dict]:
relations = []
urls = [
f"{self.base_url}/query?start=/c/en/{word1}&end=/c/en/{word2}",
f"{self.base_url}/query?start=/c/en/{word2}&end=/c/en/{word1}"
]
for url in urls:
try:
response = requests.get(url)
time.sleep(self.request_delay)
if response.status_code == 200:
data = response.json()
for edge in data.get('edges', []):
relation = {
'start': edge['start']['label'],
'end': edge['end']['label'],
'relation': edge['rel']['label'],
'weight': edge.get('weight', 0),
'direction': 'forward' if edge['start']['label'] == word1 else 'backward'
}
relations.append(relation)
else:
print(f"请求失败: {response.status_code}")
except Exception as e:
print(f"查询出错: {str(e)}")
return relations
def process_word_pairs(self, word_pairs: List[Tuple[str, str]]) -> Dict:
results = {}
for word1, word2 in word_pairs:
pair_key = f"{word1}-{word2}"
relations = self.get_relations(word1, word2)
results[pair_key] = relations
return results
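# Usage sketch: query the public ConceptNet API for one word pair. The edges returned depend on
# the live API; this only illustrates the call pattern and the shape of each result dict.
#   querier = ConceptNetQuerier()
#   rels = querier.get_relations('trade', 'tariff')
#   # each item: {'start': ..., 'end': ..., 'relation': ..., 'weight': ..., 'direction': ...}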
class DBpediaQuerier:
def __init__(self, endpoint="http://dbpedia.org/sparql"):
self.endpoint = endpoint
self.request_delay = 0.5
def get_relations(self, word1: str, word2: str) -> List[str]:
"""从DBpedia获取两个词之间的语义关系"""
query = """
SELECT DISTINCT ?relation
WHERE {
{
?s rdfs:label ?label1 .
?o rdfs:label ?label2 .
?s ?relation ?o .
FILTER(LANG(?label1) = "en" && LANG(?label2) = "en")
FILTER(LCASE(STR(?label1)) = "%s")
FILTER(LCASE(STR(?label2)) = "%s")
}
UNION
{
?s rdfs:label ?label1 .
?o rdfs:label ?label2 .
?o ?relation ?s .
FILTER(LANG(?label1) = "en" && LANG(?label2) = "en")
FILTER(LCASE(STR(?label1)) = "%s")
FILTER(LCASE(STR(?label2)) = "%s")
}
}
""" % (word1.lower(), word2.lower(), word1.lower(), word2.lower())
try:
response = requests.get(self.endpoint,
params={'query': query, 'format': 'json'},
headers={'Accept': 'application/json'})
time.sleep(self.request_delay)
if response.status_code == 200:
results = response.json()
relations = [result['relation']['value'] for result in results.get('results', {}).get('bindings', [])]
return relations
else:
print(f"DBpedia查询失败: {response.status_code}")
return []
except Exception as e:
print(f"DBpedia查询出错: {str(e)}")
return []
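# Usage sketch: the SPARQL template above is filled in with the lowercased words, so
# get_relations('China', 'Beijing') asks DBpedia for any predicate linking the resources labelled
# "china" and "beijing" in either direction, and returns full predicate URIs (for example an
# ontology property such as http://dbpedia.org/ontology/capital, if such an edge exists).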
class YAGOQuerier:
def __init__(self, endpoint="https://yago-knowledge.org/sparql/query"):
self.endpoint = endpoint
self.request_delay = 0.5
def get_relations(self, word1: str, word2: str) -> List[str]:
"""从YAGO获取两个词之间的语义关系"""
query = """
SELECT DISTINCT ?relation
WHERE {
{
?s rdfs:label ?label1 .
?o rdfs:label ?label2 .
?s ?relation ?o .
FILTER(LANG(?label1) = "en" && LANG(?label2) = "en")
FILTER(LCASE(STR(?label1)) = "%s")
FILTER(LCASE(STR(?label2)) = "%s")
}
UNION
{
?s rdfs:label ?label1 .
?o rdfs:label ?label2 .
?o ?relation ?s .
FILTER(LANG(?label1) = "en" && LANG(?label2) = "en")
FILTER(LCASE(STR(?label1)) = "%s")
FILTER(LCASE(STR(?label2)) = "%s")
}
}
""" % (word1.lower(), word2.lower(), word1.lower(), word2.lower())
try:
response = requests.get(self.endpoint,
params={'query': query, 'format': 'json'},
headers={'Accept': 'application/json'})
time.sleep(self.request_delay)
if response.status_code == 200:
results = response.json()
relations = [result['relation']['value'] for result in results.get('results', {}).get('bindings', [])]
return relations
else:
print(f"YAGO查询失败: {response.status_code}")
return []
except Exception as e:
print(f"YAGO查询出错: {str(e)}")
return []
def get_semantic_relations(word1: str, word2: str, knowledge_base: str = 'conceptnet') -> List[str]:
"""
Retrieve semantic relations between two words from the selected knowledge base
Parameters:
-----------
word1: str
First word
word2: str
Second word
knowledge_base: str
Knowledge base to query ('conceptnet', 'dbpedia', 'yago')
Returns:
--------
List[str]
List of semantic relations
"""
if knowledge_base == 'conceptnet':
querier = ConceptNetQuerier()
elif knowledge_base == 'dbpedia':
querier = DBpediaQuerier()
elif knowledge_base == 'yago':
querier = YAGOQuerier()
else:
raise ValueError(f"Unsupported knowledge base: {knowledge_base}")
return querier.get_relations(word1, word2)
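# Usage sketch (results depend on the live knowledge-base endpoints):
#   get_semantic_relations('trade', 'war', knowledge_base='conceptnet')   # list of edge dicts
#   get_semantic_relations('china', 'beijing', knowledge_base='dbpedia')  # list of predicate URIs
# Any other knowledge_base value raises ValueError.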
def build_cooccurrence_network(processed_docs,
candidate_file=None,
analysis_mode='all',
top_k=200,
min_freq=5,
window_size=5,
knowledge_base=None):  # optional semantic knowledge base
"""
Build the word co-occurrence network
Parameters:
-----------
processed_docs : list of lists
Preprocessed documents
candidate_file : str, optional
Path to the candidate words file
analysis_mode : str
'candidates_all': build a sub-network for each candidate word
'all': analyze co-occurrences between all words
top_k : int, optional
Keep only the k most frequent co-occurrence pairs
min_freq : int
Minimum co-occurrence frequency threshold
window_size : int
Co-occurrence window size
knowledge_base : str, optional
Semantic relation knowledge base ('conceptnet', 'dbpedia', 'yago', None)
"""
# Create the graph
G = nx.Graph()
# Count word frequencies
word_freq = Counter([word for doc in processed_docs for word in doc])
# Load candidate words
candidate_words = load_candidate_words(candidate_file) if candidate_file else set()
# Filter out low-frequency words
valid_words = {word for word, freq in word_freq.items() if freq >= min_freq}
print(f"Using all words with frequency >= {min_freq}: {len(valid_words)} words")
# Build the co-occurrence counts
cooc_details = {}
for doc in tqdm(processed_docs, desc="Building co-occurrence network"):
doc_words = [word for word in doc if word in valid_words]
# Find co-occurrences within the sliding window
for i in range(len(doc_words)):
for j in range(i + 1, min(i + window_size + 1, len(doc_words))):
word1, word2 = doc_words[i], doc_words[j]
if word1 != word2: # avoid self-loops
key = tuple(sorted([word1, word2]))
dist = j - i
cooc_details.setdefault(key, {"freq": 0, "dist_sum": 0, "count": 0})
cooc_details[key]["freq"] += 1
cooc_details[key]["dist_sum"] += dist
cooc_details[key]["count"] += 1
# Assemble the full co-occurrence matrix
cooc_matrix = {}
for (w1, w2), data in cooc_details.items():
freq = data["freq"]
avg_dist = (data["dist_sum"] / data["count"]) if data["count"] else 0
salton = freq / math.sqrt(word_freq[w1] * word_freq[w2]) if (word_freq[w1] and word_freq[w2]) else 0
# Fetch semantic relations from the selected knowledge base (if any)
semantic_rels = get_semantic_relations(w1, w2, knowledge_base) if knowledge_base else []
cooc_matrix[(w1, w2)] = {
"freq": freq,
"salton_index": salton,
"avg_distance": avg_dist,
"semantic_relations": semantic_rels
}
# Optionally keep only the top_k most frequent co-occurrence pairs
if top_k:
cooc_matrix = dict(sorted(cooc_matrix.items(), key=lambda item: item[1]["freq"], reverse=True)[:top_k])
# Build the network according to the analysis mode
if analysis_mode == 'candidates_all' and candidate_words:
# Build a sub-network for each candidate word
candidate_networks = {}
for candidate in candidate_words:
if candidate not in valid_words:
print(f"警告: 候选词 '{candidate}' 频次低于阈值,已跳过")
continue
# Extract all edges involving this candidate word
candidate_edges = {(w1, w2): data
for (w1, w2), data in cooc_matrix.items()
if candidate in (w1, w2)}
# Build the subgraph
sub_G = nx.Graph()
for (w1, w2), data in candidate_edges.items():
sub_G.add_edge(w1, w2, weight=data["freq"])
# Store the sub-network
candidate_networks[candidate] = {
'graph': sub_G,
'cooc_matrix': candidate_edges
}
# Export this candidate's co-occurrence matrix
export_cooccurrence_matrix(
candidate_edges,
f'./output/cooccurrence_matrix_{candidate}.csv'
)
# Export this candidate's network file
export_to_gephi(
sub_G,
f'./output/network_{candidate}.gexf'
)
print(f"\n候选词 '{candidate}' 的网络统计:")
print(f"节点数: {sub_G.number_of_nodes()}")
print(f"边数: {sub_G.number_of_edges()}")
return candidate_networks, cooc_matrix
else: # 'all' mode
# Build the full network
for (word1, word2), data in cooc_matrix.items():
G.add_edge(word1, word2, weight=data["freq"])
print(f"\n网络统计:")
print(f"节点数: {G.number_of_nodes()}")
print(f"边数: {G.number_of_edges()}")
print(f"平均度: {sum(dict(G.degree()).values()) / G.number_of_nodes():.2f}")
return G, cooc_matrix
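# Worked example (toy numbers, not taken from the corpus): with window_size=5 the token sequence
# ['tariff', 'china', 'trade', 'war'] produces the pairs (tariff, china), (tariff, trade),
# (tariff, war), (china, trade), (china, war), (trade, war). If 'china' and 'trade' co-occur
# freq=12 times and word_freq['china']=100, word_freq['trade']=64, the Salton index is
# 12 / sqrt(100 * 64) = 12 / 80 = 0.15.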
def visualize_network(G, max_nodes=200):
"""
Visualize communities in the co-occurrence network with distinct colors
"""
# Detect communities with the Louvain algorithm
communities = nx.community.louvain_communities(G)
# Map each node to its community
node_community = {}
for community_id, community in enumerate(communities):
for node in community:
node_community[node] = community_id
print(f"检测到 {len(communities)} 个社区")
for i, community in enumerate(communities):
print(f"社区 {i}: {len(community)} 个节点")
# 选择top节点
centrality = nx.degree_centrality(G)
top_nodes = sorted(centrality.items(), key=lambda x: x[1], reverse=True)[:max_nodes]
top_nodes = [node[0] for node in top_nodes]
# Build the subgraph of top nodes
H = G.subgraph(top_nodes)
# Compute the layout
pos = nx.spring_layout(H, k=1/math.sqrt(H.number_of_nodes()), iterations=50)
# Set the figure size
plt.figure(figsize=(15, 15))
# Pick a distinct color for each community
num_communities = len(communities)
colors = plt.cm.tab20(np.linspace(0, 1, num_communities))
# Draw the edges
edge_weights = [H[u][v]['weight'] for u, v in H.edges()]
nx.draw_networkx_edges(H, pos, alpha=0.2,
width=[w/max(edge_weights)*3 for w in edge_weights])
# Draw the nodes community by community
for community_id in range(num_communities):
# Collect the nodes belonging to the current community
community_nodes = [node for node in H.nodes()
if node_community.get(node) == community_id]
if community_nodes:
# Scale node sizes by centrality
node_sizes = [centrality[node] * 5000 for node in community_nodes]
# Draw the nodes of this community in a single color
nx.draw_networkx_nodes(H, pos,
nodelist=community_nodes,
node_size=node_sizes,
node_color=[colors[community_id]],
label=f'Community {community_id}')
# Draw node labels
nx.draw_networkx_labels(H, pos, font_size=8)
plt.title(f"Word co-occurrence network (top {max_nodes} nodes by degree centrality, Louvain communities)")
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.axis('off')
plt.tight_layout()
plt.show(block=False)
return communities
def export_cooccurrence_matrix(cooc_matrix, output_file='cooccurrence_matrix.csv'):
"""
Export co-occurrence matrix to CSV file
"""
# Convert to DataFrame format
cooc_data = []
for (word1, word2), data in cooc_matrix.items():
cooc_data.append({
'word1': word1,
'word2': word2,
'frequency': data['freq'],
'salton_index': data['salton_index'],
'avg_distance': data['avg_distance'],
'semantic_relations': ','.join(data['semantic_relations'])
})
df = pd.DataFrame(cooc_data)
df = df.sort_values('frequency', ascending=False)
df.to_csv(output_file, index=False)
print(f"Co-occurrence matrix exported to {output_file}")
def export_to_gephi(G, output_file='network.gexf'):
"""
Export network to Gephi-compatible GEXF format with additional attributes
Parameters:
G : networkx.Graph
The co-occurrence network
output_file : str
Output file path (should end with .gexf)
"""
# Export to GEXF
nx.write_gexf(G, output_file)
print(f"Network exported to {output_file}")
def calculate_network_metrics(G, top_n=30, output_file="output/network_metrics.csv"):
"""计算共现网络的整体指标、节点中心性指标和社区指标
"""
# 1. Overall network metrics
network_metrics = {
"Nodes": G.number_of_nodes(),
"Edges": G.number_of_edges(),
"Density": nx.density(G),
"Degree assortativity": nx.degree_assortativity_coefficient(G),
"Global efficiency": nx.global_efficiency(G)
}
# 2. Centrality metrics for all nodes
metrics_dict = {
"Degree": dict(G.degree()),
"Weighted degree": dict(G.degree(weight='weight')),
"Degree centrality": nx.degree_centrality(G),
"Betweenness centrality": nx.betweenness_centrality(G),
"Eigenvector centrality": nx.eigenvector_centrality(G)
}
# 3. Build a DataFrame containing all metrics
all_metrics = []
# For each centrality metric, create a sorted ranking
for metric_name, metric_dict in metrics_dict.items():
# Sort by metric value in descending order
sorted_nodes = sorted(metric_dict.items(), key=lambda x: x[1], reverse=True)
# Keep only the top N nodes
top_n_nodes = sorted_nodes[:top_n]
# Add the ranked results to the list
for i, (node, value) in enumerate(top_n_nodes, 1):
while len(all_metrics) < i:
all_metrics.append({})
all_metrics[i-1].update({
f"{metric_name}_节点": node,
f"{metric_name}_值": value
})
# 4. 转换为DataFrame并保存
metrics_df = pd.DataFrame(all_metrics)
metrics_df.to_csv(output_file, index=False, encoding='utf-8')
print(f"已保存网络指标到: {output_file}")
# 添加社区检测
communities = nx.community.louvain_communities(G)
# 计算模块度
modularity = nx.community.modularity(G, communities)
# 更新网络指标以包含模块度
network_metrics["模块度"] = modularity
network_metrics["社区数量"] = len(communities)
# 计算每个社区的节点度信息并保存到CSV
community_degrees = {}
node_degrees = dict(G.degree())
# Build a node-to-degree mapping for each community
for community_id, community in enumerate(communities):
# Total degree of the community
community_total_degree = sum(node_degrees[node] for node in community)
community_degrees[community_id] = {
'nodes': community,
'total_degree': community_total_degree,
'node_degrees': {node: node_degrees[node] for node in community}
}
# Build a DataFrame holding node-degree information for every community
community_data = {}
max_community_size = max(len(comm) for comm in communities)
for community_id, data in community_degrees.items():
# Nodes and degrees sorted by degree
sorted_nodes = sorted(data['node_degrees'].items(), key=lambda x: x[1], reverse=True)
# Two parallel lists: nodes and their degrees
nodes = [node for node, _ in sorted_nodes]
degrees = [degree for _, degree in sorted_nodes]
# Pad to the size of the largest community
nodes.extend([''] * (max_community_size - len(nodes)))
degrees.extend([None] * (max_community_size - len(degrees)))
# Add to the data dictionary
community_data[f'community{community_id}_nodes'] = nodes
community_data[f'community{community_id}_degree'] = degrees
# Save to CSV
community_df = pd.DataFrame(community_data)
community_df.to_csv('output/community_degrees.csv', index=False, encoding='utf-8')
return network_metrics, metrics_df, communities, community_degrees
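# Usage sketch (assumes the output/ directory exists, as elsewhere in this script):
#   metrics, top_nodes_df, communities, community_degrees = calculate_network_metrics(G, top_n=30)
#   metrics["Density"]        # overall network density
#   top_nodes_df.head()       # top-30 nodes per centrality measure, one node/value column pair each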
def main(file_path,
candidate_file=None,
synonym_file='./data/synonyms.csv',
compound_file='./data/compound_words.txt',
analysis_mode='all',
top_k=None,
min_freq=5,
window_size=5,
knowledge_base=None):  # optional semantic knowledge base
"""
Main function to run the entire analysis
Parameters:
-----------
file_path : str
Path to the input text file
candidate_file : str, optional
Path to candidate words file
synonym_file : str
Path to synonym mapping file
compound_file : str
Path to compound words file
analysis_mode : str
'candidates_all': build a sub-network of co-occurrences for each candidate word
'all': analyze all word co-occurrences
top_k : int, optional
If specified, keep only top k most frequent co-occurrences
min_freq : int
Minimum co-occurrence frequency threshold
window_size : int
Size of the sliding window for co-occurrence analysis
knowledge_base : str, optional
Select semantic relation knowledge base ('conceptnet', 'dbpedia', 'yago', None)
"""
# Load and initial preprocessing
print("Loading and cleaning text...")
date_sentences, all_sentences = load_and_analyze_text(file_path)
# Text preprocessing
print("\nPreprocessing text...")
processed_docs = preprocess_text(all_sentences, synonym_file, compound_file)
# Word frequency analysis
print("\nAnalyzing word frequencies...")
word_freq = analyze_word_frequency(processed_docs)
# Build and visualize co-occurrence network
print(f"\nBuilding co-occurrence network (mode: {analysis_mode})...")
G, cooc_matrix = build_cooccurrence_network(
processed_docs,
candidate_file=candidate_file,
analysis_mode=analysis_mode,
top_k=top_k,
min_freq=min_freq,
window_size=window_size,
knowledge_base=knowledge_base
)
# In 'candidates_all' mode the first return value is a dict of per-candidate sub-networks;
# merge them into a single graph so the export, visualization, and metrics steps below still apply.
if isinstance(G, dict):
merged_G = nx.Graph()
for candidate_data in G.values():
merged_G = nx.compose(merged_G, candidate_data['graph'])
G = merged_G
# Export co-occurrence matrix
output_file = f"./output/cooccurrence_matrix_{analysis_mode}.csv"
export_cooccurrence_matrix(cooc_matrix, output_file)
# Export network to Gephi format
export_to_gephi(G, output_file=f"./output/network_{analysis_mode}.gexf")
print("\n执行社区检测并可视化网络...")
communities = visualize_network(G)
plt.show()
# Compute network metrics (now including community information)
network_metrics, node_metrics, communities, community_degrees = calculate_network_metrics(G, top_n=30)
# Print network metrics
print("\nNetwork metrics:")
for metric, value in network_metrics.items():
if isinstance(value, float):
print(f"{metric}: {value:.4f}")
else:
print(f"{metric}: {value}")
# Print community information
print("\nCommunity detection results:")
for i, community in enumerate(communities):
total_degree = community_degrees[i]['total_degree']
print(f"Community {i}: {len(community)} nodes, total degree: {total_degree}")
print(f"Representative nodes: {list(community)[:5]}...")
print("\nCommunity degree information saved to: output/community_degrees.csv")
return G, processed_docs, word_freq, cooc_matrix, communities
if __name__ == '__main__':
data_path = './data/corpus.txt'
candidate_words_path = './data/candidate_words.txt'
analysis_mode = 'candidates_all'
G, docs, freq, cooc, communities = main(
data_path,
candidate_file=candidate_words_path,
analysis_mode=analysis_mode,
top_k=None,
min_freq=3,
knowledge_base='dbpedia' # options: 'conceptnet', 'dbpedia', 'yago', None
)