diff --git a/app.py b/app.py
index ac0d2ec..09bac60 100644
--- a/app.py
+++ b/app.py
@@ -48,6 +48,20 @@ def create_app() -> Flask:
                      chatbot_bp, weather_bp):
         app.register_blueprint(bp)
 
+    # ── Eagerly load DistilBERT at startup ────────────────────────────────
+    # THIS IS THE KEY FIX for the teammate problem.
+    #
+    # Without this, the model loads lazily on the first request. On a machine
+    # that hasn't downloaded the model yet (~67 MB), the download happens during
+    # that first request — and every request that arrives before the download
+    # completes silently falls back to rule-based sentiment instead of DistilBERT.
+    #
+    # With this call, the model is fully downloaded, loaded, and warmed up
+    # BEFORE Flask starts serving requests. Every machine gets DistilBERT
+    # from request #1, regardless of whether the model was pre-cached.
+    from modules.chatbot.sentiment import warmup_sentiment_model
+    warmup_sentiment_model()
+
     # ── Static file routes ─────────────────────────────────────────────────
     @app.route("/uploads/")
     def serve_upload(filename):
@@ -76,9 +90,23 @@ def dashboard():
         return render_template("dashboard.html")
 
     # ── Health check ───────────────────────────────────────────────────────
+    # Now includes sentiment model status so you can verify DistilBERT loaded
+    # correctly on any machine by hitting GET /api/health
     @app.route("/api/health")
     def health():
-        return jsonify({"status": "ok", "service": "Solar PV Defect Detection"}), 200
+        from modules.chatbot.sentiment import _model_loaded, _model_failed
+        if _model_loaded:
+            sentiment_status = "distilbert_ready"
+        elif _model_failed:
+            sentiment_status = "rule_based_fallback"
+        else:
+            sentiment_status = "not_loaded"
+
+        return jsonify({
+            "status": "ok",
+            "service": "Solar PV Defect Detection",
+            "sentiment_model": sentiment_status,
+        }), 200
 
     # ── Error handlers ───────────────────────────────────────────────────────
     @app.errorhandler(404)
diff --git a/backticks.txt b/backticks.txt
new file mode 100644
index 0000000..9813135
Binary files /dev/null and b/backticks.txt differ
diff --git a/chroma_db/40ee6c53-0de3-4c74-ba74-35218d6f6bee/index_metadata.pickle b/chroma_db/40ee6c53-0de3-4c74-ba74-35218d6f6bee/index_metadata.pickle
new file mode 100644
index 0000000..2bde33a
Binary files /dev/null and b/chroma_db/40ee6c53-0de3-4c74-ba74-35218d6f6bee/index_metadata.pickle differ
diff --git a/chroma_db/6de7f9b9-4776-411a-97cb-d8c64141c932/chroma.sqlite3 b/chroma_db/6de7f9b9-4776-411a-97cb-d8c64141c932/chroma.sqlite3
new file mode 100644
index 0000000..fc8bc8c
Binary files /dev/null and b/chroma_db/6de7f9b9-4776-411a-97cb-d8c64141c932/chroma.sqlite3 differ
diff --git a/chroma_db/6de7f9b9-4776-411a-97cb-d8c64141c932/index_metadata.pickle b/chroma_db/6de7f9b9-4776-411a-97cb-d8c64141c932/index_metadata.pickle
new file mode 100644
index 0000000..529832a
Binary files /dev/null and b/chroma_db/6de7f9b9-4776-411a-97cb-d8c64141c932/index_metadata.pickle differ
diff --git a/chroma_db/chroma.sqlite3 b/chroma_db/chroma.sqlite3
new file mode 100644
index 0000000..bf1a0d5
Binary files /dev/null and b/chroma_db/chroma.sqlite3 differ
diff --git a/config.py b/config.py
index f4933d7..f5b70a8 100644
--- a/config.py
+++ b/config.py
@@ -20,10 +20,11 @@
 os.makedirs(_d, exist_ok=True)
 
 # ── Model paths ────────────────────────────────────────────────────────────
-YOLO_MODEL_PATH = os.path.join(BASE_DIR, "model", "best.pt")
-INTEGRITY_MODEL_PATH = os.path.join(BASE_DIR, "model", "integrity_model.pth")
-CARBON_MODEL_PATH = os.path.join(BASE_DIR, "model",
"carbon_model.pkl") -LABEL_ENCODER_PATH = os.path.join(BASE_DIR, "model", "label_encoder.pkl") +YOLO_MODEL_PATH = os.path.join(BASE_DIR, "model", "best.pt") +THERMAL_YOLO_MODEL_PATH = os.path.join(BASE_DIR, "model", "bestthermal.pt") +INTEGRITY_MODEL_PATH = os.path.join(BASE_DIR, "model", "integrity_model.pth") +CARBON_MODEL_PATH = os.path.join(BASE_DIR, "model", "carbon_model.pkl") +LABEL_ENCODER_PATH = os.path.join(BASE_DIR, "model", "label_encoder.pkl") # ── CSV paths ────────────────────────────────────────────────────────────── DETECTIONS_CSV = os.path.join(DATA_FOLDER, "detections.csv") @@ -52,7 +53,7 @@ } # ── Auth ─────────────────────────────────────────────────────────────────── -PASSCODE = os.environ.get("PASSCODE", "SOLAR@2025") +PASSCODE = os.environ.get("PASSCODE", "Solar@2026") SESSION_TIMEOUT = int(os.environ.get("SESSION_TIMEOUT", 3600)) MAX_ATTEMPTS = 5 LOCKOUT_DURATION = 300 diff --git a/data/carbon.csv b/data/carbon.csv index b653eb4..9012775 100644 --- a/data/carbon.csv +++ b/data/carbon.csv @@ -1,3 +1,74 @@ timestamp,image_filename,city,panel_power_w,ambient_temp_c,irradiance_w_m2,emission_factor,num_defects,dominant_defect,total_degradation_pct,co2_kg_per_year 2026-02-19T23:52:17.786428,fb83acf6c6024c4099b8b801b329e830.jpg,Delhi,380.0,25.0,900.0,0.85,2,crack,1.42,9.55 2026-02-19T23:53:06.806821,dd203d7faf224a60812a6c2748291bb0.jpg,Delhi,380.0,25.0,900.0,0.85,2,crack,0.8,6.07 +2026-02-20T12:28:08.737413,f1a95e02b29c4569acda3723704b1c06.jpg,Chennai,380.0,31.0,900.0,0.82,6,finger,0.45,7.48 +2026-02-20T13:03:45.951848,fc0c05d5129b43449bcd5b63216d6b17.jpg,Chennai,380.0,32.0,900.0,0.82,1,finger,0.14,0.89 +2026-02-20T13:29:28.542298,cb44158798cb4378ae7ddcaad1218250.jpg,Chennai,380.0,32.0,900.0,0.82,1,crack,0.87,4.55 +2026-02-20T14:39:18.708734,f4a6c5d070cd4d11a0bd5b9019d0e005.jpg,Chennai,380.0,30.0,900.0,0.82,1,finger,0.14,0.9 +2026-02-20T14:48:38.377801,ecbb22bbb95e4e8bacb99046315a110d.jpg,Chennai,380.0,32.0,900.0,0.82,1,black_core,37.43,204.64 +2026-02-20T15:07:02.130804,20d6525fdf7a4d1085cb1397de6fc188.jpg,Chennai,380.0,30.0,900.0,0.82,1,black_core,38.28,204.73 +2026-02-20T16:52:45.206256,fa9a250a5d364e308d92301afa276be8.jpg,Chennai,380.0,32.0,900.0,0.82,2,finger,0.34,3.38 +2026-02-20T18:46:18.197115,3db7070a788c4c9e9728de40a2ec2c3e.jpg,Chennai,380.0,27.0,900.0,0.82,1,black_core,32.33,170.6 +2026-02-20T16:10:48.169351,d776dc1e2458451c923ddbcbd71d717c.jpg,Chennai,380.0,30.0,900.0,0.82,1,crack,12.76,69.12 +2026-02-20T16:11:03.203225,d776dc1e2458451c923ddbcbd71d717c.jpg,Chennai,380.0,30.0,900.0,0.82,1,crack,12.76,69.12 +2026-02-20T16:11:17.588066,d776dc1e2458451c923ddbcbd71d717c.jpg,Chennai,380.0,30.0,900.0,0.82,1,crack,12.76,69.12 +2026-02-20T16:26:29.566060,489ae0552c3e458698b4a3a6b213b166.jpg,Chennai,380.0,29.0,900.0,0.82,1,crack,12.76,69.1 +2026-02-20T16:29:36.831282,bbdfd5c6f2b541a8ae665b7a4da10056.jpg,Mumbai,380.0,31.0,900.0,0.78,1,black_core,37.64,196.01 +2026-02-20T16:31:28.365827,unknown,Mumbai,380.0,31.0,900.0,0.78,0,black_core,0.0,0.86 +2026-02-20T16:32:42.405905,d7d073f015504945b743bf22218d1a43.jpg,Mumbai,380.0,31.0,900.0,0.78,2,finger,0.34,2.87 +2026-02-20T16:35:39.052999,b66d8ff882b543a88efab271f23daeaf.jpg,"Thalavapalayam, Karur",380.0,34.0,900.0,0.8,3,finger,7.9,43.31 +2026-02-20T16:38:32.426088,e3629abe48fc4bfeb449c7fee882a1ff.jpg,"Thalavapalayam, Karur",380.0,34.0,900.0,0.8,1,black_core,39.27,200.86 +2026-02-20T16:48:47.831969,e8c0b12fdf2b4a16bd028f8036d8ae8c.jpg,"Thalavapalayam, Karur",380.0,33.0,900.0,0.8,1,black_core,39.27,200.83 
+2026-02-20T16:49:49.442941,afa768841e8b450c8e75d2348d48eafb.jpg,"Thalavapalayam, Karur",380.0,33.0,900.0,0.8,1,crack,12.76,68.12 +2026-02-20T17:08:45.494390,e64fc47c9c114afca19783b48eaf9fd9.jpg,"Thalavapalayam, Karur",380.0,33.0,900.0,0.8,2,finger,0.34,3.33 +2026-02-20T17:10:31.662859,6a1a647c3d904c03ab0df1ce926f01b2.jpg,"Thalavapalayam, Karur",380.0,33.0,900.0,0.8,3,crack,7.9,43.31 +2026-02-20T17:20:43.402220,7f8ce02c0c3c458f9d4ffa3887041a36.jpg,"Thalavapalayam, Karur",380.0,33.0,900.0,0.8,1,black_core,37.64,200.83 +2026-02-20T17:22:29.708047,f0dfcecb0b4d4cd3b897c79b50c19622.jpg,"Thalavapalayam, Karur",380.0,33.0,900.0,0.8,1,crack,0.9,5.35 +2026-02-20T18:04:57.334796,a671857c6760492dbd0674ef1746140e.jpg,"Thalavapalayam, Karur",380.0,32.0,900.0,0.8,1,black_core,39.82,200.77 +2026-02-20T18:06:35.371468,a81aaf65edbb4cea929811b42b7d34e0.jpg,"Thalavapalayam, Karur",380.0,32.0,900.0,0.8,5,finger,0.7,6.6 +2026-02-20T18:10:04.070486,unknown,"Thalavapalayam, Karur",380.0,32.0,900.0,0.8,0,black_core,0.0,0.99 +2026-02-20T18:12:28.066943,unknown,"Thalavapalayam, Karur",380.0,32.0,900.0,0.8,0,black_core,0.0,0.99 +2026-02-20T18:13:04.915114,unknown,"Thalavapalayam, Karur",380.0,32.0,900.0,0.8,0,black_core,0.0,0.99 +2026-02-20T18:50:05.829353,unknown,"Thalavapalayam, Karur",380.0,31.0,900.0,0.8,0,black_core,0.0,1.0 +2026-02-20T18:56:50.865377,59cbb8a88822463dbbba1d8fd2b025db.jpg,"Thalavapalayam, Karur",380.0,31.0,900.0,0.8,5,finger,0.7,6.6 +2026-02-20T19:18:38.842609,cea2b4f521264f61a3547a726e0e3bea.jpg,"Thalavapalayam, Karur",380.0,30.0,900.0,0.8,1,finger,0.19,1.62 +2026-02-20T19:22:50.583742,5952fedc7f65417c95407d6b6e4ffff2.jpg,"Thalavapalayam, Karur",380.0,30.0,900.0,0.8,1,black_core,40.0,200.87 +2026-02-20T20:43:10.641819,b6e3ba3cb62e497a8fc0a7968480ae26.jpg,"Thalavapalayam, Karur",380.0,28.0,900.0,0.8,2,finger,0.34,3.34 +2026-02-20T20:55:07.039752,a0df065710e24bb2b4323bbefbe59f44.jpg,"Thalavapalayam, Karur",380.0,28.0,900.0,0.8,3,finger,3.69,20.76 +2026-02-20T21:15:25.557610,32814407b22d4be9bd3576b593bfd054.jpg,"Thalavapalayam, Karur",380.0,27.0,900.0,0.8,1,crack,0.71,3.84 +2026-02-20T21:17:19.103017,08c69eff45214e72b09ec77f630d1171.jpg,Fort Worth,380.0,6.0,900.0,0.8,3,crack,7.9,43.36 +2026-02-20T21:20:59.307689,e10852b13c574c94a6a27e612f223dd1.jpg,"Thalavapalayam, Karur",380.0,27.0,900.0,0.8,3,crack,7.9,43.31 +2026-02-20T21:22:23.688302,adee2ce989a146d3a427f31d3cdd9e7e.jpg,"Thalavapalayam, Karur",380.0,27.0,900.0,0.8,3,crack,7.9,43.31 +2026-02-20T22:03:41.083880,f59c69d55bf3467f98b45197fcec07b4.jpg,"Thalavapalayam, Karur",380.0,27.0,900.0,0.8,3,finger,7.9,43.31 +2026-02-20T23:13:31.481230,832a638e6bbc4ed98d75cc9d60fbf48b.jpg,"Thalavapalayam, Karur",380.0,26.0,900.0,0.8,1,black_core,37.64,200.48 +2026-02-20T23:14:55.071865,50bc6dd265274e9d9ed0f594a0b85f20.jpg,"Thalavapalayam, Karur",380.0,26.0,900.0,0.8,3,black_core,7.9,43.3 +2026-02-20T23:21:27.746803,ece21e99bb694264adfb4194867d201f.jpg,"Thalavapalayam, Karur",380.0,25.0,900.0,0.8,1,black_core,37.64,200.48 +2026-02-20T23:25:08.309735,26dae42761c949df8307393e1ebf1b7e.jpg,"Thalavapalayam, Karur",380.0,25.0,900.0,0.8,1,black_core,37.64,200.48 +2026-02-20T23:27:59.501476,9e7e01f0a07b4d338969f2eda0efa874.jpg,"Thalavapalayam, Karur",380.0,25.0,900.0,0.8,1,black_core,37.64,200.48 +2026-02-20T23:30:27.958387,c3b7ae8ff7dc4ffab9f19292b0c97661.jpg,"Thalavapalayam, Karur",380.0,25.0,900.0,0.8,1,black_core,37.64,200.48 +2026-02-20T23:40:55.690781,4aca04cbda9441e2a91812954b992f83.jpg,"Thalavapalayam, Karur",380.0,25.0,900.0,0.8,2,finger,0.34,3.33 
+2026-02-21T08:22:00.543834,202afc91227e44bfb73b917280781c4c.jpg,"Thalavapalayam, Karur",380.0,25.0,900.0,0.8,1,black_core,37.64,200.48 +2026-02-21T08:40:42.913738,8e8026f14bb2448fa966c3b555363815.jpg,"Thalavapalayam, Karur",380.0,26.0,900.0,0.8,1,black_core,38.28,200.48 +2026-02-21T08:43:20.178768,dbbfffed512849f5abbd8db3853a83ab.jpg,"Thalavapalayam, Karur",380.0,26.0,900.0,0.8,1,black_core,37.64,200.48 +2026-02-21T09:00:06.901018,3172bbd7795848fbb7ff0bab0806ee8e.jpg,"Thalavapalayam, Karur",380.0,27.0,900.0,0.8,2,finger,2.42,13.85 +2026-02-21T09:02:23.634324,fe305043ffe34d629c52b5bbef02453d.jpg,"Thalavapalayam, Karur",380.0,27.0,900.0,0.8,1,crack,1.16,6.69 +2026-02-21T09:03:19.008549,40d550410790458ea5f2dfb1a0beffc1.jpg,"Thalavapalayam, Karur",380.0,27.0,900.0,0.8,1,black_core,35.98,185.1 +2026-02-21T09:04:48.228536,65b20bd844da4dd7be81e64e5a62d397.jpg,"Thalavapalayam, Karur",380.0,27.0,900.0,0.8,1,black_core,39.27,200.72 +2026-02-21T09:12:12.633639,6f3e03f3a18c490692172e34e6bc9b82.jpg,Chennai,380.0,32.0,900.0,0.82,1,black_core,39.27,204.64 +2026-02-21T09:16:08.057523,fe53d4a66d3a4418b768acc7a1ea3eaa.jpg,"Thalavapalayam, Karur",380.0,27.0,900.0,0.8,1,black_core,39.27,200.72 +2026-02-21T09:17:54.697915,f8ba7eaa9e394dc1aedeff77fd4f7444.jpg,"Thalavapalayam, Karur",380.0,32.0,900.0,0.8,1,black_core,39.27,200.77 +2026-02-21T09:19:13.227453,ac645665379b4b4d8fcb560cbb0ced2d.jpg,Chennai,380.0,32.0,900.0,0.82,1,black_core,39.27,204.64 +2026-02-21T09:21:48.315160,a444e738d98f4fbb94bada5110fd4575.jpg,"Thalavapalayam, Karur",380.0,28.0,900.0,0.8,1,black_core,38.28,200.78 +2026-02-21T09:38:08.276310,5bd1493a63a14d17bb6b7435cab529a2.jpg,"Thalavapalayam, Karur",380.0,28.0,900.0,0.8,1,black_core,38.28,200.78 +2026-02-21T09:46:17.153398,072eb752bd1c446c9720f0fa796687c3.jpg,"Thalavapalayam, Karur",380.0,29.0,900.0,0.8,1,crack,0.87,4.6 +2026-02-21T09:53:20.995076,188b1882313c40a891d13f76c46ab8c2.jpg,"Thalavapalayam, Karur",380.0,32.0,900.0,0.8,3,black_core,7.9,43.31 +2026-02-21T10:03:13.366105,a9707141cf164f9792109d36118b668d.jpg,"Thalavapalayam, Karur",380.0,29.0,900.0,0.8,3,crack,7.9,43.31 +2026-02-21T10:12:11.623677,5eabc5295e124fa797fc1156f29b9cc4.jpg,"Thalavapalayam, Karur",380.0,29.0,900.0,0.8,1,black_core,38.28,200.85 +2026-02-21T10:19:07.355053,2ef1b37221da44db9e7aed6d8c1f4207.jpg,"Thalavapalayam, Karur",380.0,30.0,900.0,0.8,1,black_core,37.64,200.87 +2026-02-21T10:26:26.212152,c579f85c6f4349e08b0e675c75394df7.jpg,"Thalavapalayam, Karur",380.0,30.0,900.0,0.8,1,black_core,32.33,167.1 +2026-02-21T10:30:48.462866,50145cfd87714a1c9f42dbf2a8e80b1b.jpg,"Thalavapalayam, Karur",380.0,30.0,900.0,0.8,2,finger,0.32,3.36 +2026-02-21T11:17:26.671286,cb8b1801ed8f421e9865567aa8e74744.jpg,"Thalavapalayam, Karur",380.0,31.0,900.0,0.8,5,finger,0.7,6.6 +2026-03-24T23:50:31.456014,a6672fc74dd44e82810a9858b48c48bc.jpeg,"Thalavapalayam, Karur",380.0,27.0,900.0,0.8,2,finger,0.34,3.33 +2026-03-25T08:44:40.487544,6a10757313614b40ba0dc05eb5dbe90b.jpeg,"Thalavapalayam, Karur",380.0,27.0,900.0,0.8,2,finger,0.34,3.33 +2026-03-25T09:04:48.571746,f3e67d5c201d4101a27cd57dfb3ddd83.jpeg,"Thalavapalayam, Karur",380.0,28.0,900.0,0.8,2,finger,0.34,3.34 +2026-03-25T09:13:01.670089,eeeb3dbfa97b456e8457be7c989af54c.jpeg,Chennai,380.0,28.0,900.0,0.82,2,finger,0.34,3.4 diff --git a/data/detections.csv b/data/detections.csv index 95feeea..efa3c9b 100644 --- a/data/detections.csv +++ b/data/detections.csv @@ -7,3 +7,157 @@ timestamp,image_filename,defect_class,confidence,bbox_x1,bbox_y1,bbox_x2,bbox_y2 
2026-02-19T23:52:45.102379,3d70a4ba33414709824e201a13b8b010.jpg,finger,0.7896,365,658,386,802,0.002884,1024,1024 2026-02-19T23:52:56.892768,dd203d7faf224a60812a6c2748291bb0.jpg,crack,0.9055,647,913,787,991,0.010414,1024,1024 2026-02-19T23:52:56.892768,dd203d7faf224a60812a6c2748291bb0.jpg,crack,0.856,227,908,295,993,0.005512,1024,1024 +2026-02-20T12:27:37.764743,f1a95e02b29c4569acda3723704b1c06.jpg,finger,0.7994,408,413,424,501,0.001343,1024,1024 +2026-02-20T12:27:37.764743,f1a95e02b29c4569acda3723704b1c06.jpg,finger,0.7978,561,692,579,813,0.002077,1024,1024 +2026-02-20T12:27:37.764743,f1a95e02b29c4569acda3723704b1c06.jpg,finger,0.7605,248,704,265,814,0.001783,1024,1024 +2026-02-20T12:27:37.764743,f1a95e02b29c4569acda3723704b1c06.jpg,finger,0.6608,303,531,320,603,0.001167,1024,1024 +2026-02-20T12:27:37.764743,f1a95e02b29c4569acda3723704b1c06.jpg,finger,0.6452,210,756,226,864,0.001648,1024,1024 +2026-02-20T12:27:37.764743,f1a95e02b29c4569acda3723704b1c06.jpg,finger,0.6277,752,478,767,551,0.001044,1024,1024 +2026-02-20T13:03:37.934921,fc0c05d5129b43449bcd5b63216d6b17.jpg,finger,0.7896,365,658,386,802,0.002884,1024,1024 +2026-02-20T13:29:21.865368,cb44158798cb4378ae7ddcaad1218250.jpg,crack,0.8322,677,205,982,265,0.017452,1024,1024 +2026-02-20T14:39:07.403251,f4a6c5d070cd4d11a0bd5b9019d0e005.jpg,finger,0.7896,365,658,386,802,0.002884,1024,1024 +2026-02-20T14:48:32.162506,ecbb22bbb95e4e8bacb99046315a110d.jpg,black_core,0.9596,67,87,959,967,0.748596,1024,1024 +2026-02-20T15:06:21.850745,20d6525fdf7a4d1085cb1397de6fc188.jpg,black_core,0.9684,62,74,957,971,0.765624,1024,1024 +<<<<<<< HEAD +2026-02-20T16:52:40.433291,fa9a250a5d364e308d92301afa276be8.jpg,finger,0.8101,604,233,626,380,0.003084,1024,1024 +2026-02-20T16:52:40.433291,fa9a250a5d364e308d92301afa276be8.jpg,finger,0.7668,463,430,484,616,0.003725,1024,1024 +2026-02-20T18:45:23.649107,3db7070a788c4c9e9728de40a2ec2c3e.jpg,black_core,0.9603,97,94,879,961,0.646585,1024,1024 +======= +2026-02-20T15:47:29.394274,fd70e1bc73374ae4ad76fbeb13a641a0.jpg,black_core,0.9762,49,64,957,971,0.785404,1024,1024 +2026-02-20T15:47:51.367916,e62b080aff2542068cefdc5c78450a44.jpg,black_core,0.9762,49,64,957,971,0.785404,1024,1024 +2026-02-20T15:50:05.261398,e28113efbd254182a264088483b11d74.jpg,black_core,0.9762,49,64,957,971,0.785404,1024,1024 +2026-02-20T15:50:29.058779,a458f8dc57d64f4ab53a3ebc9991b596.jpg,crack,0.9158,44,768,225,850,0.014154,1024,1024 +2026-02-20T15:54:35.402721,6db2bd7317cc4241a607f3b09fe1e062.jpg,crack,0.6455,548,75,860,933,0.255295,1024,1024 +2026-02-20T15:54:35.402721,6db2bd7317cc4241a607f3b09fe1e062.jpg,crack,0.3979,682,413,785,631,0.021414,1024,1024 +2026-02-20T15:54:35.402721,6db2bd7317cc4241a607f3b09fe1e062.jpg,crack,0.3691,777,661,879,872,0.020525,1024,1024 +2026-02-20T16:10:13.779654,d776dc1e2458451c923ddbcbd71d717c.jpg,crack,0.6455,548,75,860,933,0.255295,1024,1024 +2026-02-20T16:10:13.779654,d776dc1e2458451c923ddbcbd71d717c.jpg,crack,0.3979,682,413,785,631,0.021414,1024,1024 +2026-02-20T16:10:13.779654,d776dc1e2458451c923ddbcbd71d717c.jpg,crack,0.3691,777,661,879,872,0.020525,1024,1024 +2026-02-20T16:24:11.128821,1d04c27f350f40009ff8a4e14f0568fe.jpg,crack,0.6455,548,75,860,933,0.255295,1024,1024 +2026-02-20T16:24:11.128821,1d04c27f350f40009ff8a4e14f0568fe.jpg,crack,0.3979,682,413,785,631,0.021414,1024,1024 +2026-02-20T16:24:11.128821,1d04c27f350f40009ff8a4e14f0568fe.jpg,crack,0.3691,777,661,879,872,0.020525,1024,1024 +2026-02-20T16:26:20.951553,489ae0552c3e458698b4a3a6b213b166.jpg,crack,0.6455,548,75,860,933,0.255295,1024,1024 
+2026-02-20T16:26:20.951553,489ae0552c3e458698b4a3a6b213b166.jpg,crack,0.3979,682,413,785,631,0.021414,1024,1024 +2026-02-20T16:26:20.951553,489ae0552c3e458698b4a3a6b213b166.jpg,crack,0.3691,777,661,879,872,0.020525,1024,1024 +2026-02-20T16:29:28.740044,bbdfd5c6f2b541a8ae665b7a4da10056.jpg,black_core,0.9679,63,80,950,970,0.752859,1024,1024 +2026-02-20T16:32:33.660155,d7d073f015504945b743bf22218d1a43.jpg,finger,0.8101,604,233,626,380,0.003084,1024,1024 +2026-02-20T16:32:33.660155,d7d073f015504945b743bf22218d1a43.jpg,finger,0.7668,463,430,484,616,0.003725,1024,1024 +2026-02-20T16:35:31.358787,b66d8ff882b543a88efab271f23daeaf.jpg,black_core,0.8912,781,482,1015,880,0.088818,1024,1024 +2026-02-20T16:35:31.358787,b66d8ff882b543a88efab271f23daeaf.jpg,crack,0.856,313,582,817,723,0.067772,1024,1024 +2026-02-20T16:35:31.358787,b66d8ff882b543a88efab271f23daeaf.jpg,finger,0.6491,513,683,524,817,0.001406,1024,1024 +2026-02-20T16:38:26.437424,e3629abe48fc4bfeb449c7fee882a1ff.jpg,black_core,0.9762,49,64,957,971,0.785404,1024,1024 +2026-02-20T16:48:38.848604,e8c0b12fdf2b4a16bd028f8036d8ae8c.jpg,black_core,0.9762,49,64,957,971,0.785404,1024,1024 +2026-02-20T16:49:37.696102,afa768841e8b450c8e75d2348d48eafb.jpg,crack,0.6455,548,75,860,933,0.255295,1024,1024 +2026-02-20T16:49:37.696102,afa768841e8b450c8e75d2348d48eafb.jpg,crack,0.3979,682,413,785,631,0.021414,1024,1024 +2026-02-20T16:49:37.696102,afa768841e8b450c8e75d2348d48eafb.jpg,crack,0.3691,777,661,879,872,0.020525,1024,1024 +2026-02-20T17:08:34.593515,e64fc47c9c114afca19783b48eaf9fd9.jpg,finger,0.8101,604,233,626,380,0.003084,1024,1024 +2026-02-20T17:08:34.593515,e64fc47c9c114afca19783b48eaf9fd9.jpg,finger,0.7668,463,430,484,616,0.003725,1024,1024 +2026-02-20T17:10:23.978278,6a1a647c3d904c03ab0df1ce926f01b2.jpg,black_core,0.8912,781,482,1015,880,0.088818,1024,1024 +2026-02-20T17:10:23.978278,6a1a647c3d904c03ab0df1ce926f01b2.jpg,crack,0.856,313,582,817,723,0.067772,1024,1024 +2026-02-20T17:10:23.978278,6a1a647c3d904c03ab0df1ce926f01b2.jpg,finger,0.6491,513,683,524,817,0.001406,1024,1024 +2026-02-20T17:20:35.402878,7f8ce02c0c3c458f9d4ffa3887041a36.jpg,black_core,0.9679,63,80,950,970,0.752859,1024,1024 +2026-02-20T17:22:21.363139,f0dfcecb0b4d4cd3b897c79b50c19622.jpg,crack,0.8482,43,253,207,368,0.017986,1024,1024 +2026-02-20T18:04:44.136401,a671857c6760492dbd0674ef1746140e.jpg,black_core,0.9632,50,78,983,973,0.796351,1024,1024 +2026-02-20T18:06:26.962369,a81aaf65edbb4cea929811b42b7d34e0.jpg,crack,0.8736,514,465,617,523,0.005697,1024,1024 +2026-02-20T18:06:26.962369,a81aaf65edbb4cea929811b42b7d34e0.jpg,finger,0.8226,202,752,225,860,0.002369,1024,1024 +2026-02-20T18:06:26.962369,a81aaf65edbb4cea929811b42b7d34e0.jpg,finger,0.8115,746,476,766,597,0.002308,1024,1024 +2026-02-20T18:06:26.962369,a81aaf65edbb4cea929811b42b7d34e0.jpg,finger,0.802,299,530,318,607,0.001395,1024,1024 +2026-02-20T18:06:26.962369,a81aaf65edbb4cea929811b42b7d34e0.jpg,finger,0.8012,242,703,262,817,0.002174,1024,1024 +2026-02-20T18:53:02.209584,02312b7b54834f79af0ba983df007ce9.jpg,crack,0.8482,43,253,207,368,0.017986,1024,1024 +2026-02-20T18:56:24.689432,59cbb8a88822463dbbba1d8fd2b025db.jpg,crack,0.8736,514,465,617,523,0.005697,1024,1024 +2026-02-20T18:56:24.689432,59cbb8a88822463dbbba1d8fd2b025db.jpg,finger,0.8226,202,752,225,860,0.002369,1024,1024 +2026-02-20T18:56:24.689432,59cbb8a88822463dbbba1d8fd2b025db.jpg,finger,0.8115,746,476,766,597,0.002308,1024,1024 +2026-02-20T18:56:24.689432,59cbb8a88822463dbbba1d8fd2b025db.jpg,finger,0.802,299,530,318,607,0.001395,1024,1024 
+2026-02-20T18:56:24.689432,59cbb8a88822463dbbba1d8fd2b025db.jpg,finger,0.8012,242,703,262,817,0.002174,1024,1024 +2026-02-20T19:18:31.009993,cea2b4f521264f61a3547a726e0e3bea.jpg,finger,0.7793,764,660,787,832,0.003773,1024,1024 +2026-02-20T19:22:42.471372,5952fedc7f65417c95407d6b6e4ffff2.jpg,black_core,0.9642,49,69,987,982,0.816721,1024,1024 +2026-02-20T19:31:18.537560,5d88f82edace48b99e865d4cd2d01ec4.jpg,black_core,0.9603,97,94,879,961,0.646585,1024,1024 +2026-02-20T19:41:14.892756,2b4d274d68a54e01ba109bcedf12db32.jpg,black_core,0.7797,145,0,1024,707,0.592664,1024,1024 +2026-02-20T19:41:14.892756,2b4d274d68a54e01ba109bcedf12db32.jpg,black_core,0.7466,0,0,943,591,0.531495,1024,1024 +2026-02-20T19:41:14.892756,2b4d274d68a54e01ba109bcedf12db32.jpg,black_core,0.7306,0,0,944,808,0.727417,1024,1024 +2026-02-20T19:41:14.892756,2b4d274d68a54e01ba109bcedf12db32.jpg,black_core,0.7085,211,51,1024,1006,0.740447,1024,1024 +2026-02-20T19:41:14.892756,2b4d274d68a54e01ba109bcedf12db32.jpg,black_core,0.4324,0,82,880,1024,0.790558,1024,1024 +2026-02-20T19:41:14.892756,2b4d274d68a54e01ba109bcedf12db32.jpg,black_core,0.3298,153,227,1024,1024,0.662028,1024,1024 +2026-02-20T20:42:35.242954,b6e3ba3cb62e497a8fc0a7968480ae26.jpg,finger,0.8101,604,233,626,380,0.003084,1024,1024 +2026-02-20T20:42:35.242954,b6e3ba3cb62e497a8fc0a7968480ae26.jpg,finger,0.7668,463,430,484,616,0.003725,1024,1024 +2026-02-20T20:54:49.294233,a0df065710e24bb2b4323bbefbe59f44.jpg,crack,0.9199,652,217,988,438,0.070816,1024,1024 +2026-02-20T20:54:49.294233,a0df065710e24bb2b4323bbefbe59f44.jpg,finger,0.7645,360,199,372,388,0.002163,1024,1024 +2026-02-20T20:54:49.294233,a0df065710e24bb2b4323bbefbe59f44.jpg,finger,0.5487,235,910,246,989,0.000829,1024,1024 +2026-02-20T21:15:17.260448,32814407b22d4be9bd3576b593bfd054.jpg,crack,0.9158,44,768,225,850,0.014154,1024,1024 +2026-02-20T21:17:08.915636,08c69eff45214e72b09ec77f630d1171.jpg,black_core,0.8912,781,482,1015,880,0.088818,1024,1024 +2026-02-20T21:17:08.915636,08c69eff45214e72b09ec77f630d1171.jpg,crack,0.856,313,582,817,723,0.067772,1024,1024 +2026-02-20T21:17:08.915636,08c69eff45214e72b09ec77f630d1171.jpg,finger,0.6491,513,683,524,817,0.001406,1024,1024 +2026-02-20T21:20:51.559665,e10852b13c574c94a6a27e612f223dd1.jpg,black_core,0.8912,781,482,1015,880,0.088818,1024,1024 +2026-02-20T21:20:51.559665,e10852b13c574c94a6a27e612f223dd1.jpg,crack,0.856,313,582,817,723,0.067772,1024,1024 +2026-02-20T21:20:51.559665,e10852b13c574c94a6a27e612f223dd1.jpg,finger,0.6491,513,683,524,817,0.001406,1024,1024 +2026-02-20T21:22:15.814979,adee2ce989a146d3a427f31d3cdd9e7e.jpg,black_core,0.8912,781,482,1015,880,0.088818,1024,1024 +2026-02-20T21:22:15.814979,adee2ce989a146d3a427f31d3cdd9e7e.jpg,crack,0.856,313,582,817,723,0.067772,1024,1024 +2026-02-20T21:22:15.814979,adee2ce989a146d3a427f31d3cdd9e7e.jpg,finger,0.6491,513,683,524,817,0.001406,1024,1024 +2026-02-20T22:03:02.750214,f59c69d55bf3467f98b45197fcec07b4.jpg,black_core,0.8912,781,482,1015,880,0.088818,1024,1024 +2026-02-20T22:03:02.750214,f59c69d55bf3467f98b45197fcec07b4.jpg,crack,0.856,313,582,817,723,0.067772,1024,1024 +2026-02-20T22:03:02.750214,f59c69d55bf3467f98b45197fcec07b4.jpg,finger,0.6491,513,683,524,817,0.001406,1024,1024 +2026-02-20T23:13:23.863912,832a638e6bbc4ed98d75cc9d60fbf48b.jpg,black_core,0.9679,63,80,950,970,0.752859,1024,1024 +2026-02-20T23:14:48.146651,50bc6dd265274e9d9ed0f594a0b85f20.jpg,black_core,0.8912,781,482,1015,880,0.088818,1024,1024 
+2026-02-20T23:14:48.146651,50bc6dd265274e9d9ed0f594a0b85f20.jpg,crack,0.856,313,582,817,723,0.067772,1024,1024 +2026-02-20T23:14:48.146651,50bc6dd265274e9d9ed0f594a0b85f20.jpg,finger,0.6491,513,683,524,817,0.001406,1024,1024 +2026-02-20T23:21:19.799396,ece21e99bb694264adfb4194867d201f.jpg,black_core,0.9679,63,80,950,970,0.752859,1024,1024 +2026-02-20T23:25:00.343998,26dae42761c949df8307393e1ebf1b7e.jpg,black_core,0.9679,63,80,950,970,0.752859,1024,1024 +2026-02-20T23:27:51.608642,9e7e01f0a07b4d338969f2eda0efa874.jpg,black_core,0.9679,63,80,950,970,0.752859,1024,1024 +2026-02-20T23:30:19.264020,c3b7ae8ff7dc4ffab9f19292b0c97661.jpg,black_core,0.9679,63,80,950,970,0.752859,1024,1024 +2026-02-20T23:40:30.016361,4aca04cbda9441e2a91812954b992f83.jpg,finger,0.8101,604,233,626,380,0.003084,1024,1024 +2026-02-20T23:40:30.016361,4aca04cbda9441e2a91812954b992f83.jpg,finger,0.7668,463,430,484,616,0.003725,1024,1024 +2026-02-20T23:44:21.467566,8b48ad3d84224fc1904435a48e512c47.jpg,black_core,0.9679,63,80,950,970,0.752859,1024,1024 +2026-02-20T23:46:25.847061,d391656b6f454c4cba40dc101696127e.jpg,finger,0.8101,604,233,626,380,0.003084,1024,1024 +2026-02-20T23:46:25.847061,d391656b6f454c4cba40dc101696127e.jpg,finger,0.7668,463,430,484,616,0.003725,1024,1024 +2026-02-20T23:47:56.529007,07b9699873514ac99ba2e8a0ef9b56cd.jpg,black_core,0.9679,63,80,950,970,0.752859,1024,1024 +2026-02-21T08:20:30.858237,36b2841c772a464e9e70845327c45424.jpg,black_core,0.9603,97,94,879,961,0.646585,1024,1024 +2026-02-21T08:21:19.698252,202afc91227e44bfb73b917280781c4c.jpg,black_core,0.9679,63,80,950,970,0.752859,1024,1024 +2026-02-21T08:40:30.676018,8e8026f14bb2448fa966c3b555363815.jpg,black_core,0.9676,65,73,960,970,0.765624,1024,1024 +2026-02-21T08:43:09.145088,dbbfffed512849f5abbd8db3853a83ab.jpg,black_core,0.9679,63,80,950,970,0.752859,1024,1024 +2026-02-21T08:44:44.145853,c0a0b277b09c435c8d3a8e55d309cb6c.jpg,crack,0.8736,514,465,617,523,0.005697,1024,1024 +2026-02-21T08:44:44.145853,c0a0b277b09c435c8d3a8e55d309cb6c.jpg,finger,0.8226,202,752,225,860,0.002369,1024,1024 +2026-02-21T08:44:44.145853,c0a0b277b09c435c8d3a8e55d309cb6c.jpg,finger,0.8115,746,476,766,597,0.002308,1024,1024 +2026-02-21T08:44:44.145853,c0a0b277b09c435c8d3a8e55d309cb6c.jpg,finger,0.802,299,530,318,607,0.001395,1024,1024 +2026-02-21T08:44:44.145853,c0a0b277b09c435c8d3a8e55d309cb6c.jpg,finger,0.8012,242,703,262,817,0.002174,1024,1024 +2026-02-21T08:59:49.458821,3172bbd7795848fbb7ff0bab0806ee8e.jpg,crack,0.8172,526,419,869,549,0.042524,1024,1024 +2026-02-21T08:59:49.458821,3172bbd7795848fbb7ff0bab0806ee8e.jpg,finger,0.7735,877,192,911,375,0.005934,1024,1024 +2026-02-21T09:00:50.175958,3ebd19962533435cb6a9bf57ff5d1e23.jpg,crack,0.9153,242,327,678,571,0.101456,1024,1024 +2026-02-21T09:00:50.175958,3ebd19962533435cb6a9bf57ff5d1e23.jpg,star_crack,0.7308,553,477,666,567,0.009699,1024,1024 +2026-02-21T09:02:17.011310,fe305043ffe34d629c52b5bbef02453d.jpg,crack,0.9223,49,360,275,468,0.023277,1024,1024 +2026-02-21T09:03:12.033412,40d550410790458ea5f2dfb1a0beffc1.jpg,black_core,0.9643,106,77,952,969,0.719673,1024,1024 +2026-02-21T09:04:40.423723,65b20bd844da4dd7be81e64e5a62d397.jpg,black_core,0.9762,49,64,957,971,0.785404,1024,1024 +2026-02-21T09:11:54.709198,6f3e03f3a18c490692172e34e6bc9b82.jpg,black_core,0.9762,49,64,957,971,0.785404,1024,1024 +2026-02-21T09:15:58.568743,fe53d4a66d3a4418b768acc7a1ea3eaa.jpg,black_core,0.9762,49,64,957,971,0.785404,1024,1024 
+2026-02-21T09:17:40.292013,f8ba7eaa9e394dc1aedeff77fd4f7444.jpg,black_core,0.9762,49,64,957,971,0.785404,1024,1024 +2026-02-21T09:19:01.503906,ac645665379b4b4d8fcb560cbb0ced2d.jpg,black_core,0.9762,49,64,957,971,0.785404,1024,1024 +2026-02-21T09:21:35.403489,a444e738d98f4fbb94bada5110fd4575.jpg,black_core,0.9684,62,74,957,971,0.765624,1024,1024 +2026-02-21T09:37:59.500412,5bd1493a63a14d17bb6b7435cab529a2.jpg,black_core,0.9676,65,73,960,970,0.765624,1024,1024 +2026-02-21T09:46:08.816004,072eb752bd1c446c9720f0fa796687c3.jpg,crack,0.8322,677,205,982,265,0.017452,1024,1024 +2026-02-21T09:53:06.840824,188b1882313c40a891d13f76c46ab8c2.jpg,black_core,0.8912,781,482,1015,880,0.088818,1024,1024 +2026-02-21T09:53:06.840824,188b1882313c40a891d13f76c46ab8c2.jpg,crack,0.856,313,582,817,723,0.067772,1024,1024 +2026-02-21T09:53:06.840824,188b1882313c40a891d13f76c46ab8c2.jpg,finger,0.6491,513,683,524,817,0.001406,1024,1024 +2026-02-21T10:03:05.201425,a9707141cf164f9792109d36118b668d.jpg,black_core,0.8912,781,482,1015,880,0.088818,1024,1024 +2026-02-21T10:03:05.201425,a9707141cf164f9792109d36118b668d.jpg,crack,0.856,313,582,817,723,0.067772,1024,1024 +2026-02-21T10:03:05.201425,a9707141cf164f9792109d36118b668d.jpg,finger,0.6491,513,683,524,817,0.001406,1024,1024 +2026-02-21T10:12:04.057380,5eabc5295e124fa797fc1156f29b9cc4.jpg,black_core,0.9676,65,73,960,970,0.765624,1024,1024 +2026-02-21T10:18:52.351819,2ef1b37221da44db9e7aed6d8c1f4207.jpg,black_core,0.9679,63,80,950,970,0.752859,1024,1024 +2026-02-21T10:26:15.397064,c579f85c6f4349e08b0e675c75394df7.jpg,black_core,0.9603,97,94,879,961,0.646585,1024,1024 +2026-02-21T10:30:34.438178,50145cfd87714a1c9f42dbf2a8e80b1b.jpg,star_crack,0.9444,389,577,471,635,0.004536,1024,1024 +2026-02-21T10:30:34.438178,50145cfd87714a1c9f42dbf2a8e80b1b.jpg,finger,0.6291,187,199,200,355,0.001934,1024,1024 +2026-02-21T11:17:14.675956,cb8b1801ed8f421e9865567aa8e74744.jpg,crack,0.8736,514,465,617,523,0.005697,1024,1024 +2026-02-21T11:17:14.675956,cb8b1801ed8f421e9865567aa8e74744.jpg,finger,0.8226,202,752,225,860,0.002369,1024,1024 +2026-02-21T11:17:14.675956,cb8b1801ed8f421e9865567aa8e74744.jpg,finger,0.8115,746,476,766,597,0.002308,1024,1024 +2026-02-21T11:17:14.675956,cb8b1801ed8f421e9865567aa8e74744.jpg,finger,0.802,299,530,318,607,0.001395,1024,1024 +2026-02-21T11:17:14.675956,cb8b1801ed8f421e9865567aa8e74744.jpg,finger,0.8012,242,703,262,817,0.002174,1024,1024 +2026-03-24T22:47:46.526705,277cc81c801f4672b9e78d16dd996051.jpeg,crack,0.8482,43,253,207,368,0.017986,1024,1024 +2026-03-24T23:47:10.502385,69f9b58eab6f4a318db9fc019d07f8a8.jpeg,finger,0.8101,604,233,626,380,0.003084,1024,1024 +2026-03-24T23:47:10.502385,69f9b58eab6f4a318db9fc019d07f8a8.jpeg,finger,0.7668,463,430,484,616,0.003725,1024,1024 +2026-03-24T23:50:23.253881,a6672fc74dd44e82810a9858b48c48bc.jpeg,finger,0.8101,604,233,626,380,0.003084,1024,1024 +2026-03-24T23:50:23.253881,a6672fc74dd44e82810a9858b48c48bc.jpeg,finger,0.7668,463,430,484,616,0.003725,1024,1024 +2026-03-24T23:53:11.578422,52e60bb002a54dea8c99471dc7dddb2b.jpeg,none,0,0,0,0,0,0,640,640 +2026-03-25T08:44:31.392345,6a10757313614b40ba0dc05eb5dbe90b.jpeg,finger,0.8101,604,233,626,380,0.003084,1024,1024 +2026-03-25T08:44:31.392345,6a10757313614b40ba0dc05eb5dbe90b.jpeg,finger,0.7668,463,430,484,616,0.003725,1024,1024 +2026-03-25T09:04:37.761557,f3e67d5c201d4101a27cd57dfb3ddd83.jpeg,finger,0.8101,604,233,626,380,0.003084,1024,1024 +2026-03-25T09:04:37.761557,f3e67d5c201d4101a27cd57dfb3ddd83.jpeg,finger,0.7668,463,430,484,616,0.003725,1024,1024 
+2026-03-25T09:12:56.595425,eeeb3dbfa97b456e8457be7c989af54c.jpeg,finger,0.8101,604,233,626,380,0.003084,1024,1024 +2026-03-25T09:12:56.595425,eeeb3dbfa97b456e8457be7c989af54c.jpeg,finger,0.7668,463,430,484,616,0.003725,1024,1024 diff --git a/find_bug.py b/find_bug.py new file mode 100644 index 0000000..fe16f62 --- /dev/null +++ b/find_bug.py @@ -0,0 +1,23 @@ +import re + +file_path = "e:/Projects/SolaritivityPlus/templates/partials/js.html" +with open(file_path, "r", encoding="utf-8") as f: + lines = f.readlines() + +in_template = False +start_line = -1 + +for i, line in enumerate(lines): + backticks = re.findall(r'`', line) + for _ in backticks: + if not in_template: + in_template = True + start_line = i + 1 + else: + in_template = False + start_line = -1 + +if in_template: + print(f"Unterminated template literal started at line {start_line}") +else: + print("All template literals seem terminated (inline check).") diff --git a/fyp_pycell/Scripts/Activate.ps1 b/fyp_pycell/Scripts/Activate.ps1 new file mode 100644 index 0000000..181c1ef --- /dev/null +++ b/fyp_pycell/Scripts/Activate.ps1 @@ -0,0 +1,405 @@ +<# +.Synopsis +Activate a Python virtual environment for the current PowerShell session. + +.Description +Pushes the python executable for a virtual environment to the front of the +$Env:PATH environment variable and sets the prompt to signify that you are +in a Python virtual environment. Makes use of the command line switches as +well as the `pyvenv.cfg` file values present in the virtual environment. + +.Parameter VenvDir +Path to the directory that contains the virtual environment to activate. The +default value for this is the parent of the directory that the Activate.ps1 +script is located within. + +.Parameter Prompt +The prompt prefix to display when this virtual environment is activated. By +default, this prompt is the name of the virtual environment folder (VenvDir) +surrounded by parentheses and followed by a single space (ie. '(.venv) '). + +.Example +Activate.ps1 +Activates the Python virtual environment that contains the Activate.ps1 script. + +.Example +Activate.ps1 -Verbose +Activates the Python virtual environment that contains the Activate.ps1 script, +and shows extra information about the activation as it executes. + +.Example +Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv +Activates the Python virtual environment located in the specified location. + +.Example +Activate.ps1 -Prompt "MyPython" +Activates the Python virtual environment that contains the Activate.ps1 script, +and prefixes the current prompt with the specified string (surrounded in +parentheses) while the virtual environment is active. + +.Notes +On Windows, it may be required to enable this Activate.ps1 script by setting the +execution policy for the user. You can do this by issuing the following PowerShell +command: + +PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser + +For more information on Execution Policies: +https://go.microsoft.com/fwlink/?LinkID=135170 + +#> +Param( + [Parameter(Mandatory = $false)] + [String] + $VenvDir, + [Parameter(Mandatory = $false)] + [String] + $Prompt +) + +<# Function declarations --------------------------------------------------- #> + +<# +.Synopsis +Remove all shell session elements added by the Activate script, including the +addition of the virtual environment's Python executable from the beginning of +the PATH variable. + +.Parameter NonDestructive +If present, do not remove this function from the global namespace for the +session. 
+ +#> +function global:deactivate ([switch]$NonDestructive) { + # Revert to original values + + # The prior prompt: + if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) { + Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt + Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT + } + + # The prior PYTHONHOME: + if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) { + Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME + Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME + } + + # The prior PATH: + if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) { + Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH + Remove-Item -Path Env:_OLD_VIRTUAL_PATH + } + + # Just remove the VIRTUAL_ENV altogether: + if (Test-Path -Path Env:VIRTUAL_ENV) { + Remove-Item -Path env:VIRTUAL_ENV + } + + # Just remove VIRTUAL_ENV_PROMPT altogether. + if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) { + Remove-Item -Path env:VIRTUAL_ENV_PROMPT + } + + # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether: + if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) { + Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force + } + + # Leave deactivate function in the global namespace if requested: + if (-not $NonDestructive) { + Remove-Item -Path function:deactivate + } +} + +<# +.Description +Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the +given folder, and returns them in a map. + +For each line in the pyvenv.cfg file, if that line can be parsed into exactly +two strings separated by `=` (with any amount of whitespace surrounding the =) +then it is considered a `key = value` line. The left hand string is the key, +the right hand is the value. + +If the value starts with a `'` or a `"` then the first and last character is +stripped from the value before being captured. + +.Parameter ConfigDir +Path to the directory that contains the `pyvenv.cfg` file. +#> +function Get-PyVenvConfig( + [String] + $ConfigDir +) { + Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg" + + # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue). + $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue + + # An empty map will be returned if no config file is found. + $pyvenvConfig = @{ } + + if ($pyvenvConfigPath) { + + Write-Verbose "File exists, parse `key = value` lines" + $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath + + $pyvenvConfigContent | ForEach-Object { + $keyval = $PSItem -split "\s*=\s*", 2 + if ($keyval[0] -and $keyval[1]) { + $val = $keyval[1] + + # Remove extraneous quotations around a string value. 
+ if ("'""".Contains($val.Substring(0, 1))) { + $val = $val.Substring(1, $val.Length - 2) + } + + $pyvenvConfig[$keyval[0]] = $val + Write-Verbose "Adding Key: '$($keyval[0])'='$val'" + } + } + } + return $pyvenvConfig +} + + +<# Begin Activate script --------------------------------------------------- #> + +# Determine the containing directory of this script +$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition +$VenvExecDir = Get-Item -Path $VenvExecPath + +Write-Verbose "Activation script is located in path: '$VenvExecPath'" +Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)" +Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)" + +# Set values required in priority: CmdLine, ConfigFile, Default +# First, get the location of the virtual environment, it might not be +# VenvExecDir if specified on the command line. +if ($VenvDir) { + Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values" +} +else { + Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir." + $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/") + Write-Verbose "VenvDir=$VenvDir" +} + +# Next, read the `pyvenv.cfg` file to determine any required value such +# as `prompt`. +$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir + +# Next, set the prompt from the command line, or the config file, or +# just use the name of the virtual environment folder. +if ($Prompt) { + Write-Verbose "Prompt specified as argument, using '$Prompt'" +} +else { + Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value" + if ($pyvenvCfg -and $pyvenvCfg['prompt']) { + Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'" + $Prompt = $pyvenvCfg['prompt']; + } + else { + Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virutal environment)" + Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'" + $Prompt = Split-Path -Path $venvDir -Leaf + } +} + +Write-Verbose "Prompt = '$Prompt'" +Write-Verbose "VenvDir='$VenvDir'" + +# Deactivate any currently active virtual environment, but leave the +# deactivate function in place. +deactivate -nondestructive + +# Now set the environment variable VIRTUAL_ENV, used by many tools to determine +# that there is an activated venv. 
+$env:VIRTUAL_ENV = $VenvDir + +if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) { + + Write-Verbose "Setting prompt to '$Prompt'" + + # Set the prompt to include the env name + # Make sure _OLD_VIRTUAL_PROMPT is global + function global:_OLD_VIRTUAL_PROMPT { "" } + Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT + New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt + + function global:prompt { + Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) " + _OLD_VIRTUAL_PROMPT + } + $env:VIRTUAL_ENV_PROMPT = $Prompt +} + +# Clear PYTHONHOME +if (Test-Path -Path Env:PYTHONHOME) { + Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME + Remove-Item -Path Env:PYTHONHOME +} + +# Add the venv to the PATH +Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH +$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH" + +# SIG # Begin signature block +# MIIc+QYJKoZIhvcNAQcCoIIc6jCCHOYCAQExDzANBglghkgBZQMEAgEFADB5Bgor +# BgEEAYI3AgEEoGswaTA0BgorBgEEAYI3AgEeMCYCAwEAAAQQH8w7YFlLCE63JNLG +# KX7zUQIBAAIBAAIBAAIBAAIBADAxMA0GCWCGSAFlAwQCAQUABCB/jbdIOBl7aFn0 +# IOwX0LZ7IuNFjwXgmb5mWup4AsyxRaCCC38wggUwMIIEGKADAgECAhAECRgbX9W7 +# ZnVTQ7VvlVAIMA0GCSqGSIb3DQEBCwUAMGUxCzAJBgNVBAYTAlVTMRUwEwYDVQQK +# EwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20xJDAiBgNV +# BAMTG0RpZ2lDZXJ0IEFzc3VyZWQgSUQgUm9vdCBDQTAeFw0xMzEwMjIxMjAwMDBa +# Fw0yODEwMjIxMjAwMDBaMHIxCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2Vy +# dCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20xMTAvBgNVBAMTKERpZ2lD +# ZXJ0IFNIQTIgQXNzdXJlZCBJRCBDb2RlIFNpZ25pbmcgQ0EwggEiMA0GCSqGSIb3 +# DQEBAQUAA4IBDwAwggEKAoIBAQD407Mcfw4Rr2d3B9MLMUkZz9D7RZmxOttE9X/l +# qJ3bMtdx6nadBS63j/qSQ8Cl+YnUNxnXtqrwnIal2CWsDnkoOn7p0WfTxvspJ8fT +# eyOU5JEjlpB3gvmhhCNmElQzUHSxKCa7JGnCwlLyFGeKiUXULaGj6YgsIJWuHEqH +# CN8M9eJNYBi+qsSyrnAxZjNxPqxwoqvOf+l8y5Kh5TsxHM/q8grkV7tKtel05iv+ +# bMt+dDk2DZDv5LVOpKnqagqrhPOsZ061xPeM0SAlI+sIZD5SlsHyDxL0xY4PwaLo +# LFH3c7y9hbFig3NBggfkOItqcyDQD2RzPJ6fpjOp/RnfJZPRAgMBAAGjggHNMIIB +# yTASBgNVHRMBAf8ECDAGAQH/AgEAMA4GA1UdDwEB/wQEAwIBhjATBgNVHSUEDDAK +# BggrBgEFBQcDAzB5BggrBgEFBQcBAQRtMGswJAYIKwYBBQUHMAGGGGh0dHA6Ly9v +# Y3NwLmRpZ2ljZXJ0LmNvbTBDBggrBgEFBQcwAoY3aHR0cDovL2NhY2VydHMuZGln +# aWNlcnQuY29tL0RpZ2lDZXJ0QXNzdXJlZElEUm9vdENBLmNydDCBgQYDVR0fBHow +# eDA6oDigNoY0aHR0cDovL2NybDQuZGlnaWNlcnQuY29tL0RpZ2lDZXJ0QXNzdXJl +# ZElEUm9vdENBLmNybDA6oDigNoY0aHR0cDovL2NybDMuZGlnaWNlcnQuY29tL0Rp +# Z2lDZXJ0QXNzdXJlZElEUm9vdENBLmNybDBPBgNVHSAESDBGMDgGCmCGSAGG/WwA +# AgQwKjAoBggrBgEFBQcCARYcaHR0cHM6Ly93d3cuZGlnaWNlcnQuY29tL0NQUzAK +# BghghkgBhv1sAzAdBgNVHQ4EFgQUWsS5eyoKo6XqcQPAYPkt9mV1DlgwHwYDVR0j +# BBgwFoAUReuir/SSy4IxLVGLp6chnfNtyA8wDQYJKoZIhvcNAQELBQADggEBAD7s +# DVoks/Mi0RXILHwlKXaoHV0cLToaxO8wYdd+C2D9wz0PxK+L/e8q3yBVN7Dh9tGS +# dQ9RtG6ljlriXiSBThCk7j9xjmMOE0ut119EefM2FAaK95xGTlz/kLEbBw6RFfu6 +# r7VRwo0kriTGxycqoSkoGjpxKAI8LpGjwCUR4pwUR6F6aGivm6dcIFzZcbEMj7uo +# +MUSaJ/PQMtARKUT8OZkDCUIQjKyNookAv4vcn4c10lFluhZHen6dGRrsutmQ9qz +# sIzV6Q3d9gEgzpkxYz0IGhizgZtPxpMQBvwHgfqL2vmCSfdibqFT+hKUGIUukpHq +# aGxEMrJmoecYpJpkUe8wggZHMIIFL6ADAgECAhADPtXtoGXRuMkd/PkqbJvYMA0G +# CSqGSIb3DQEBCwUAMHIxCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJ +# bmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20xMTAvBgNVBAMTKERpZ2lDZXJ0 +# IFNIQTIgQXNzdXJlZCBJRCBDb2RlIFNpZ25pbmcgQ0EwHhcNMTgxMjE4MDAwMDAw +# WhcNMjExMjIyMTIwMDAwWjCBgzELMAkGA1UEBhMCVVMxFjAUBgNVBAgTDU5ldyBI +# 
YW1wc2hpcmUxEjAQBgNVBAcTCVdvbGZlYm9ybzEjMCEGA1UEChMaUHl0aG9uIFNv +# ZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMTGlB5dGhvbiBTb2Z0d2FyZSBGb3Vu +# ZGF0aW9uMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAqr2kS7J1uW7o +# JRxlsdrETAjKarfoH5TI8PWST6Yb2xPooP7vHT4iaVXyL5Lze1f53Jw67Sp+u524 +# fJXf30qHViEWxumy2RWG0nciU2d+mMqzjlaAWSZNF0u4RcvyDJokEV0RUOqI5CG5 +# zPI3W9uQ6LiUk3HCYW6kpH177A5T3pw/Po8O8KErJGn1anaqtIICq99ySxrMad/2 +# hPMBRf6Ndah7f7HPn1gkSSTAoejyuqF5h+B0qI4+JK5+VLvz659VTbAWJsYakkxZ +# xVWYpFv4KeQSSwoo0DzMvmERsTzNvVBMWhu9OriJNg+QfFmf96zVTu93cZ+r7xMp +# bXyfIOGKhHMaRuZ8ihuWIx3gI9WHDFX6fBKR8+HlhdkaiBEWIsXRoy+EQUyK7zUs +# +FqOo2sRYttbs8MTF9YDKFZwyPjn9Wn+gLGd5NUEVyNvD9QVGBEtN7vx87bduJUB +# 8F4DylEsMtZTfjw/au6AmOnmneK5UcqSJuwRyZaGNk7y3qj06utx+HTTqHgi975U +# pxfyrwAqkovoZEWBVSpvku8PVhkBXcLmNe6MEHlFiaMoiADAeKmX5RFRkN+VrmYG +# Tg4zajxfdHeIY8TvLf48tTfmnQJd98geJQv/01NUy/FxuwqAuTkaez5Nl1LxP0Cp +# THhghzO4FRD4itT2wqTh4jpojw9QZnsCAwEAAaOCAcUwggHBMB8GA1UdIwQYMBaA +# FFrEuXsqCqOl6nEDwGD5LfZldQ5YMB0GA1UdDgQWBBT8Kr9+1L6s84KcpM97IgE7 +# uI8H8jAOBgNVHQ8BAf8EBAMCB4AwEwYDVR0lBAwwCgYIKwYBBQUHAwMwdwYDVR0f +# BHAwbjA1oDOgMYYvaHR0cDovL2NybDMuZGlnaWNlcnQuY29tL3NoYTItYXNzdXJl +# ZC1jcy1nMS5jcmwwNaAzoDGGL2h0dHA6Ly9jcmw0LmRpZ2ljZXJ0LmNvbS9zaGEy +# LWFzc3VyZWQtY3MtZzEuY3JsMEwGA1UdIARFMEMwNwYJYIZIAYb9bAMBMCowKAYI +# KwYBBQUHAgEWHGh0dHBzOi8vd3d3LmRpZ2ljZXJ0LmNvbS9DUFMwCAYGZ4EMAQQB +# MIGEBggrBgEFBQcBAQR4MHYwJAYIKwYBBQUHMAGGGGh0dHA6Ly9vY3NwLmRpZ2lj +# ZXJ0LmNvbTBOBggrBgEFBQcwAoZCaHR0cDovL2NhY2VydHMuZGlnaWNlcnQuY29t +# L0RpZ2lDZXJ0U0hBMkFzc3VyZWRJRENvZGVTaWduaW5nQ0EuY3J0MAwGA1UdEwEB +# /wQCMAAwDQYJKoZIhvcNAQELBQADggEBAEt1oS21X0axiafPjyY+vlYqjWKuUu/Y +# FuYWIEq6iRRaFabNDhj9RBFQF/aJiE5msrQEOfAD6/6gVSH91lZWBqg6NEeG9T9S +# XbiAPvJ9CEWFsdkXUrjbWhvCnuZ7kqUuU5BAumI1QRbpYgZL3UA+iZXkmjbGh1ln +# 8rUhWIxbBYL4Sg2nqpB44p7CUFYkPj/MbwU2gvBV2pXjj5WaskoZtsACMv5g42BN +# oVLoRAi+ev6s07POt+JtHRIm87lTyuc8wh0swTPUwksKbLU1Zdj9CpqtzXnuVE0w +# 50exJvRSK3Vt4g+0vigpI3qPmDdpkf9+4Mvy0XMNcqrthw20R+PkIlMxghDQMIIQ +# zAIBATCBhjByMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkw +# FwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMTEwLwYDVQQDEyhEaWdpQ2VydCBTSEEy +# IEFzc3VyZWQgSUQgQ29kZSBTaWduaW5nIENBAhADPtXtoGXRuMkd/PkqbJvYMA0G +# CWCGSAFlAwQCAQUAoIGaMBkGCSqGSIb3DQEJAzEMBgorBgEEAYI3AgEEMBwGCisG +# AQQBgjcCAQsxDjAMBgorBgEEAYI3AgEVMC4GCisGAQQBgjcCAQwxIDAeoByAGgBQ +# AHkAdABoAG8AbgAgADMALgAxADAALgAwMC8GCSqGSIb3DQEJBDEiBCBSbvNxuLmL +# Fyf70+vzWnE86JSS2JLTJbh9WhRqgW9MeDANBgkqhkiG9w0BAQEFAASCAgCAiz/E +# icJSsvY4x2vZnY3UjThg0S9qs+r104rhPxj39k4Qw7szI4yjQQbb2bvnoJA3LoFo +# RROlFg9dXJ8YWRZRNkla+zHX7bIsWO2aIFCnOCOUFH19ttlrTvnal6uLp7P9+wQs +# rller1aRyNIM47dYn+CGxD4NEJ/NIqhCjuRKl0v1Dkps/2md0YkoEcnRXNb3vq6x +# +2L9x3zAcBmVwVM4SFFZ2ZZQG/aHgihbVoNxxTXhYDcwaL5uRrNelz9KXDn+GYpk +# K7ZUFbmNfgnhieLHqF0hk7wLZtHI1BSmsOAFrkApcuIVLzD8aSgDbAMVZEv3GkTx +# Img7jKusLIITGuUWT8wO1LDUXT54qBkQhue6kJ3rqSa2agtg/OWxtQ9JwGSOiRaW +# wlRJjsihrw8Nx1Kcr5EwruBBLFiF+mv/C5ikLvwES1ZKoLccqCftuEptcbmsyEZS +# ov39SslaIWvqfy7rfz+KFfP9WHJxobV6DY4essDCMNcoYXkRwhbT+rr0ydDH23DS +# 3hbXpCuKsy5IAMB7Xk8/uuXV2The/qKmkkmu0KuFOu2/3oqVOC4a27IjkvBCSRhp +# /yWQSM/JQk+KwQ31XCVHeGWf7kqMgCXwkZfkw/lvusXzMuWZqT6bfZ0eGjqX/6jC +# kNwr4fCZtxx0cFLzmCr6/yClCYoDCfGoc1I+D6GCDX0wgg15BgorBgEEAYI3AwMB +# MYINaTCCDWUGCSqGSIb3DQEHAqCCDVYwgg1SAgEDMQ8wDQYJYIZIAWUDBAIBBQAw +# dwYLKoZIhvcNAQkQAQSgaARmMGQCAQEGCWCGSAGG/WwHATAxMA0GCWCGSAFlAwQC +# AQUABCBEd30afcCyVMH4hw1ZZPb4JotijhQZtXQ42klvgjTVGwIQDDJTIO6lXwNY +# 7qTonYA8LxgPMjAyMTEwMDQxOTExMzFaoIIKNzCCBP4wggPmoAMCAQICEA1CSuC+ +# Ooj/YEAhzhQA8N0wDQYJKoZIhvcNAQELBQAwcjELMAkGA1UEBhMCVVMxFTATBgNV +# 
BAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3LmRpZ2ljZXJ0LmNvbTExMC8G +# A1UEAxMoRGlnaUNlcnQgU0hBMiBBc3N1cmVkIElEIFRpbWVzdGFtcGluZyBDQTAe +# Fw0yMTAxMDEwMDAwMDBaFw0zMTAxMDYwMDAwMDBaMEgxCzAJBgNVBAYTAlVTMRcw +# FQYDVQQKEw5EaWdpQ2VydCwgSW5jLjEgMB4GA1UEAxMXRGlnaUNlcnQgVGltZXN0 +# YW1wIDIwMjEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDC5mGEZ8WK +# 9Q0IpEXKY2tR1zoRQr0KdXVNlLQMULUmEP4dyG+RawyW5xpcSO9E5b+bYc0VkWJa +# uP9nC5xj/TZqgfop+N0rcIXeAhjzeG28ffnHbQk9vmp2h+mKvfiEXR52yeTGdnY6 +# U9HR01o2j8aj4S8bOrdh1nPsTm0zinxdRS1LsVDmQTo3VobckyON91Al6GTm3dOP +# L1e1hyDrDo4s1SPa9E14RuMDgzEpSlwMMYpKjIjF9zBa+RSvFV9sQ0kJ/SYjU/aN +# Y+gaq1uxHTDCm2mCtNv8VlS8H6GHq756WwogL0sJyZWnjbL61mOLTqVyHO6fegFz +# +BnW/g1JhL0BAgMBAAGjggG4MIIBtDAOBgNVHQ8BAf8EBAMCB4AwDAYDVR0TAQH/ +# BAIwADAWBgNVHSUBAf8EDDAKBggrBgEFBQcDCDBBBgNVHSAEOjA4MDYGCWCGSAGG +# /WwHATApMCcGCCsGAQUFBwIBFhtodHRwOi8vd3d3LmRpZ2ljZXJ0LmNvbS9DUFMw +# HwYDVR0jBBgwFoAU9LbhIB3+Ka7S5GGlsqIlssgXNW4wHQYDVR0OBBYEFDZEho6k +# urBmvrwoLR1ENt3janq8MHEGA1UdHwRqMGgwMqAwoC6GLGh0dHA6Ly9jcmwzLmRp +# Z2ljZXJ0LmNvbS9zaGEyLWFzc3VyZWQtdHMuY3JsMDKgMKAuhixodHRwOi8vY3Js +# NC5kaWdpY2VydC5jb20vc2hhMi1hc3N1cmVkLXRzLmNybDCBhQYIKwYBBQUHAQEE +# eTB3MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5kaWdpY2VydC5jb20wTwYIKwYB +# BQUHMAKGQ2h0dHA6Ly9jYWNlcnRzLmRpZ2ljZXJ0LmNvbS9EaWdpQ2VydFNIQTJB +# c3N1cmVkSURUaW1lc3RhbXBpbmdDQS5jcnQwDQYJKoZIhvcNAQELBQADggEBAEgc +# 3LXpmiO85xrnIA6OZ0b9QnJRdAojR6OrktIlxHBZvhSg5SeBpU0UFRkHefDRBMOG +# 2Tu9/kQCZk3taaQP9rhwz2Lo9VFKeHk2eie38+dSn5On7UOee+e03UEiifuHokYD +# Tvz0/rdkd2NfI1Jpg4L6GlPtkMyNoRdzDfTzZTlwS/Oc1np72gy8PTLQG8v1Yfx1 +# CAB2vIEO+MDhXM/EEXLnG2RJ2CKadRVC9S0yOIHa9GCiurRS+1zgYSQlT7LfySmo +# c0NR2r1j1h9bm/cuG08THfdKDXF+l7f0P4TrweOjSaH6zqe/Vs+6WXZhiV9+p7SO +# Z3j5NpjhyyjaW4emii8wggUxMIIEGaADAgECAhAKoSXW1jIbfkHkBdo2l8IVMA0G +# CSqGSIb3DQEBCwUAMGUxCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJ +# bmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20xJDAiBgNVBAMTG0RpZ2lDZXJ0 +# IEFzc3VyZWQgSUQgUm9vdCBDQTAeFw0xNjAxMDcxMjAwMDBaFw0zMTAxMDcxMjAw +# MDBaMHIxCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNV +# BAsTEHd3dy5kaWdpY2VydC5jb20xMTAvBgNVBAMTKERpZ2lDZXJ0IFNIQTIgQXNz +# dXJlZCBJRCBUaW1lc3RhbXBpbmcgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +# ggEKAoIBAQC90DLuS82Pf92puoKZxTlUKFe2I0rEDgdFM1EQfdD5fU1ofue2oPSN +# s4jkl79jIZCYvxO8V9PD4X4I1moUADj3Lh477sym9jJZ/l9lP+Cb6+NGRwYaVX4L +# J37AovWg4N4iPw7/fpX786O6Ij4YrBHk8JkDbTuFfAnT7l3ImgtU46gJcWvgzyIQ +# D3XPcXJOCq3fQDpct1HhoXkUxk0kIzBdvOw8YGqsLwfM/fDqR9mIUF79Zm5WYScp +# iYRR5oLnRlD9lCosp+R1PrqYD4R/nzEU1q3V8mTLex4F0IQZchfxFwbvPc3WTe8G +# Qv2iUypPhR3EHTyvz9qsEPXdrKzpVv+TAgMBAAGjggHOMIIByjAdBgNVHQ4EFgQU +# 9LbhIB3+Ka7S5GGlsqIlssgXNW4wHwYDVR0jBBgwFoAUReuir/SSy4IxLVGLp6ch +# nfNtyA8wEgYDVR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwEwYDVR0l +# BAwwCgYIKwYBBQUHAwgweQYIKwYBBQUHAQEEbTBrMCQGCCsGAQUFBzABhhhodHRw +# Oi8vb2NzcC5kaWdpY2VydC5jb20wQwYIKwYBBQUHMAKGN2h0dHA6Ly9jYWNlcnRz +# LmRpZ2ljZXJ0LmNvbS9EaWdpQ2VydEFzc3VyZWRJRFJvb3RDQS5jcnQwgYEGA1Ud +# HwR6MHgwOqA4oDaGNGh0dHA6Ly9jcmw0LmRpZ2ljZXJ0LmNvbS9EaWdpQ2VydEFz +# c3VyZWRJRFJvb3RDQS5jcmwwOqA4oDaGNGh0dHA6Ly9jcmwzLmRpZ2ljZXJ0LmNv +# bS9EaWdpQ2VydEFzc3VyZWRJRFJvb3RDQS5jcmwwUAYDVR0gBEkwRzA4BgpghkgB +# hv1sAAIEMCowKAYIKwYBBQUHAgEWHGh0dHBzOi8vd3d3LmRpZ2ljZXJ0LmNvbS9D +# UFMwCwYJYIZIAYb9bAcBMA0GCSqGSIb3DQEBCwUAA4IBAQBxlRLpUYdWac3v3dp8 +# qmN6s3jPBjdAhO9LhL/KzwMC/cWnww4gQiyvd/MrHwwhWiq3BTQdaq6Z+CeiZr8J +# qmDfdqQ6kw/4stHYfBli6F6CJR7Euhx7LCHi1lssFDVDBGiy23UC4HLHmNY8ZOUf +# SBAYX4k4YU1iRiSHY4yRUiyvKYnleB/WCxSlgNcSR3CzddWThZN+tpJn+1Nhiaj1 +# a5bA9FhpDXzIAbG5KHW3mWOFIoxhynmUfln8jA/jb7UBJrZspe6HUSHkWGCbugwt +# 
K22ixH67xCUrRwIIfEmuE7bhfEJCKMYYVs9BNLZmXbZ0e/VWMyIvIjayS6JKldj1 +# po5SMYIChjCCAoICAQEwgYYwcjELMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lD +# ZXJ0IEluYzEZMBcGA1UECxMQd3d3LmRpZ2ljZXJ0LmNvbTExMC8GA1UEAxMoRGln +# aUNlcnQgU0hBMiBBc3N1cmVkIElEIFRpbWVzdGFtcGluZyBDQQIQDUJK4L46iP9g +# QCHOFADw3TANBglghkgBZQMEAgEFAKCB0TAaBgkqhkiG9w0BCQMxDQYLKoZIhvcN +# AQkQAQQwHAYJKoZIhvcNAQkFMQ8XDTIxMTAwNDE5MTEzMVowKwYLKoZIhvcNAQkQ +# AgwxHDAaMBgwFgQU4deCqOGRvu9ryhaRtaq0lKYkm/MwLwYJKoZIhvcNAQkEMSIE +# ILvICiD0Bu7OdD0pC2wAqLO9UGMzUOfGejuSENvWkuXdMDcGCyqGSIb3DQEJEAIv +# MSgwJjAkMCIEILMQkAa8CtmDB5FXKeBEA0Fcg+MpK2FPJpZMjTVx7PWpMA0GCSqG +# SIb3DQEBAQUABIIBAIaCoJWKRd6cHB/KrrlVmBY469068xG7ok+T18bfcLmNrvPF +# 7PGY5a4qcMZj+rBevyfWTrOreNAWyNhnxIT0qYneSTJOMytTPYnJI+GhvGwQjDhC +# Eg/JeLOe9guMq7P/ZNvFur+VoCz6sgR/Q+9IGUhJ/7liABdMwNLK38r5VEaSAnSW +# RetjuSqtMoZc2KtjL/MUY26sUwjsMD0tgt0EOF4nrcv3rWl++TsJUEqYr+aFpNu4 +# eVaTNeS0V7sRGQbWAQohkES879Lpqv7KaEW+h426+cc5el260gynz7vTzUuaamvW +# Nfbvu83P5Tk1nRA1Ds2aSqn/RMu6cNNjD8ntV5o= +# SIG # End signature block diff --git a/fyp_pycell/Scripts/activate b/fyp_pycell/Scripts/activate new file mode 100644 index 0000000..de3bf09 --- /dev/null +++ b/fyp_pycell/Scripts/activate @@ -0,0 +1,69 @@ +# This file must be used with "source bin/activate" *from bash* +# you cannot run it directly + +deactivate () { + # reset old environment variables + if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then + PATH="${_OLD_VIRTUAL_PATH:-}" + export PATH + unset _OLD_VIRTUAL_PATH + fi + if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then + PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}" + export PYTHONHOME + unset _OLD_VIRTUAL_PYTHONHOME + fi + + # This should detect bash and zsh, which have a hash command that must + # be called to get it to forget past commands. Without forgetting + # past commands the $PATH changes we made may not be respected + if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then + hash -r 2> /dev/null + fi + + if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then + PS1="${_OLD_VIRTUAL_PS1:-}" + export PS1 + unset _OLD_VIRTUAL_PS1 + fi + + unset VIRTUAL_ENV + unset VIRTUAL_ENV_PROMPT + if [ ! "${1:-}" = "nondestructive" ] ; then + # Self destruct! + unset -f deactivate + fi +} + +# unset irrelevant variables +deactivate nondestructive + +VIRTUAL_ENV="E:\Projects\SolaritivityPlus\fyp_pycell" +export VIRTUAL_ENV + +_OLD_VIRTUAL_PATH="$PATH" +PATH="$VIRTUAL_ENV/Scripts:$PATH" +export PATH + +# unset PYTHONHOME if set +# this will fail if PYTHONHOME is set to the empty string (which is bad anyway) +# could use `if (set -u; : $PYTHONHOME) ;` in bash +if [ -n "${PYTHONHOME:-}" ] ; then + _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}" + unset PYTHONHOME +fi + +if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then + _OLD_VIRTUAL_PS1="${PS1:-}" + PS1="(fyp_pycell) ${PS1:-}" + export PS1 + VIRTUAL_ENV_PROMPT="(fyp_pycell) " + export VIRTUAL_ENV_PROMPT +fi + +# This should detect bash and zsh, which have a hash command that must +# be called to get it to forget past commands. Without forgetting +# past commands the $PATH changes we made may not be respected +if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then + hash -r 2> /dev/null +fi diff --git a/fyp_pycell/Scripts/activate.bat b/fyp_pycell/Scripts/activate.bat new file mode 100644 index 0000000..85ecad8 --- /dev/null +++ b/fyp_pycell/Scripts/activate.bat @@ -0,0 +1,34 @@ +@echo off + +rem This file is UTF-8 encoded, so we need to update the current code page while executing it +for /f "tokens=2 delims=:." 
%%a in ('"%SystemRoot%\System32\chcp.com"') do ( + set _OLD_CODEPAGE=%%a +) +if defined _OLD_CODEPAGE ( + "%SystemRoot%\System32\chcp.com" 65001 > nul +) + +set VIRTUAL_ENV=E:\Projects\SolaritivityPlus\fyp_pycell + +if not defined PROMPT set PROMPT=$P$G + +if defined _OLD_VIRTUAL_PROMPT set PROMPT=%_OLD_VIRTUAL_PROMPT% +if defined _OLD_VIRTUAL_PYTHONHOME set PYTHONHOME=%_OLD_VIRTUAL_PYTHONHOME% + +set _OLD_VIRTUAL_PROMPT=%PROMPT% +set PROMPT=(fyp_pycell) %PROMPT% + +if defined PYTHONHOME set _OLD_VIRTUAL_PYTHONHOME=%PYTHONHOME% +set PYTHONHOME= + +if defined _OLD_VIRTUAL_PATH set PATH=%_OLD_VIRTUAL_PATH% +if not defined _OLD_VIRTUAL_PATH set _OLD_VIRTUAL_PATH=%PATH% + +set PATH=%VIRTUAL_ENV%\Scripts;%PATH% +set VIRTUAL_ENV_PROMPT=(fyp_pycell) + +:END +if defined _OLD_CODEPAGE ( + "%SystemRoot%\System32\chcp.com" %_OLD_CODEPAGE% > nul + set _OLD_CODEPAGE= +) diff --git a/fyp_pycell/Scripts/chroma.exe b/fyp_pycell/Scripts/chroma.exe new file mode 100644 index 0000000..a92a636 Binary files /dev/null and b/fyp_pycell/Scripts/chroma.exe differ diff --git a/fyp_pycell/Scripts/coloredlogs.exe b/fyp_pycell/Scripts/coloredlogs.exe new file mode 100644 index 0000000..9224e63 Binary files /dev/null and b/fyp_pycell/Scripts/coloredlogs.exe differ diff --git a/fyp_pycell/Scripts/convert-caffe2-to-onnx.exe b/fyp_pycell/Scripts/convert-caffe2-to-onnx.exe new file mode 100644 index 0000000..a0a4a55 Binary files /dev/null and b/fyp_pycell/Scripts/convert-caffe2-to-onnx.exe differ diff --git a/fyp_pycell/Scripts/convert-onnx-to-caffe2.exe b/fyp_pycell/Scripts/convert-onnx-to-caffe2.exe new file mode 100644 index 0000000..10c3584 Binary files /dev/null and b/fyp_pycell/Scripts/convert-onnx-to-caffe2.exe differ diff --git a/fyp_pycell/Scripts/cpuinfo.exe b/fyp_pycell/Scripts/cpuinfo.exe new file mode 100644 index 0000000..ece55e0 Binary files /dev/null and b/fyp_pycell/Scripts/cpuinfo.exe differ diff --git a/fyp_pycell/Scripts/deactivate.bat b/fyp_pycell/Scripts/deactivate.bat new file mode 100644 index 0000000..62a39a7 --- /dev/null +++ b/fyp_pycell/Scripts/deactivate.bat @@ -0,0 +1,22 @@ +@echo off + +if defined _OLD_VIRTUAL_PROMPT ( + set "PROMPT=%_OLD_VIRTUAL_PROMPT%" +) +set _OLD_VIRTUAL_PROMPT= + +if defined _OLD_VIRTUAL_PYTHONHOME ( + set "PYTHONHOME=%_OLD_VIRTUAL_PYTHONHOME%" + set _OLD_VIRTUAL_PYTHONHOME= +) + +if defined _OLD_VIRTUAL_PATH ( + set "PATH=%_OLD_VIRTUAL_PATH%" +) + +set _OLD_VIRTUAL_PATH= + +set VIRTUAL_ENV= +set VIRTUAL_ENV_PROMPT= + +:END diff --git a/fyp_pycell/Scripts/distro.exe b/fyp_pycell/Scripts/distro.exe new file mode 100644 index 0000000..92ade3b Binary files /dev/null and b/fyp_pycell/Scripts/distro.exe differ diff --git a/fyp_pycell/Scripts/dotenv.exe b/fyp_pycell/Scripts/dotenv.exe new file mode 100644 index 0000000..ab042b6 Binary files /dev/null and b/fyp_pycell/Scripts/dotenv.exe differ diff --git a/fyp_pycell/Scripts/f2py.exe b/fyp_pycell/Scripts/f2py.exe new file mode 100644 index 0000000..623356e Binary files /dev/null and b/fyp_pycell/Scripts/f2py.exe differ diff --git a/fyp_pycell/Scripts/filetype.exe b/fyp_pycell/Scripts/filetype.exe new file mode 100644 index 0000000..5acf84a Binary files /dev/null and b/fyp_pycell/Scripts/filetype.exe differ diff --git a/fyp_pycell/Scripts/flask.exe b/fyp_pycell/Scripts/flask.exe new file mode 100644 index 0000000..a063948 Binary files /dev/null and b/fyp_pycell/Scripts/flask.exe differ diff --git a/fyp_pycell/Scripts/fonttools.exe b/fyp_pycell/Scripts/fonttools.exe new file mode 100644 index 0000000..b210bcb Binary files 
/dev/null and b/fyp_pycell/Scripts/fonttools.exe differ diff --git a/fyp_pycell/Scripts/hf.exe b/fyp_pycell/Scripts/hf.exe new file mode 100644 index 0000000..4c04e48 Binary files /dev/null and b/fyp_pycell/Scripts/hf.exe differ diff --git a/fyp_pycell/Scripts/httpx.exe b/fyp_pycell/Scripts/httpx.exe new file mode 100644 index 0000000..1e27b11 Binary files /dev/null and b/fyp_pycell/Scripts/httpx.exe differ diff --git a/fyp_pycell/Scripts/huggingface-cli.exe b/fyp_pycell/Scripts/huggingface-cli.exe new file mode 100644 index 0000000..20cb8e7 Binary files /dev/null and b/fyp_pycell/Scripts/huggingface-cli.exe differ diff --git a/fyp_pycell/Scripts/humanfriendly.exe b/fyp_pycell/Scripts/humanfriendly.exe new file mode 100644 index 0000000..4020641 Binary files /dev/null and b/fyp_pycell/Scripts/humanfriendly.exe differ diff --git a/fyp_pycell/Scripts/imageio_download_bin.exe b/fyp_pycell/Scripts/imageio_download_bin.exe new file mode 100644 index 0000000..c0eb03c Binary files /dev/null and b/fyp_pycell/Scripts/imageio_download_bin.exe differ diff --git a/fyp_pycell/Scripts/imageio_remove_bin.exe b/fyp_pycell/Scripts/imageio_remove_bin.exe new file mode 100644 index 0000000..c4fae4c Binary files /dev/null and b/fyp_pycell/Scripts/imageio_remove_bin.exe differ diff --git a/fyp_pycell/Scripts/import_pb_to_tensorboard.exe b/fyp_pycell/Scripts/import_pb_to_tensorboard.exe new file mode 100644 index 0000000..df6c98a Binary files /dev/null and b/fyp_pycell/Scripts/import_pb_to_tensorboard.exe differ diff --git a/fyp_pycell/Scripts/isympy.exe b/fyp_pycell/Scripts/isympy.exe new file mode 100644 index 0000000..35c1708 Binary files /dev/null and b/fyp_pycell/Scripts/isympy.exe differ diff --git a/fyp_pycell/Scripts/jsondiff b/fyp_pycell/Scripts/jsondiff new file mode 100644 index 0000000..b749257 --- /dev/null +++ b/fyp_pycell/Scripts/jsondiff @@ -0,0 +1,41 @@ +#!E:\Projects\SolaritivityPlus\fyp_pycell\Scripts\python.exe +# -*- coding: utf-8 -*- + +from __future__ import print_function + +import sys +import json +import jsonpatch +import argparse + + +parser = argparse.ArgumentParser(description='Diff two JSON files') +parser.add_argument('FILE1', type=argparse.FileType('r')) +parser.add_argument('FILE2', type=argparse.FileType('r')) +parser.add_argument('--indent', type=int, default=None, + help='Indent output by n spaces') +parser.add_argument('-u', '--preserve-unicode', action='store_true', + help='Output Unicode character as-is without using Code Point') +parser.add_argument('-v', '--version', action='version', + version='%(prog)s ' + jsonpatch.__version__) + + +def main(): + try: + diff_files() + except KeyboardInterrupt: + sys.exit(1) + + +def diff_files(): + """ Diffs two JSON files and prints a patch """ + args = parser.parse_args() + doc1 = json.load(args.FILE1) + doc2 = json.load(args.FILE2) + patch = jsonpatch.make_patch(doc1, doc2) + if patch.patch: + print(json.dumps(patch.patch, indent=args.indent, ensure_ascii=not(args.preserve_unicode))) + sys.exit(1) + +if __name__ == "__main__": + main() diff --git a/fyp_pycell/Scripts/jsonpatch b/fyp_pycell/Scripts/jsonpatch new file mode 100644 index 0000000..c7caead --- /dev/null +++ b/fyp_pycell/Scripts/jsonpatch @@ -0,0 +1,107 @@ +#!E:\Projects\SolaritivityPlus\fyp_pycell\Scripts\python.exe +# -*- coding: utf-8 -*- + +import sys +import os.path +import json +import jsonpatch +import tempfile +import argparse + + +parser = argparse.ArgumentParser( + description='Apply a JSON patch on a JSON file') +parser.add_argument('ORIGINAL', 
type=argparse.FileType('r'), + help='Original file') +parser.add_argument('PATCH', type=argparse.FileType('r'), + nargs='?', default=sys.stdin, + help='Patch file (read from stdin if omitted)') +parser.add_argument('--indent', type=int, default=None, + help='Indent output by n spaces') +parser.add_argument('-b', '--backup', action='store_true', + help='Back up ORIGINAL if modifying in-place') +parser.add_argument('-i', '--in-place', action='store_true', + help='Modify ORIGINAL in-place instead of to stdout') +parser.add_argument('-v', '--version', action='version', + version='%(prog)s ' + jsonpatch.__version__) +parser.add_argument('-u', '--preserve-unicode', action='store_true', + help='Output Unicode character as-is without using Code Point') + +def main(): + try: + patch_files() + except KeyboardInterrupt: + sys.exit(1) + + +def patch_files(): + """ Diffs two JSON files and prints a patch """ + args = parser.parse_args() + doc = json.load(args.ORIGINAL) + patch = json.load(args.PATCH) + result = jsonpatch.apply_patch(doc, patch) + + if args.in_place: + dirname = os.path.abspath(os.path.dirname(args.ORIGINAL.name)) + + try: + # Attempt to replace the file atomically. We do this by + # creating a temporary file in the same directory as the + # original file so we can atomically move the new file over + # the original later. (This is done in the same directory + # because atomic renames do not work across mount points.) + + fd, pathname = tempfile.mkstemp(dir=dirname) + fp = os.fdopen(fd, 'w') + atomic = True + + except OSError: + # We failed to create the temporary file for an atomic + # replace, so fall back to non-atomic mode by backing up + # the original (if desired) and writing a new file. + + if args.backup: + os.rename(args.ORIGINAL.name, args.ORIGINAL.name + '.orig') + fp = open(args.ORIGINAL.name, 'w') + atomic = False + + else: + # Since we're not replacing the original file in-place, write + # the modified JSON to stdout instead. + + fp = sys.stdout + + # By this point we have some sort of file object we can write the + # modified JSON to. + + json.dump(result, fp, indent=args.indent, ensure_ascii=not(args.preserve_unicode)) + fp.write('\n') + + if args.in_place: + # Close the new file. If we aren't replacing atomically, this + # is our last step, since everything else is already in place. + + fp.close() + + if atomic: + try: + # Complete the atomic replace by linking the original + # to a backup (if desired), fixing up the permissions + # on the temporary file, and moving it into place. + + if args.backup: + os.link(args.ORIGINAL.name, args.ORIGINAL.name + '.orig') + os.chmod(pathname, os.stat(args.ORIGINAL.name).st_mode) + os.rename(pathname, args.ORIGINAL.name) + + except OSError: + # In the event we could not actually do the atomic + # replace, unlink the original to move it out of the + # way and finally move the temporary file into place. 
+ + os.unlink(args.ORIGINAL.name) + os.rename(pathname, args.ORIGINAL.name) + + +if __name__ == "__main__": + main() diff --git a/fyp_pycell/Scripts/jsonpointer b/fyp_pycell/Scripts/jsonpointer new file mode 100644 index 0000000..1072452 --- /dev/null +++ b/fyp_pycell/Scripts/jsonpointer @@ -0,0 +1,67 @@ +#!E:\Projects\SolaritivityPlus\fyp_pycell\Scripts\python.exe +# -*- coding: utf-8 -*- + + +import argparse +import json +import sys + +import jsonpointer + +parser = argparse.ArgumentParser( + description='Resolve a JSON pointer on JSON files') + +# Accept pointer as argument or as file +ptr_group = parser.add_mutually_exclusive_group(required=True) + +ptr_group.add_argument('-f', '--pointer-file', type=argparse.FileType('r'), + nargs='?', + help='File containing a JSON pointer expression') + +ptr_group.add_argument('POINTER', type=str, nargs='?', + help='A JSON pointer expression') + +parser.add_argument('FILE', type=argparse.FileType('r'), nargs='+', + help='Files for which the pointer should be resolved') +parser.add_argument('--indent', type=int, default=None, + help='Indent output by n spaces') +parser.add_argument('-v', '--version', action='version', + version='%(prog)s ' + jsonpointer.__version__) + + +def main(): + try: + resolve_files() + except KeyboardInterrupt: + sys.exit(1) + + +def parse_pointer(args): + if args.POINTER: + ptr = args.POINTER + elif args.pointer_file: + ptr = args.pointer_file.read().strip() + else: + parser.print_usage() + sys.exit(1) + + return ptr + + +def resolve_files(): + """ Resolve a JSON pointer on JSON files """ + args = parser.parse_args() + + ptr = parse_pointer(args) + + for f in args.FILE: + doc = json.load(f) + try: + result = jsonpointer.resolve_pointer(doc, ptr) + print(json.dumps(result, indent=args.indent)) + except jsonpointer.JsonPointerException as e: + print('Could not resolve pointer: %s' % str(e), file=sys.stderr) + + +if __name__ == "__main__": + main() diff --git a/fyp_pycell/Scripts/jsonschema.exe b/fyp_pycell/Scripts/jsonschema.exe new file mode 100644 index 0000000..786b489 Binary files /dev/null and b/fyp_pycell/Scripts/jsonschema.exe differ diff --git a/fyp_pycell/Scripts/lsm2bin.exe b/fyp_pycell/Scripts/lsm2bin.exe new file mode 100644 index 0000000..cbfd125 Binary files /dev/null and b/fyp_pycell/Scripts/lsm2bin.exe differ diff --git a/fyp_pycell/Scripts/markdown-it.exe b/fyp_pycell/Scripts/markdown-it.exe new file mode 100644 index 0000000..278c6ac Binary files /dev/null and b/fyp_pycell/Scripts/markdown-it.exe differ diff --git a/fyp_pycell/Scripts/markdown_py.exe b/fyp_pycell/Scripts/markdown_py.exe new file mode 100644 index 0000000..488f3dd Binary files /dev/null and b/fyp_pycell/Scripts/markdown_py.exe differ diff --git a/fyp_pycell/Scripts/normalizer.exe b/fyp_pycell/Scripts/normalizer.exe new file mode 100644 index 0000000..9459dbc Binary files /dev/null and b/fyp_pycell/Scripts/normalizer.exe differ diff --git a/fyp_pycell/Scripts/numba b/fyp_pycell/Scripts/numba new file mode 100644 index 0000000..51ac7f8 --- /dev/null +++ b/fyp_pycell/Scripts/numba @@ -0,0 +1,8 @@ +#!E:\Projects\SolaritivityPlus\fyp_pycell\Scripts\python.exe +# -*- coding: UTF-8 -*- +from __future__ import print_function, division, absolute_import + +from numba.misc.numba_entry import main + +if __name__ == "__main__": + main() diff --git a/fyp_pycell/Scripts/onnxruntime_test.exe b/fyp_pycell/Scripts/onnxruntime_test.exe new file mode 100644 index 0000000..57e672a Binary files /dev/null and b/fyp_pycell/Scripts/onnxruntime_test.exe differ diff 
--git a/fyp_pycell/Scripts/pip.exe b/fyp_pycell/Scripts/pip.exe new file mode 100644 index 0000000..7862edd Binary files /dev/null and b/fyp_pycell/Scripts/pip.exe differ diff --git a/fyp_pycell/Scripts/pip3.10.exe b/fyp_pycell/Scripts/pip3.10.exe new file mode 100644 index 0000000..7862edd Binary files /dev/null and b/fyp_pycell/Scripts/pip3.10.exe differ diff --git a/fyp_pycell/Scripts/pip3.exe b/fyp_pycell/Scripts/pip3.exe new file mode 100644 index 0000000..7862edd Binary files /dev/null and b/fyp_pycell/Scripts/pip3.exe differ diff --git a/fyp_pycell/Scripts/pybase64.exe b/fyp_pycell/Scripts/pybase64.exe new file mode 100644 index 0000000..cb86af8 Binary files /dev/null and b/fyp_pycell/Scripts/pybase64.exe differ diff --git a/fyp_pycell/Scripts/pyftmerge.exe b/fyp_pycell/Scripts/pyftmerge.exe new file mode 100644 index 0000000..8b05fcb Binary files /dev/null and b/fyp_pycell/Scripts/pyftmerge.exe differ diff --git a/fyp_pycell/Scripts/pyftsubset.exe b/fyp_pycell/Scripts/pyftsubset.exe new file mode 100644 index 0000000..39859a4 Binary files /dev/null and b/fyp_pycell/Scripts/pyftsubset.exe differ diff --git a/fyp_pycell/Scripts/pygmentize.exe b/fyp_pycell/Scripts/pygmentize.exe new file mode 100644 index 0000000..30464c2 Binary files /dev/null and b/fyp_pycell/Scripts/pygmentize.exe differ diff --git a/fyp_pycell/Scripts/pyproject-build.exe b/fyp_pycell/Scripts/pyproject-build.exe new file mode 100644 index 0000000..8106d13 Binary files /dev/null and b/fyp_pycell/Scripts/pyproject-build.exe differ diff --git a/fyp_pycell/Scripts/python.exe b/fyp_pycell/Scripts/python.exe new file mode 100644 index 0000000..42356dc Binary files /dev/null and b/fyp_pycell/Scripts/python.exe differ diff --git a/fyp_pycell/Scripts/pythonw.exe b/fyp_pycell/Scripts/pythonw.exe new file mode 100644 index 0000000..c1c2531 Binary files /dev/null and b/fyp_pycell/Scripts/pythonw.exe differ diff --git a/fyp_pycell/Scripts/saved_model_cli.exe b/fyp_pycell/Scripts/saved_model_cli.exe new file mode 100644 index 0000000..4fbdbc5 Binary files /dev/null and b/fyp_pycell/Scripts/saved_model_cli.exe differ diff --git a/fyp_pycell/Scripts/tensorboard.exe b/fyp_pycell/Scripts/tensorboard.exe new file mode 100644 index 0000000..7f13473 Binary files /dev/null and b/fyp_pycell/Scripts/tensorboard.exe differ diff --git a/fyp_pycell/Scripts/tf_upgrade_v2.exe b/fyp_pycell/Scripts/tf_upgrade_v2.exe new file mode 100644 index 0000000..73c4530 Binary files /dev/null and b/fyp_pycell/Scripts/tf_upgrade_v2.exe differ diff --git a/fyp_pycell/Scripts/tflite_convert.exe b/fyp_pycell/Scripts/tflite_convert.exe new file mode 100644 index 0000000..3dd1592 Binary files /dev/null and b/fyp_pycell/Scripts/tflite_convert.exe differ diff --git a/fyp_pycell/Scripts/tiff2fsspec.exe b/fyp_pycell/Scripts/tiff2fsspec.exe new file mode 100644 index 0000000..f11caae Binary files /dev/null and b/fyp_pycell/Scripts/tiff2fsspec.exe differ diff --git a/fyp_pycell/Scripts/tiffcomment.exe b/fyp_pycell/Scripts/tiffcomment.exe new file mode 100644 index 0000000..694a31c Binary files /dev/null and b/fyp_pycell/Scripts/tiffcomment.exe differ diff --git a/fyp_pycell/Scripts/tifffile.exe b/fyp_pycell/Scripts/tifffile.exe new file mode 100644 index 0000000..b03da9d Binary files /dev/null and b/fyp_pycell/Scripts/tifffile.exe differ diff --git a/fyp_pycell/Scripts/tiny-agents.exe b/fyp_pycell/Scripts/tiny-agents.exe new file mode 100644 index 0000000..5392204 Binary files /dev/null and b/fyp_pycell/Scripts/tiny-agents.exe differ diff --git 
a/fyp_pycell/Scripts/toco.exe b/fyp_pycell/Scripts/toco.exe new file mode 100644 index 0000000..3dd1592 Binary files /dev/null and b/fyp_pycell/Scripts/toco.exe differ diff --git a/fyp_pycell/Scripts/torchrun.exe b/fyp_pycell/Scripts/torchrun.exe new file mode 100644 index 0000000..f5fe921 Binary files /dev/null and b/fyp_pycell/Scripts/torchrun.exe differ diff --git a/fyp_pycell/Scripts/tqdm.exe b/fyp_pycell/Scripts/tqdm.exe new file mode 100644 index 0000000..97aa338 Binary files /dev/null and b/fyp_pycell/Scripts/tqdm.exe differ diff --git a/fyp_pycell/Scripts/transformers-cli.exe b/fyp_pycell/Scripts/transformers-cli.exe new file mode 100644 index 0000000..c3458f5 Binary files /dev/null and b/fyp_pycell/Scripts/transformers-cli.exe differ diff --git a/fyp_pycell/Scripts/ttx.exe b/fyp_pycell/Scripts/ttx.exe new file mode 100644 index 0000000..34184a0 Binary files /dev/null and b/fyp_pycell/Scripts/ttx.exe differ diff --git a/fyp_pycell/Scripts/typer.exe b/fyp_pycell/Scripts/typer.exe new file mode 100644 index 0000000..abe039f Binary files /dev/null and b/fyp_pycell/Scripts/typer.exe differ diff --git a/fyp_pycell/Scripts/ultralytics.exe b/fyp_pycell/Scripts/ultralytics.exe new file mode 100644 index 0000000..12e007e Binary files /dev/null and b/fyp_pycell/Scripts/ultralytics.exe differ diff --git a/fyp_pycell/Scripts/uvicorn.exe b/fyp_pycell/Scripts/uvicorn.exe new file mode 100644 index 0000000..86f4b3a Binary files /dev/null and b/fyp_pycell/Scripts/uvicorn.exe differ diff --git a/fyp_pycell/Scripts/watchfiles.exe b/fyp_pycell/Scripts/watchfiles.exe new file mode 100644 index 0000000..11072b3 Binary files /dev/null and b/fyp_pycell/Scripts/watchfiles.exe differ diff --git a/fyp_pycell/Scripts/websockets.exe b/fyp_pycell/Scripts/websockets.exe new file mode 100644 index 0000000..9411d7d Binary files /dev/null and b/fyp_pycell/Scripts/websockets.exe differ diff --git a/fyp_pycell/Scripts/wheel.exe b/fyp_pycell/Scripts/wheel.exe new file mode 100644 index 0000000..0ab86b9 Binary files /dev/null and b/fyp_pycell/Scripts/wheel.exe differ diff --git a/fyp_pycell/Scripts/wsdump.exe b/fyp_pycell/Scripts/wsdump.exe new file mode 100644 index 0000000..0c3d084 Binary files /dev/null and b/fyp_pycell/Scripts/wsdump.exe differ diff --git a/fyp_pycell/Scripts/yolo.exe b/fyp_pycell/Scripts/yolo.exe new file mode 100644 index 0000000..12e007e Binary files /dev/null and b/fyp_pycell/Scripts/yolo.exe differ diff --git a/fyp_pycell/pyvenv.cfg b/fyp_pycell/pyvenv.cfg new file mode 100644 index 0000000..a7dce96 --- /dev/null +++ b/fyp_pycell/pyvenv.cfg @@ -0,0 +1,3 @@ +home = C:\Users\Anirudh\AppData\Local\Programs\Python\Python310 +include-system-site-packages = false +version = 3.10.0 diff --git a/fyp_pycell/share/man/man1/isympy.1 b/fyp_pycell/share/man/man1/isympy.1 new file mode 100644 index 0000000..0ff9661 --- /dev/null +++ b/fyp_pycell/share/man/man1/isympy.1 @@ -0,0 +1,188 @@ +'\" -*- coding: us-ascii -*- +.if \n(.g .ds T< \\FC +.if \n(.g .ds T> \\F[\n[.fam]] +.de URL +\\$2 \(la\\$1\(ra\\$3 +.. 
+.if \n(.g .mso www.tmac +.TH isympy 1 2007-10-8 "" "" +.SH NAME +isympy \- interactive shell for SymPy +.SH SYNOPSIS +'nh +.fi +.ad l +\fBisympy\fR \kx +.if (\nx>(\n(.l/2)) .nr x (\n(.l/5) +'in \n(.iu+\nxu +[\fB-c\fR | \fB--console\fR] [\fB-p\fR ENCODING | \fB--pretty\fR ENCODING] [\fB-t\fR TYPE | \fB--types\fR TYPE] [\fB-o\fR ORDER | \fB--order\fR ORDER] [\fB-q\fR | \fB--quiet\fR] [\fB-d\fR | \fB--doctest\fR] [\fB-C\fR | \fB--no-cache\fR] [\fB-a\fR | \fB--auto\fR] [\fB-D\fR | \fB--debug\fR] [ +-- | PYTHONOPTIONS] +'in \n(.iu-\nxu +.ad b +'hy +'nh +.fi +.ad l +\fBisympy\fR \kx +.if (\nx>(\n(.l/2)) .nr x (\n(.l/5) +'in \n(.iu+\nxu +[ +{\fB-h\fR | \fB--help\fR} +| +{\fB-v\fR | \fB--version\fR} +] +'in \n(.iu-\nxu +.ad b +'hy +.SH DESCRIPTION +isympy is a Python shell for SymPy. It is just a normal python shell +(ipython shell if you have the ipython package installed) that executes +the following commands so that you don't have to: +.PP +.nf +\*(T< +>>> from __future__ import division +>>> from sympy import * +>>> x, y, z = symbols("x,y,z") +>>> k, m, n = symbols("k,m,n", integer=True) + \*(T> +.fi +.PP +So starting isympy is equivalent to starting python (or ipython) and +executing the above commands by hand. It is intended for easy and quick +experimentation with SymPy. For more complicated programs, it is recommended +to write a script and import things explicitly (using the "from sympy +import sin, log, Symbol, ..." idiom). +.SH OPTIONS +.TP +\*(T<\fB\-c \fR\*(T>\fISHELL\fR, \*(T<\fB\-\-console=\fR\*(T>\fISHELL\fR +Use the specified shell (python or ipython) as +console backend instead of the default one (ipython +if present or python otherwise). + +Example: isympy -c python + +\fISHELL\fR could be either +\&'ipython' or 'python' +.TP +\*(T<\fB\-p \fR\*(T>\fIENCODING\fR, \*(T<\fB\-\-pretty=\fR\*(T>\fIENCODING\fR +Setup pretty printing in SymPy. By default, the most pretty, unicode +printing is enabled (if the terminal supports it). You can use less +pretty ASCII printing instead or no pretty printing at all. + +Example: isympy -p no + +\fIENCODING\fR must be one of 'unicode', +\&'ascii' or 'no'. +.TP +\*(T<\fB\-t \fR\*(T>\fITYPE\fR, \*(T<\fB\-\-types=\fR\*(T>\fITYPE\fR +Setup the ground types for the polys. By default, gmpy ground types +are used if gmpy2 or gmpy is installed, otherwise it falls back to python +ground types, which are a little bit slower. You can manually +choose python ground types even if gmpy is installed (e.g., for testing purposes). + +Note that sympy ground types are not supported, and should be used +only for experimental purposes. + +Note that the gmpy1 ground type is primarily intended for testing; it the +use of gmpy even if gmpy2 is available. + +This is the same as setting the environment variable +SYMPY_GROUND_TYPES to the given ground type (e.g., +SYMPY_GROUND_TYPES='gmpy') + +The ground types can be determined interactively from the variable +sympy.polys.domains.GROUND_TYPES inside the isympy shell itself. + +Example: isympy -t python + +\fITYPE\fR must be one of 'gmpy', +\&'gmpy1' or 'python'. +.TP +\*(T<\fB\-o \fR\*(T>\fIORDER\fR, \*(T<\fB\-\-order=\fR\*(T>\fIORDER\fR +Setup the ordering of terms for printing. The default is lex, which +orders terms lexicographically (e.g., x**2 + x + 1). You can choose +other orderings, such as rev-lex, which will use reverse +lexicographic ordering (e.g., 1 + x + x**2). 
+ +Note that for very large expressions, ORDER='none' may speed up +printing considerably, with the tradeoff that the order of the terms +in the printed expression will have no canonical order + +Example: isympy -o rev-lax + +\fIORDER\fR must be one of 'lex', 'rev-lex', 'grlex', +\&'rev-grlex', 'grevlex', 'rev-grevlex', 'old', or 'none'. +.TP +\*(T<\fB\-q\fR\*(T>, \*(T<\fB\-\-quiet\fR\*(T> +Print only Python's and SymPy's versions to stdout at startup, and nothing else. +.TP +\*(T<\fB\-d\fR\*(T>, \*(T<\fB\-\-doctest\fR\*(T> +Use the same format that should be used for doctests. This is +equivalent to '\fIisympy -c python -p no\fR'. +.TP +\*(T<\fB\-C\fR\*(T>, \*(T<\fB\-\-no\-cache\fR\*(T> +Disable the caching mechanism. Disabling the cache may slow certain +operations down considerably. This is useful for testing the cache, +or for benchmarking, as the cache can result in deceptive benchmark timings. + +This is the same as setting the environment variable SYMPY_USE_CACHE +to 'no'. +.TP +\*(T<\fB\-a\fR\*(T>, \*(T<\fB\-\-auto\fR\*(T> +Automatically create missing symbols. Normally, typing a name of a +Symbol that has not been instantiated first would raise NameError, +but with this option enabled, any undefined name will be +automatically created as a Symbol. This only works in IPython 0.11. + +Note that this is intended only for interactive, calculator style +usage. In a script that uses SymPy, Symbols should be instantiated +at the top, so that it's clear what they are. + +This will not override any names that are already defined, which +includes the single character letters represented by the mnemonic +QCOSINE (see the "Gotchas and Pitfalls" document in the +documentation). You can delete existing names by executing "del +name" in the shell itself. You can see if a name is defined by typing +"'name' in globals()". + +The Symbols that are created using this have default assumptions. +If you want to place assumptions on symbols, you should create them +using symbols() or var(). + +Finally, this only works in the top level namespace. So, for +example, if you define a function in isympy with an undefined +Symbol, it will not work. +.TP +\*(T<\fB\-D\fR\*(T>, \*(T<\fB\-\-debug\fR\*(T> +Enable debugging output. This is the same as setting the +environment variable SYMPY_DEBUG to 'True'. The debug status is set +in the variable SYMPY_DEBUG within isympy. +.TP +-- \fIPYTHONOPTIONS\fR +These options will be passed on to \fIipython (1)\fR shell. +Only supported when ipython is being used (standard python shell not supported). + +Two dashes (--) are required to separate \fIPYTHONOPTIONS\fR +from the other isympy options. + +For example, to run iSymPy without startup banner and colors: + +isympy -q -c ipython -- --colors=NoColor +.TP +\*(T<\fB\-h\fR\*(T>, \*(T<\fB\-\-help\fR\*(T> +Print help output and exit. +.TP +\*(T<\fB\-v\fR\*(T>, \*(T<\fB\-\-version\fR\*(T> +Print isympy version information and exit. +.SH FILES +.TP +\*(T<\fI${HOME}/.sympy\-history\fR\*(T> +Saves the history of commands when using the python +shell as backend. +.SH BUGS +The upstreams BTS can be found at \(lahttps://github.com/sympy/sympy/issues\(ra +Please report all bugs that you find in there, this will help improve +the overall quality of SymPy. 
+.SH "SEE ALSO" +\fBipython\fR(1), \fBpython\fR(1) diff --git a/fyp_pycell/share/man/man1/ttx.1 b/fyp_pycell/share/man/man1/ttx.1 new file mode 100644 index 0000000..bba23b5 --- /dev/null +++ b/fyp_pycell/share/man/man1/ttx.1 @@ -0,0 +1,225 @@ +.Dd May 18, 2004 +.\" ttx is not specific to any OS, but contrary to what groff_mdoc(7) +.\" seems to imply, entirely omitting the .Os macro causes 'BSD' to +.\" be used, so I give a zero-width space as its argument. +.Os \& +.\" The "FontTools Manual" argument apparently has no effect in +.\" groff 1.18.1. I think it is a bug in the -mdoc groff package. +.Dt TTX 1 "FontTools Manual" +.Sh NAME +.Nm ttx +.Nd tool for manipulating TrueType and OpenType fonts +.Sh SYNOPSIS +.Nm +.Bk +.Op Ar option ... +.Ek +.Bk +.Ar file ... +.Ek +.Sh DESCRIPTION +.Nm +is a tool for manipulating TrueType and OpenType fonts. It can convert +TrueType and OpenType fonts to and from an +.Tn XML Ns -based format called +.Tn TTX . +.Tn TTX +files have a +.Ql .ttx +extension. +.Pp +For each +.Ar file +argument it is given, +.Nm +detects whether it is a +.Ql .ttf , +.Ql .otf +or +.Ql .ttx +file and acts accordingly: if it is a +.Ql .ttf +or +.Ql .otf +file, it generates a +.Ql .ttx +file; if it is a +.Ql .ttx +file, it generates a +.Ql .ttf +or +.Ql .otf +file. +.Pp +By default, every output file is created in the same directory as the +corresponding input file and with the same name except for the +extension, which is substituted appropriately. +.Nm +never overwrites existing files; if necessary, it appends a suffix to +the output file name before the extension, as in +.Pa Arial#1.ttf . +.Ss "General options" +.Bl -tag -width ".Fl t Ar table" +.It Fl h +Display usage information. +.It Fl d Ar dir +Write the output files to directory +.Ar dir +instead of writing every output file to the same directory as the +corresponding input file. +.It Fl o Ar file +Write the output to +.Ar file +instead of writing it to the same directory as the +corresponding input file. +.It Fl v +Be verbose. Write more messages to the standard output describing what +is being done. +.It Fl a +Allow virtual glyphs ID's on compile or decompile. +.El +.Ss "Dump options" +The following options control the process of dumping font files +(TrueType or OpenType) to +.Tn TTX +files. +.Bl -tag -width ".Fl t Ar table" +.It Fl l +List table information. Instead of dumping the font to a +.Tn TTX +file, display minimal information about each table. +.It Fl t Ar table +Dump table +.Ar table . +This option may be given multiple times to dump several tables at +once. When not specified, all tables are dumped. +.It Fl x Ar table +Exclude table +.Ar table +from the list of tables to dump. This option may be given multiple +times to exclude several tables from the dump. The +.Fl t +and +.Fl x +options are mutually exclusive. +.It Fl s +Split tables. Dump each table to a separate +.Tn TTX +file and write (under the name that would have been used for the output +file if the +.Fl s +option had not been given) one small +.Tn TTX +file containing references to the individual table dump files. This +file can be used as input to +.Nm +as long as the referenced files can be found in the same directory. +.It Fl i +.\" XXX: I suppose OpenType programs (exist and) are also affected. +Don't disassemble TrueType instructions. When this option is specified, +all TrueType programs (glyph programs, the font program and the +pre-program) are written to the +.Tn TTX +file as hexadecimal data instead of +assembly. 
This saves some time and results in smaller +.Tn TTX +files. +.It Fl y Ar n +When decompiling a TrueType Collection (TTC) file, +decompile font number +.Ar n , +starting from 0. +.El +.Ss "Compilation options" +The following options control the process of compiling +.Tn TTX +files into font files (TrueType or OpenType): +.Bl -tag -width ".Fl t Ar table" +.It Fl m Ar fontfile +Merge the input +.Tn TTX +file +.Ar file +with +.Ar fontfile . +No more than one +.Ar file +argument can be specified when this option is used. +.It Fl b +Don't recalculate glyph bounding boxes. Use the values in the +.Tn TTX +file as is. +.El +.Sh "THE TTX FILE FORMAT" +You can find some information about the +.Tn TTX +file format in +.Pa documentation.html . +In particular, you will find in that file the list of tables understood by +.Nm +and the relations between TrueType GlyphIDs and the glyph names used in +.Tn TTX +files. +.Sh EXAMPLES +In the following examples, all files are read from and written to the +current directory. Additionally, the name given for the output file +assumes in every case that it did not exist before +.Nm +was invoked. +.Pp +Dump the TrueType font contained in +.Pa FreeSans.ttf +to +.Pa FreeSans.ttx : +.Pp +.Dl ttx FreeSans.ttf +.Pp +Compile +.Pa MyFont.ttx +into a TrueType or OpenType font file: +.Pp +.Dl ttx MyFont.ttx +.Pp +List the tables in +.Pa FreeSans.ttf +along with some information: +.Pp +.Dl ttx -l FreeSans.ttf +.Pp +Dump the +.Sq cmap +table from +.Pa FreeSans.ttf +to +.Pa FreeSans.ttx : +.Pp +.Dl ttx -t cmap FreeSans.ttf +.Sh NOTES +On MS\-Windows and MacOS, +.Nm +is available as a graphical application to which files can be dropped. +.Sh SEE ALSO +.Pa documentation.html +.Pp +.Xr fontforge 1 , +.Xr ftinfo 1 , +.Xr gfontview 1 , +.Xr xmbdfed 1 , +.Xr Font::TTF 3pm +.Sh AUTHORS +.Nm +was written by +.An -nosplit +.An "Just van Rossum" Aq just@letterror.com . +.Pp +This manual page was written by +.An "Florent Rougon" Aq f.rougon@free.fr +for the Debian GNU/Linux system based on the existing FontTools +documentation. It may be freely used, modified and distributed without +restrictions. +.\" For Emacs: +.\" Local Variables: +.\" fill-column: 72 +.\" sentence-end: "[.?!][]\"')}]*\\($\\| $\\| \\| \\)[ \n]*" +.\" sentence-end-double-space: t +.\" End: \ No newline at end of file diff --git a/gemini_check.py b/gemini_check.py new file mode 100644 index 0000000..ee00b89 --- /dev/null +++ b/gemini_check.py @@ -0,0 +1,15 @@ +# debug_gemini.py — run this from your project root: python debug_gemini.py +import os +from dotenv import load_dotenv +load_dotenv() + +import requests + +GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY") or os.environ.get("GOOGLE_API_KEY") +print(f"Key loaded: {'YES, ends with ...' 
+ GEMINI_API_KEY[-6:] if GEMINI_API_KEY else 'NO KEY FOUND'}") + +# Step 1: List models +url = f"https://generativelanguage.googleapis.com/v1beta/models?key={GEMINI_API_KEY}" +resp = requests.get(url, timeout=30) +print(f"\nList models status: {resp.status_code}") +print(resp.text[:2000]) \ No newline at end of file diff --git a/invalid.py b/invalid_gen.py similarity index 100% rename from invalid.py rename to invalid_gen.py diff --git a/invalid_test_augmented/salt_pepper.jpg b/invalid_test_augmented/salt_pepper.jpg deleted file mode 100644 index 0c25ea2..0000000 Binary files a/invalid_test_augmented/salt_pepper.jpg and /dev/null differ diff --git a/invalid_test_augmented/high_blur.jpg b/invalid_test_images/high_blur.jpg similarity index 100% rename from invalid_test_augmented/high_blur.jpg rename to invalid_test_images/high_blur.jpg diff --git a/invalid_test_augmented/overexposed.jpg b/invalid_test_images/overexposed.jpg similarity index 100% rename from invalid_test_augmented/overexposed.jpg rename to invalid_test_images/overexposed.jpg diff --git a/invalid_test_augmented/underexposed.jpg b/invalid_test_images/underexposed.jpg similarity index 100% rename from invalid_test_augmented/underexposed.jpg rename to invalid_test_images/underexposed.jpg diff --git a/modules/carbon/carbon_routes.py b/modules/carbon/carbon_routes.py index b35916e..512e894 100644 --- a/modules/carbon/carbon_routes.py +++ b/modules/carbon/carbon_routes.py @@ -9,20 +9,56 @@ @carbon_bp.route("/predict", methods=["POST"]) @require_auth def predict(): - body = request.get_json(silent=True) or {} - detections = body.get("detections", []) - img_w = body.get("image_width", 640) - img_h = body.get("image_height", 640) - city = body.get("city", "Chennai") - panel_power = float(body.get("panel_power", 380)) + body = request.get_json(silent=True) or {} + detections = body.get("detections", []) + img_w = body.get("image_width", 640) + img_h = body.get("image_height", 640) + city = body.get("city", "Chennai") + panel_power = float(body.get("panel_power", 380)) ambient_temp = float(body.get("ambient_temp", 32)) - irradiance = float(body.get("irradiance", 900)) - filename = body.get("filename", "unknown") + irradiance = float(body.get("irradiance", 900)) + filename = body.get("filename", "unknown") try: - result = predict_carbon(detections, img_w, img_h, city, panel_power, ambient_temp, irradiance) + result = predict_carbon( + detections, img_w, img_h, + city, panel_power, ambient_temp, irradiance, + ) write_carbon(filename, result) + + # ── Ingest into ChromaDB so chatbot can answer image-specific questions ── + try: + from modules.chatbot.vector_store import ingest_image_result + + # Normalise detections to the shape ingest_image_result expects. + # Detection dicts from the frontend use "class_name"; handle both forms. 
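# For illustration only, not lines from this patch: a hedged sketch of the shape
# change this normalisation performs, using the same sample values that the
# /api/chat/ingest_image docstring further below uses.
#
#   frontend-style detection:
#     {"class_name": "crack", "confidence": 0.82,
#      "x1": 526, "y1": 419, "x2": 869, "y2": 549}
#
#   normalised for ingest_image_result (with img_w = img_h = 1024):
#     {"defect_class": "crack", "confidence": 0.82, "area_ratio": 0,
#      "bbox_x1": 526, "bbox_y1": 419, "bbox_x2": 869, "bbox_y2": 549,
#      "image_width": 1024, "image_height": 1024}
#
# Detections with confidence below 0.5 are dropped before ingestion.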
+ normalised = [ + { + "defect_class": d.get("class_name", d.get("defect_class", "unknown")), + "confidence": d.get("confidence", 0), + "area_ratio": d.get("area_ratio", 0), + "bbox_x1": d.get("bbox_x1", d.get("x1", 0)), + "bbox_y1": d.get("bbox_y1", d.get("y1", 0)), + "bbox_x2": d.get("bbox_x2", d.get("x2", 0)), + "bbox_y2": d.get("bbox_y2", d.get("y2", 0)), + "image_width": img_w, + "image_height": img_h, + } + for d in detections + if d.get("confidence", 0) >= 0.5 + ] + + ingest_image_result( + image_filename=filename, + detections=normalised, + carbon_data=result, # full dict from predict_carbon + ) + except Exception as rag_err: + # Never let RAG errors break the main carbon response + print(f"[Carbon] RAG ingest warning: {rag_err}") + return jsonify({"success": True, **result}), 200 + except Exception as e: import traceback; traceback.print_exc() return jsonify({"success": False, "error": str(e)}), 500 \ No newline at end of file diff --git a/modules/chatbot/__init__.py b/modules/chatbot/__init__.py index 0a3aecf..0267dc8 100644 --- a/modules/chatbot/__init__.py +++ b/modules/chatbot/__init__.py @@ -1,2 +1,3 @@ +"""modules/chatbot/__init__.py""" from .chatbot_routes import chatbot_bp __all__ = ["chatbot_bp"] \ No newline at end of file diff --git a/modules/chatbot/chatbot_routes.py b/modules/chatbot/chatbot_routes.py index e686205..fef3130 100644 --- a/modules/chatbot/chatbot_routes.py +++ b/modules/chatbot/chatbot_routes.py @@ -1,36 +1,136 @@ -"""modules/chatbot/chatbot_routes.py""" +"""modules/chatbot/chatbot_routes.py +Flask blueprint for the RAG chatbot — with sentiment analysis support. + +CHANGES FROM ORIGINAL: + - /api/chat/query now returns sentiment metadata (label, score, compound, is_negative) + alongside the answer, so the frontend can show a mood indicator or adapt the UI. + - answer_query() now returns a dict instead of a plain string. + - All existing routes and their signatures are preserved unchanged. +""" from flask import Blueprint, request, jsonify from auth import require_auth from .rag_engine import answer_query -from .vector_store import ingest_docs, ingest_csvs +from .vector_store import ingest_docs, ingest_csvs, get_stats, ingest_image_result chatbot_bp = Blueprint("chatbot", __name__, url_prefix="/api/chat") + @chatbot_bp.route("/query", methods=["POST"]) @require_auth def query(): - body = request.get_json(silent=True) or {} - question = (body.get("question") or "").strip() - history = body.get("history", []) # [{role, content}, ...] 
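# A minimal client-side sketch of the reworked /api/chat/query contract documented
# in the docstring just below. Assumptions, not taken from this patch: the app is
# served at http://localhost:5000 and the caller already satisfies whatever
# session/cookie handling @require_auth expects.
import requests

resp = requests.post(
    "http://localhost:5000/api/chat/query",
    json={
        "question": "What defects were found?",
        "history": [],                        # optional
        "image_filename": "abc123.jpg",       # optional, current session image
    },
)
data = resp.json()
# New sentiment metadata is returned alongside the answer:
print(data["answer"])
print(data["sentiment_label"], data["sentiment_score"],
      data["sentiment_compound"], data["is_negative"])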
+ """ + POST body: + { + "question": "What defects were found?", + "history": [{role, content}, ...], // optional + "image_filename": "abc123.jpg" // optional, current session image + } + + Response body: + { + "success": true, + "answer": "...", + "sentiment_label": "negative", // "positive" | "neutral" | "negative" + "sentiment_score": 0.91, // DistilBERT confidence + "sentiment_compound": -0.82, // -1.0 (very negative) → +1.0 (very positive) + "is_negative": true // true = empathetic mode was activated + } + """ + body = request.get_json(silent=True) or {} + question = (body.get("question") or "").strip() + history = body.get("history", []) + image_filename = (body.get("image_filename") or "").strip() or None if not question: return jsonify({"success": False, "error": "Question is required."}), 400 try: - answer = answer_query(question, history) - return jsonify({"success": True, "answer": answer}), 200 + result = answer_query(question, history, image_filename=image_filename) + + return jsonify({ + "success": True, + "answer": result["answer"], + "sentiment_label": result["sentiment_label"], + "sentiment_score": result["sentiment_score"], + "sentiment_compound": result["sentiment_compound"], + "is_negative": result["is_negative"], + }), 200 + except ValueError as e: return jsonify({"success": False, "error": str(e)}), 503 except Exception as e: return jsonify({"success": False, "error": str(e)}), 500 + @chatbot_bp.route("/ingest", methods=["POST"]) @require_auth def ingest(): """Re-ingest docs and CSVs into ChromaDB on demand.""" try: - ingest_docs() - ingest_csvs() - return jsonify({"success": True, "message": "Ingestion complete."}), 200 + doc_count = ingest_docs() + csv_count = ingest_csvs() + return jsonify({ + "success": True, + "message": "Ingestion complete.", + "doc_chunks": doc_count, + "csv_rows": csv_count, + }), 200 + except Exception as e: + return jsonify({"success": False, "error": str(e)}), 500 + + +@chatbot_bp.route("/ingest_image", methods=["POST"]) +@require_auth +def ingest_image(): + """ + Called automatically after detection/carbon pipeline runs for an image. + POST body: + { + "image_filename": "abc123.jpg", + "detections": [ + { + "defect_class": "crack", + "confidence": 0.82, + "area_ratio": 0.042, + "bbox_x1": 526, "bbox_y1": 419, + "bbox_x2": 869, "bbox_y2": 549, + "image_width": 1024, "image_height": 1024 + }, + ... 
+ ], + "carbon_data": { + "city": "Delhi", + "panel_power_w": 380, + "ambient_temp_c": 25, + "irradiance_w_m2": 900, + "emission_factor": 0.85, + "num_defects": 2, + "dominant_defect": "crack", + "total_degradation_pct": 1.42, + "co2_kg_per_year": 9.55 + } + } + """ + body = request.get_json(silent=True) or {} + image_filename = (body.get("image_filename") or "").strip() + detections = body.get("detections", []) + carbon_data = body.get("carbon_data", None) + + if not image_filename: + return jsonify({"success": False, "error": "image_filename is required."}), 400 + + try: + ingest_image_result(image_filename, detections, carbon_data) + return jsonify({ + "success": True, + "message": f"Ingested results for {image_filename}", + }), 200 except Exception as e: - return jsonify({"success": False, "error": str(e)}), 500 \ No newline at end of file + return jsonify({"success": False, "error": str(e)}), 500 + + +@chatbot_bp.route("/stats", methods=["GET"]) +@require_auth +def stats(): + """Return vector store stats for debugging.""" + return jsonify({"success": True, **get_stats()}), 200 \ No newline at end of file diff --git a/modules/chatbot/ingest.py b/modules/chatbot/ingest.py index 6b3bb7d..3c3a40b 100644 --- a/modules/chatbot/ingest.py +++ b/modules/chatbot/ingest.py @@ -1,11 +1,23 @@ -"""modules/chatbot/ingest.py — Run once to populate ChromaDB.""" +"""modules/chatbot/ingest.py — Run once to populate ChromaDB with docs + CSV data.""" import sys, os sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "..")) -from modules.chatbot.vector_store import ingest_docs, ingest_csvs +from modules.chatbot.vector_store import ingest_docs, ingest_csvs, get_stats if __name__ == "__main__": - print("Ingesting documents...") - ingest_docs() - print("Ingesting CSV data...") - ingest_csvs() - print("Done.") \ No newline at end of file + print("=" * 55) + print("Ingesting PDF documents from docs/ folder...") + doc_count = ingest_docs() + print(f" → {doc_count} document chunks ingested") + + print("\nIngesting CSV data...") + print(" detections.csv + carbon.csv") + csv_count = ingest_csvs() + print(f" → {csv_count} CSV rows ingested") + + print("\nVector store stats:") + print(" ", get_stats()) + print("=" * 55) + print("Done. ChromaDB is ready.") + print() + print("NOTE: Live image results are ingested automatically") + print("via POST /api/chat/ingest_image after each detection run.") \ No newline at end of file diff --git a/modules/chatbot/rag_engine.py b/modules/chatbot/rag_engine.py index 6b49526..25d9eb2 100644 --- a/modules/chatbot/rag_engine.py +++ b/modules/chatbot/rag_engine.py @@ -1,71 +1,415 @@ """modules/chatbot/rag_engine.py -RAG chatbot using local GGUF model (orca-mini-3b) via ctransformers. -Retrieves context from ChromaDB, then generates answer locally. +RAG chatbot using Gemini API — with DistilBERT Sentiment Analysis. + +EXISTING FUNCTIONALITY: Fully preserved (image-context RAG, filtered search, etc.) 
+NEW ADDITIONS: + - Sentiment analysis on every user message (DistilBERT via sentiment.py) + - Conversation-level sentiment aggregation (sustained negative mood detection) + - Empathetic system instruction injected when negative sentiment is detected + - Empathy knowledge base queried from ChromaDB (support docs in /docs/) + - Sentiment metadata returned alongside the answer for frontend use """ +import os import re -from ctransformers import AutoModelForCausalLM -from config import LOCAL_LLM_PATH, LOCAL_LLM_TYPE, LOCAL_LLM_MAX_TOKENS, LOCAL_LLM_TEMP -from .vector_store import query_collection - -_llm = None - - -def _get_llm(): - global _llm - if _llm is None: - print(f"[RAG] Loading local model from: {LOCAL_LLM_PATH}") - _llm = AutoModelForCausalLM.from_pretrained( - LOCAL_LLM_PATH, - model_type=LOCAL_LLM_TYPE, - max_new_tokens=LOCAL_LLM_MAX_TOKENS, - temperature=LOCAL_LLM_TEMP, - local_files_only=True, +import time +import requests +from dotenv import load_dotenv +load_dotenv() + +from .vector_store import query_collection, query_collection_with_filter +from .sentiment import analyze_sentiment, analyze_conversation_sentiment, SentimentResult + +# ── Gemini config ───────────────────────────────────────────────────────── +GEMINI_TIMEOUT_SEC = int(os.environ.get("GEMINI_TIMEOUT_SEC", "90")) + +_PREFERRED_MODELS = [ + "gemini-2.5-flash", + "gemini-2.0-flash", + "gemini-2.0-flash-lite", + "gemini-1.5-flash", + "gemini-1.5-flash-latest", + "gemini-1.5-pro", +] + +_resolved_model: str | None = None + + +def _get_api_key() -> str: + key = os.environ.get("GEMINI_API_KEY") or os.environ.get("GOOGLE_API_KEY") + if not key: + raise ValueError( + "Missing GEMINI_API_KEY in environment. " + "Add it to your .env file: GEMINI_API_KEY=AIza..." ) - print("[RAG] Model loaded.") - return _llm + return key + + +def _list_available_models(api_key: str) -> list: + url = f"https://generativelanguage.googleapis.com/v1beta/models?key={api_key}" + try: + resp = requests.get(url, timeout=20) + resp.raise_for_status() + data = resp.json() + return [ + m["name"].replace("models/", "") + for m in data.get("models", []) + if "generateContent" in m.get("supportedGenerationMethods", []) + ] + except Exception as e: + print(f"[RAG] Could not list models: {e}") + return [] + + +def _resolve_model(api_key: str) -> str: + global _resolved_model + if _resolved_model: + return _resolved_model + + env_model = os.environ.get("GEMINI_MODEL", "").strip() + if env_model: + _resolved_model = env_model + print(f"[RAG] Using env-specified model: {_resolved_model}") + return _resolved_model + + available = _list_available_models(api_key) + for preferred in _PREFERRED_MODELS: + if preferred in available: + _resolved_model = preferred + print(f"[RAG] Auto-selected model: {_resolved_model}") + return _resolved_model + + _resolved_model = "gemini-2.0-flash" + print(f"[RAG] Fallback model: {_resolved_model}") + return _resolved_model + + +# ── System instructions ─────────────────────────────────────────────────── + +BASE_SYSTEM_INSTRUCTION = """You are an expert assistant for a Solar PV Defect Detection System. +You have access to domain knowledge from technical documents and real inspection data +(defect detections, carbon emission analysis) stored in a vector database. 
+ +Your role: +- Answer questions about solar panel defects: black core, cracks, finger defects, star cracks, thick lines +- Provide personalized insights based on actual detection records and carbon emission data +- When context contains data about a specific image, use that data directly to give precise, + image-specific answers (defect type, severity, area %, confidence, bounding box, CO2 impact) +- Give actionable maintenance and remediation advice based on detected defects +- Explain defect severity, causes, and recommended corrective actions +- Help users understand their panel health and carbon footprint + +Guidelines: +- Always be concise, factual, and specific +- If context contains exact numbers (confidence, area, CO2), quote them directly +- If asked about a specific image and context is available, focus on that image's data +- If context is insufficient, say so clearly and give general expert guidance +- Never fabricate numbers — only use values present in the context""" +EMPATHY_SYSTEM_EXTENSION = """ -def _preprocess(text: str) -> str: - text = re.sub(r"[^a-zA-Z0-9.,!?%:/()\-\s]", "", text) - return re.sub(r"\s+", " ", text).strip() +──────────────────────────────────────────────────────────────── +EMOTIONAL INTELLIGENCE PROTOCOL — ACTIVE +──────────────────────────────────────────────────────────────── +The user's message has been detected as expressing NEGATIVE sentiment +(frustration, confusion, worry, or distress). Follow these guidelines: +TONE & APPROACH: +- Lead with acknowledgment of their feeling BEFORE giving technical information +- Use warm, calm, supportive language throughout your response +- Never dismiss or minimize their concern — validate it genuinely +- Avoid overly clinical or blunt phrasing; be human-centered -def _build_prompt(question: str, context: str, history: list) -> str: - system = ( - "You are a solar PV defect detection expert assistant. " - "Use the provided context from domain documents and inspection CSV data " - "to answer questions accurately and concisely. " - "If the context is insufficient, say so clearly." +STRUCTURE (for negative-sentiment responses): + 1. ACKNOWLEDGE → Briefly name/validate their frustration or concern + (e.g., "I completely understand how concerning this must be...") + 2. REASSURE → Give a grounding statement that you are here to help + 3. EXPLAIN → Provide the technical answer/information clearly and simply + 4. EMPOWER → End with a clear, actionable next step they can take + 5. OFFER MORE → Gently invite further questions + +LANGUAGE PATTERNS TO USE: +- "I understand this can feel overwhelming..." +- "That's a completely valid concern..." +- "Let me walk you through this step by step..." +- "You're not alone in finding this confusing..." +- "The good news is that..." +- "Here's exactly what you can do..." +- "Please don't hesitate to ask if anything is unclear..." + +LANGUAGE PATTERNS TO AVOID: +- "As I mentioned before..." (implies user wasn't paying attention) +- "Simply do..." / "Just..." (minimizes their difficulty) +- Bullet-only responses without any warm framing +- Abrupt or blunt one-line answers + +EMPATHETIC CONTEXT: +If the empathy knowledge base context contains relevant emotional support +guidance (from the support document), weave those strategies naturally +into your response. Do NOT quote the document literally — internalize +and apply its principles. 
+────────────────────────────────────────────────────────────────""" + +SUSTAINED_NEGATIVE_EXTENSION = """ + +IMPORTANT: This user has shown consistently negative sentiment across +multiple messages in this conversation. They may be increasingly +frustrated or distressed. Be especially patient, warm, and encouraging. +If the situation seems particularly stressful, gently acknowledge that +dealing with equipment issues can be stressful and reassure them that +these problems are solvable with the right approach.""" + + +def _build_system_instruction(sentiment: SentimentResult, is_sustained_negative: bool) -> str: + """Compose the system instruction based on detected sentiment.""" + instruction = BASE_SYSTEM_INSTRUCTION + + if sentiment.is_negative: + instruction += EMPATHY_SYSTEM_EXTENSION + if is_sustained_negative: + instruction += SUSTAINED_NEGATIVE_EXTENSION + + return instruction + + +# ── Image filename extraction (UNCHANGED) ───────────────────────────────── + +_IMG_PATTERN = re.compile( + r"\b([a-f0-9]{8,}\.(?:jpg|jpeg|png|bmp|tiff))\b", + re.IGNORECASE, +) + + +def _extract_image_filename(text: str) -> str | None: + m = _IMG_PATTERN.search(text) + return m.group(1) if m else None + + +def _find_image_context(question: str, history: list) -> tuple[str | None, list]: + img = _extract_image_filename(question) + if not img: + for h in reversed((history or [])[-4:]): + img = _extract_image_filename(h.get("content", "")) + if img: + break + if not img: + return None, [] + docs = query_collection_with_filter( + query=question, + where={"image_filename": img}, + n_results=10, ) - history_text = "" - if history: - for h in history[-4:]: # keep last 4 turns to stay within context window - role = "User" if h.get("role") == "user" else "Assistant" - history_text += f"{role}: {_preprocess(h.get('content',''))}\n" - - prompt = ( - f"### System:\n{system}\n\n" - f"### Context:\n{_preprocess(context)}\n\n" + return img, docs + + +def _is_image_question(question: str) -> bool: + keywords = [ + "this image", "the image", "uploaded image", "my image", + "this panel", "this scan", "current image", "last image", + "defect in", "defects in", "what was detected", "analysis", + "result", "severity", "area", "confidence", "co2", "carbon", + "how many defects", "what defect", + ] + q_lower = question.lower() + return any(kw in q_lower for kw in keywords) + + +# ── Prompt builder ──────────────────────────────────────────────────────── + +def _build_prompt(question: str, context: str, history: list) -> list: + contents = [] + for h in (history or [])[-4:]: + role = "user" if h.get("role") == "user" else "model" + contents.append({"role": role, "parts": [{"text": h.get("content", "")}]}) + + user_text = ( + f"### Relevant Context from Knowledge Base:\n{context}\n\n" + f"### Question:\n{question}" + ) + contents.append({"role": "user", "parts": [{"text": user_text}]}) + return contents + + +# ── Empathy context retrieval ───────────────────────────────────────────── + +def _get_empathy_context(question: str) -> list: + """ + Retrieve empathy/support chunks from the vector store. + These come from the empathetic_support_guide.pdf you place in /docs/. + Filtered by type='empathy_support'. 
+ """ + # Try filtered search for empathy docs first + docs = query_collection_with_filter( + query=f"emotional support empathy frustration concern {question}", + where={"type": "empathy_support"}, + n_results=4, ) - if history_text: - prompt += f"### Conversation History:\n{history_text}\n" + # Fallback: general search with empathy keywords + if not docs: + docs = query_collection( + query=f"empathetic response emotional support frustrated user {question}", + n_results=3, + ) + return docs + + +# ── Main entry point ────────────────────────────────────────────────────── - prompt += f"### User:\n{_preprocess(question)}\n\n### Response:\n" - return prompt +def answer_query( + question: str, + history: list = None, + image_filename: str = None, +) -> dict: + """ + Generate a sentiment-aware answer using RAG + Gemini + DistilBERT. + Parameters + ---------- + question : User's question string + history : List of {role, content} dicts (conversation history) + image_filename : Optional — current session's image filename for image-specific context + + Returns + ------- + dict with keys: + answer : str — the generated response text + sentiment_label : str — "positive" | "neutral" | "negative" + sentiment_score : float — confidence 0.0–1.0 + sentiment_compound: float — signed -1.0 to +1.0 + is_negative : bool — True if empathetic mode was activated + """ + api_key = _get_api_key() + model = _resolve_model(api_key) + + # ── STEP 1: Sentiment Analysis ───────────────────────────────────────── + current_sentiment = analyze_sentiment(question) + conversation_sentiment = analyze_conversation_sentiment(history or [], window=3) + + # Sustained negative = current is negative AND conversation trend is also negative + is_sustained_negative = ( + current_sentiment.is_negative and conversation_sentiment.is_negative + ) + + print( + f"[Sentiment] Current: {current_sentiment} | " + f"Conversation: {conversation_sentiment} | " + f"Sustained negative: {is_sustained_negative}" + ) + + # ── STEP 2: Image context (UNCHANGED logic) ─────────────────────────── + image_docs: list = [] + resolved_img = image_filename + + if resolved_img: + image_docs = query_collection_with_filter( + query=question, + where={"image_filename": resolved_img}, + n_results=10, + ) + + if not image_docs: + resolved_img, image_docs = _find_image_context(question, history or []) + + if not image_docs and _is_image_question(question): + image_docs = query_collection_with_filter( + query=question, + where={"type": "detection_summary"}, + n_results=5, + ) + + # ── STEP 3: General + Empathy context ──────────────────────────────── + general_docs = query_collection(question, n_results=6) + + empathy_docs: list = [] + if current_sentiment.is_negative: + empathy_docs = _get_empathy_context(question) + print(f"[Sentiment] Empathetic mode ON — {len(empathy_docs)} empathy chunks retrieved") + + # ── STEP 4: Assemble context ────────────────────────────────────────── + context_parts = [] + + if image_docs: + img_label = f"image '{resolved_img}'" if resolved_img else "the uploaded image" + context_parts.append( + f"=== SPECIFIC DATA FOR {img_label.upper()} ===\n" + + "\n---\n".join(image_docs) + ) + + if general_docs: + context_parts.append( + "=== GENERAL KNOWLEDGE BASE ===\n" + + "\n---\n".join(general_docs) + ) + + if empathy_docs: + context_parts.append( + "=== EMPATHETIC SUPPORT GUIDANCE ===\n" + "(Use these principles to shape your tone and approach — do not quote directly)\n" + + "\n---\n".join(empathy_docs) + ) + + context = ( + 
"\n\n".join(context_parts) + if context_parts + else "No relevant context found in knowledge base." + ) + + # ── STEP 5: Build system instruction (sentiment-aware) ──────────────── + system_instruction = _build_system_instruction(current_sentiment, is_sustained_negative) + + # ── STEP 6: Build and send Gemini request ───────────────────────────── + contents = _build_prompt(question, context, history or []) + payload = { + "system_instruction": {"parts": [{"text": system_instruction}]}, + "contents": contents, + "generationConfig": { + "temperature": float(os.environ.get("LOCAL_LLM_TEMP", "0.7")), + "maxOutputTokens": int(os.environ.get("LOCAL_LLM_MAX_TOKENS", "2048")), + "topP": 0.95, + }, + } + + url = ( + f"https://generativelanguage.googleapis.com/v1beta/models/" + f"{model}:generateContent?key={api_key}" + ) -def answer_query(question: str, history: list = None) -> str: - # 1. Retrieve relevant docs from ChromaDB - docs = query_collection(question, n_results=5) - context = "\n---\n".join(docs) if docs else "No relevant context found." + for attempt in range(3): + try: + resp = requests.post(url, json=payload, timeout=GEMINI_TIMEOUT_SEC) + if resp.status_code == 429: + wait = 2 ** attempt + print(f"[RAG] Rate limited, retrying in {wait}s...") + time.sleep(wait) + continue + resp.raise_for_status() + data = resp.json() + text = ( + data.get("candidates", [{}])[0] + .get("content", {}) + .get("parts", [{}])[0] + .get("text", "") + .strip() + ) + answer = text if text else "I could not generate a response. Please try again." - # 2. Build prompt - prompt = _build_prompt(question, context, history or []) + # Return answer + sentiment metadata for the frontend + return { + "answer": answer, + "sentiment_label": current_sentiment.label, + "sentiment_score": current_sentiment.score, + "sentiment_compound": current_sentiment.compound, + "is_negative": current_sentiment.is_negative, + } - # 3. Run local model (streaming, collect all tokens) - llm = _get_llm() - response = "" - for token in llm(prompt, stream=True): - response += token + except requests.exceptions.Timeout: + if attempt == 2: + raise ValueError("Gemini API timed out. Please try again.") + time.sleep(2) + except requests.exceptions.HTTPError as e: + raise ValueError( + f"Gemini API error: {e.response.status_code} — {e.response.text[:200]}" + ) + except Exception as e: + raise ValueError(f"Unexpected error calling Gemini: {e}") - return response.strip() \ No newline at end of file + raise ValueError("Failed to get response from Gemini after retries.") diff --git a/modules/chatbot/sentiment.py b/modules/chatbot/sentiment.py new file mode 100644 index 0000000..bc46278 --- /dev/null +++ b/modules/chatbot/sentiment.py @@ -0,0 +1,375 @@ +"""modules/chatbot/sentiment.py [FIXED v3 - eager model warmup] +Transformer-based sentiment analysis using DistilBERT. + +Model: distilbert-base-uncased-finetuned-sst-2-english + +FIXES IN v3: + - Added warmup_sentiment_model() for eager loading at app startup. + Previously the lazy-load design meant teammates who hadn't downloaded + the model would silently fall back to rule-based sentiment for ALL + requests, since the model hadn't loaded before requests started arriving. + - Added _model_loaded / _model_failed flags so load state is always known. + - Warm-up inference pass runs after load so first real request isn't slow. + +GUARDS AGAINST DISTILBERT FALSE POSITIVES (from v2, preserved): + 1. QUESTION GUARD — plain informational questions forced to neutral + 2. 
RAISED THRESHOLDS — neutral zone 0.80, negative trigger -0.55 + 3. EMOTION KEYWORD GATE — must contain a real emotional word to trigger + empathetic mode (prevents "what defects are in + my image?" from triggering empathy) +""" + +from __future__ import annotations +import re +from dataclasses import dataclass + + +# ── Result dataclass ────────────────────────────────────────────────────── + +@dataclass +class SentimentResult: + label: str # "positive" | "neutral" | "negative" + score: float # 0.0-1.0 confidence for the label + is_negative: bool # True when empathetic mode should activate + compound: float # -1.0 (very negative) to +1.0 (very positive) + + def __repr__(self): + sign = "+" if self.compound >= 0 else "" + return ( + f"SentimentResult(label={self.label!r}, " + f"score={self.score:.3f}, compound={sign}{self.compound:.3f})" + ) + + +# ── Tuning constants ────────────────────────────────────────────────────── + +NEUTRAL_THRESHOLD = 0.80 # DistilBERT confidence below this -> "neutral" +NEGATIVE_TRIGGER = -0.55 # compound must be this negative to trigger empathy +MIN_WORDS_FOR_MODEL = 6 # messages shorter than this skip the model + +MODEL_NAME = "distilbert-base-uncased-finetuned-sst-2-english" + + +# ── Model state flags (module-level) ───────────────────────────────────── +# These are checked by app.py health endpoint and _get_pipeline() + +_pipeline = None # the loaded HuggingFace pipeline object +_model_loaded = False # True once model confirmed ready +_model_failed = False # True if transformers not installed or load failed + + +# ── Guard 1: Question / informational intent detection ──────────────────── +# Catches "what defects are present", "show me results", "how many cracks" etc. + +_QUESTION_STARTERS = re.compile( + r"^(what|which|where|when|who|how|why|is|are|was|were|do|does|did|can|could|" + r"should|would|will|tell me|show me|list|give me|explain|describe|find|get|" + r"display|print|calculate|analyse|analyze|check|detect|identify|summarize|" + r"summarise|compare|help me|can you|could you|please)['\s,]", + re.IGNORECASE, +) + +_ENDS_WITH_QUESTION = re.compile(r"\?\s*$") + +_FACTUAL_PHRASES = re.compile( + r"\b(defect|crack|panel|image|result|detect|analysis|carbon|co2|severity|" + r"confidence|area|bounding|bbox|upload|scan|percentage|present|found|show|" + r"list|how many|what type|which|status|report|output|data|file|model|" + r"infrared|thermal|el image|star.crack|black.core|finger|thick.line)\b", + re.IGNORECASE, +) + + +def _is_informational_question(text: str) -> bool: + """ + Return True if the message is a plain informational question or command. + These are ALWAYS forced to neutral regardless of DistilBERT's output. 
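+
+    Illustrative examples (inferred from the regex guards above; not exhaustive):
+        "what defects are present in my image?"   -> True  (question starter)
+        "show me the detection results"           -> True  (command starter)
+        "the panel output dropped again?"         -> True  (ends with "?")
+        "I am so frustrated, nothing I try works" -> False (no starter, no "?", long enough for the model)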
+ """ + stripped = text.strip() + word_count = len(stripped.split()) + if _QUESTION_STARTERS.match(stripped): + return True + if _ENDS_WITH_QUESTION.search(stripped): + return True + if word_count < MIN_WORDS_FOR_MODEL and _FACTUAL_PHRASES.search(stripped): + return True + return False + + +# ── Guard 3: Genuine emotion keyword requirement ────────────────────────── + +_EMOTION_KEYWORDS = re.compile( + r"\b(frustrated|frustrating|frustration|angry|upset|worried|worry|anxious|" + r"stressed|stress|overwhelmed|confused|confusing|lost|helpless|terrible|" + r"horrible|awful|worst|useless|wrong|disappointed|disappointing|" + r"annoyed|annoying|unhappy|failed|failure|crash|stuck|" + r"don.?t understand|can.?t figure|cannot figure|no idea|nothing works|" + r"doesn.?t work|not working|not helpful|still broken|" + r"give up|hopeless|disaster|struggling|struggle|difficult|hard time|" + r"scared|afraid|fear|hurt|pain|sad|miserable|distressed|not sure|" + r"uncertain|why isn.?t|why is this|this is wrong|this is bad)\b", + re.IGNORECASE, +) + + +def _has_emotion_signal(text: str) -> bool: + """Return True if text contains at least one genuine emotional keyword.""" + return bool(_EMOTION_KEYWORDS.search(text)) + + +# ── Eager warmup (call this from app.py at startup) ─────────────────────── + +def warmup_sentiment_model() -> bool: + """ + Eagerly load and warm up the DistilBERT model. + + Call this ONCE at application startup inside create_app() in app.py. + Blocks until the model is downloaded (~67 MB, first run only) and loaded. + + This prevents the teammate problem: without eager loading, the lazy-load + design means the first N requests all arrive before the model is ready + and silently fall back to rule-based sentiment instead of DistilBERT. + + Returns + ------- + bool — True if DistilBERT loaded successfully, + False if falling back to rule-based (transformers not installed). + """ + global _pipeline, _model_loaded, _model_failed + + if _model_loaded: + print("[Sentiment] Model already loaded, skipping warmup.") + return True + if _model_failed: + print("[Sentiment] Model previously failed to load, skipping warmup.") + return False + + print("[Sentiment] ── Warming up DistilBERT sentiment model ──────────") + print(f"[Sentiment] Model : {MODEL_NAME}") + print("[Sentiment] Note : First run downloads ~67 MB (cached after that)...") + + try: + from transformers import pipeline as hf_pipeline + + _pipeline = hf_pipeline( + task="sentiment-analysis", + model=MODEL_NAME, + ) + + # Warm-up inference pass — compiles the model graph so the first + # real user request isn't slower than subsequent ones + _ = _pipeline("Solar panel inspection warm-up pass.") + + _model_loaded = True + print("[Sentiment] ✓ DistilBERT loaded and ready.") + return True + + except ImportError: + _model_failed = True + print( + "[Sentiment] ✗ 'transformers' package not found.\n" + " Fix: pip install transformers torch\n" + " Falling back to rule-based sentiment analysis." + ) + return False + + except Exception as e: + _model_failed = True + print( + f"[Sentiment] ✗ Model load failed: {e}\n" + f" Falling back to rule-based sentiment analysis." + ) + return False + + +def _get_pipeline(): + """ + Return the loaded pipeline. + + Normally _pipeline is already set by warmup_sentiment_model() at startup. + This function is a safety net in case warmup was somehow skipped — + it triggers a lazy load rather than crashing. But if app.py is correct, + this path should never be needed. 
+ """ + global _model_loaded, _model_failed + + if _model_loaded: + return _pipeline + if _model_failed: + return None + + # Safety net: warmup wasn't called at startup — trigger it now + print( + "[Sentiment] WARNING: warmup_sentiment_model() was not called at startup.\n" + " Add it to create_app() in app.py for reliable behaviour.\n" + " Triggering lazy load now..." + ) + warmup_sentiment_model() + return _pipeline + + +# ── Rule-based fallback ─────────────────────────────────────────────────── +# Used ONLY when transformers is not installed or model load failed. + +_NEG_WORDS = { + "terrible", "horrible", "awful", "worst", "useless", "frustrated", + "frustrating", "angry", "upset", "confused", "error", "failed", + "failure", "wrong", "disappointed", "annoyed", "problem", "crash", + "stuck", "worried", "anxious", "stressed", "overwhelmed", "helpless", + "don't understand", "cant figure", "nothing works", "not working", + "not helpful", +} + +_POS_WORDS = { + "great", "good", "excellent", "perfect", "awesome", "wonderful", + "thank", "thanks", "helpful", "clear", "understood", "working", + "fixed", "solved", "resolved", "appreciate", "love", "amazing", + "nice", "correct", "right", "easy", "simple", +} + +_INTENSIFIERS = {"very", "really", "extremely", "so", "quite", "absolutely"} + + +def _rule_based_sentiment(text: str) -> SentimentResult: + words = re.findall(r"\b\w+\b", text.lower()) + bigrams = [f"{words[i]} {words[i+1]}" for i in range(len(words) - 1)] + + neg_hits = sum(1.5 if words[max(0, i-1)] in _INTENSIFIERS else 1.0 + for i, w in enumerate(words) if w in _NEG_WORDS) + pos_hits = sum(1.5 if words[max(0, i-1)] in _INTENSIFIERS else 1.0 + for i, w in enumerate(words) if w in _POS_WORDS) + neg_hits += sum(1.0 for bg in bigrams if bg in _NEG_WORDS) + pos_hits += sum(1.0 for bg in bigrams if bg in _POS_WORDS) + + total = neg_hits + pos_hits + 1e-9 + compound = (pos_hits - neg_hits) / max(total, 1) + compound = max(-1.0, min(1.0, compound)) + + if compound < -0.15: + label = "negative" + score = min(0.5 + abs(compound) * 0.5, 0.99) + elif compound > 0.15: + label = "positive" + score = min(0.5 + compound * 0.5, 0.99) + else: + label = "neutral" + score = 0.60 + + is_neg = (compound <= NEGATIVE_TRIGGER) and _has_emotion_signal(text) + return SentimentResult(label=label, score=score, is_negative=is_neg, compound=compound) + + +# ── Public API ──────────────────────────────────────────────────────────── + +def analyze_sentiment(text: str) -> SentimentResult: + """ + Analyze the sentiment of a user message using DistilBERT. 
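+
+    Decision order, as implemented below: empty text -> neutral; informational
+    questions and commands -> neutral (Guard 1); fewer than MIN_WORDS_FOR_MODEL
+    words -> neutral (Guard 2); otherwise DistilBERT runs, confidence below
+    NEUTRAL_THRESHOLD is mapped to neutral, and is_negative is True only with a
+    negative label, compound <= NEGATIVE_TRIGGER and an emotion keyword (Guard 3).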
+ + Parameters + ---------- + text : str — raw user message string + + Returns + ------- + SentimentResult — label, score, is_negative, compound + """ + if not text or not text.strip(): + return SentimentResult(label="neutral", score=1.0, is_negative=False, compound=0.0) + + # Guard 1: plain informational questions -> always neutral + if _is_informational_question(text): + print("[Sentiment] Informational question -> forced neutral") + return SentimentResult(label="neutral", score=0.95, is_negative=False, compound=0.0) + + # Guard 2: too short for reliable DistilBERT inference + word_count = len(text.split()) + if word_count < MIN_WORDS_FOR_MODEL: + print(f"[Sentiment] Short message ({word_count} words) -> forced neutral") + return SentimentResult(label="neutral", score=0.90, is_negative=False, compound=0.0) + + # Truncate to 512 words for DistilBERT context window + # NOTE: this only affects the sentiment INPUT — has zero effect on chatbot output length + truncated = " ".join(text.split()[:512]) + pipe = _get_pipeline() + + if pipe is not None: + try: + result = pipe(truncated)[0] + raw_label = result["label"].lower() # "positive" or "negative" + raw_score = float(result["score"]) # 0.0–1.0 + + compound = raw_score if raw_label == "positive" else -raw_score + + # Carve out neutral zone for low-confidence predictions + if raw_score < NEUTRAL_THRESHOLD: + label = "neutral" + compound = compound * (raw_score / NEUTRAL_THRESHOLD) + else: + label = raw_label + + # Guard 3: require real emotion word for negative trigger + is_neg = ( + label == "negative" + and compound <= NEGATIVE_TRIGGER + and _has_emotion_signal(text) + ) + + return SentimentResult( + label=label, + score=raw_score, + is_negative=is_neg, + compound=round(compound, 4), + ) + + except Exception as e: + print(f"[Sentiment] DistilBERT inference error: {e}. Using fallback.") + + return _rule_based_sentiment(text) + + +def analyze_conversation_sentiment(history: list, window: int = 3) -> SentimentResult: + """ + Aggregate sentiment across the last `window` user messages. + More recent messages are weighted more heavily (recency weighting). 
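+
+    Worked example (numbers assumed for illustration): with window=3 and
+    per-message compounds [-0.2, -0.6, -0.7] from oldest to newest, the weights
+    are [1, 2, 3], so the aggregate compound is
+    (1*-0.2 + 2*-0.6 + 3*-0.7) / 6 ≈ -0.58, which is below NEGATIVE_TRIGGER;
+    is_negative is then True provided at least one of those messages contains
+    a genuine emotion keyword.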
+ + Parameters + ---------- + history : list[dict] — each dict has keys "role" and "content" + window : int — how many recent user messages to consider + + Returns + ------- + SentimentResult — weighted aggregate result + """ + user_msgs = [ + h["content"] for h in history + if h.get("role") == "user" and h.get("content", "").strip() + ][-window:] + + if not user_msgs: + return SentimentResult(label="neutral", score=1.0, is_negative=False, compound=0.0) + + weights = list(range(1, len(user_msgs) + 1)) + results = [analyze_sentiment(msg) for msg in user_msgs] + total_w = sum(weights) + + compound = sum(r.compound * w for r, w in zip(results, weights)) / total_w + compound = round(max(-1.0, min(1.0, compound)), 4) + + label_scores: dict = {"positive": 0.0, "neutral": 0.0, "negative": 0.0} + for r, w in zip(results, weights): + label_scores[r.label] += w + label = max(label_scores, key=label_scores.get) + + score = sum(r.score * w for r, w in zip(results, weights)) / total_w + + # Sustained negative also requires at least one genuine emotional message + any_emotion = any(_has_emotion_signal(m) for m in user_msgs) + is_neg = (compound <= NEGATIVE_TRIGGER) and any_emotion + + return SentimentResult( + label=label, + score=round(score, 4), + is_negative=is_neg, + compound=compound, + ) \ No newline at end of file diff --git a/modules/chatbot/vector_store.py b/modules/chatbot/vector_store.py index d71e3a7..691b3a9 100644 --- a/modules/chatbot/vector_store.py +++ b/modules/chatbot/vector_store.py @@ -1,18 +1,51 @@ """modules/chatbot/vector_store.py -ChromaDB vector store with sentence-transformers embeddings (fully local). +ChromaDB vector store — offline-safe version. + +Sets TRANSFORMERS_OFFLINE=1 before any model loading so sentence-transformers +does not attempt network calls when the model is already cached locally. + +Run once WITH internet (hotspot) to cache the model: + python -c "from sentence_transformers import SentenceTransformer; SentenceTransformer('all-MiniLM-L6-v2')" + +Then disconnect — all subsequent runs work fully offline. """ import os import pandas as pd + +# ── Force offline mode BEFORE importing chromadb / sentence-transformers ── +# Prevents DNS lookups to huggingface.co on restricted/no-internet networks. +os.environ.setdefault("TRANSFORMERS_OFFLINE", "1") +os.environ.setdefault("HF_DATASETS_OFFLINE", "1") +# ───────────────────────────────────────────────────────────────────────── + import chromadb from chromadb.utils.embedding_functions import SentenceTransformerEmbeddingFunction from config import CHROMA_FOLDER, DETECTIONS_CSV, CARBON_CSV, DOCS_FOLDER -COLLECTION_NAME = "solar_pv_knowledge" -EMBEDDING_MODEL = "all-MiniLM-L6-v2" # ~80 MB, downloads once +COLLECTION_NAME = "solar_pv_knowledge" +EMBEDDING_MODEL = "all-MiniLM-L6-v2" +EMPATHY_DOC_NAME = "empathetic_support_guide.pdf" _client = None _collection = None +# ── Severity + action mapping ───────────────────────────────────────────── +DEFECT_SEVERITY = { + "black_core": "Critical", + "crack": "High", + "star_crack": "High", + "finger": "Medium", + "thick_line": "Low", +} + +DEFECT_ACTIONS = { + "black_core": "Immediate panel replacement recommended. Black core indicates severe cell failure.", + "crack": "Schedule urgent inspection. Cracks propagate and cause significant power loss.", + "star_crack": "Schedule urgent inspection. Star cracks indicate mechanical stress damage.", + "finger": "Monitor closely. Finger defects affect current collection efficiency.", + "thick_line": "Schedule routine maintenance. 
Thick lines may indicate metallization issues.", +} + def _get_client(): global _client @@ -29,63 +62,253 @@ def get_collection(): _collection = _get_client().get_or_create_collection( name=COLLECTION_NAME, embedding_function=ef, + metadata={"hnsw:space": "cosine"}, ) return _collection -def query_collection(query: str, n_results: int = 5) -> list: +def query_collection(query: str, n_results: int = 6) -> list: + """Cosine similarity search — returns top-n matching document strings.""" try: - results = get_collection().query(query_texts=[query], n_results=n_results) + col = get_collection() + count = col.count() + n = min(n_results, count) if count > 0 else 0 + if n == 0: + return [] + results = col.query( + query_texts=[query], + n_results=n, + include=["documents", "metadatas", "distances"], + ) return results.get("documents", [[]])[0] except Exception as e: print(f"[VectorStore] Query error: {e}") return [] +def query_collection_with_filter(query: str, where: dict, n_results: int = 10) -> list: + """Cosine similarity search filtered by metadata.""" + try: + col = get_collection() + count = col.count() + n = min(n_results, count) if count > 0 else 0 + if n == 0: + return [] + results = col.query( + query_texts=[query], + n_results=n, + where=where, + include=["documents", "metadatas", "distances"], + ) + return results.get("documents", [[]])[0] + except Exception as e: + print(f"[VectorStore] Filtered query error: {e}") + return [] + + +# ── Document ingestion ──────────────────────────────────────────────────── + def ingest_docs(): + """ + Ingest PDF documents from DOCS_FOLDER into ChromaDB. + empathetic_support_guide.pdf is tagged type='empathy_support'. + All other PDFs are tagged type='document'. + """ from pypdf import PdfReader - col = get_collection() + col = get_collection() count = 0 + if not os.path.isdir(DOCS_FOLDER): + print(f"[RAG] DOCS_FOLDER not found: {DOCS_FOLDER}") + return count + for fname in os.listdir(DOCS_FOLDER): if not fname.lower().endswith(".pdf"): continue + fpath = os.path.join(DOCS_FOLDER, fname) + doc_type = "empathy_support" if fname.lower() == EMPATHY_DOC_NAME.lower() else "document" + try: - reader = PdfReader(os.path.join(DOCS_FOLDER, fname)) - ids, texts = [], [] + reader = PdfReader(fpath) + ids, texts, metas = [], [], [] for i, page in enumerate(reader.pages): text = (page.extract_text() or "").strip() for j in range(0, len(text), 800): chunk = text[j:j + 800].strip() - if chunk: + if len(chunk) > 50: ids.append(f"{fname}_p{i}_c{j}") texts.append(chunk) + metas.append({ + "source": fname, + "page": i, + "type": doc_type, + "image_filename": "", + }) if ids: - col.upsert(ids=ids, documents=texts, - metadatas=[{"source": fname}] * len(ids)) + col.upsert(ids=ids, documents=texts, metadatas=metas) count += len(ids) - print(f"[RAG] {fname}: {len(ids)} chunks ingested") + print(f"[RAG] {fname}: {len(ids)} chunks ingested (type='{doc_type}')") except Exception as e: print(f"[RAG] Error ingesting {fname}: {e}") return count +# ── CSV formatters ──────────────────────────────────────────────────────── + +def _format_detection_row(row: pd.Series) -> str: + defect = str(row.get("defect_class", "unknown")) + severity = DEFECT_SEVERITY.get(defect, "Unknown") + action = DEFECT_ACTIONS.get(defect, "Consult a technician.") + parts = [] + if "timestamp" in row.index: parts.append(f"Date: {row['timestamp']}") + if "image_filename" in row.index: parts.append(f"Image: {row['image_filename']}") + parts.append(f"Defect detected: {defect}") + parts.append(f"Severity: 
{severity}") + if "confidence" in row.index: parts.append(f"Confidence: {float(row['confidence']):.2%}") + if "area_ratio" in row.index: parts.append(f"Defect area: {float(row['area_ratio'])*100:.2f}% of panel") + if "bbox_x1" in row.index: + parts.append(f"Bounding box: ({row['bbox_x1']},{row['bbox_y1']}) -> ({row['bbox_x2']},{row['bbox_y2']})") + if "image_width" in row.index and "image_height" in row.index: + parts.append(f"Image size: {row['image_width']}x{row['image_height']}px") + parts.append(f"Recommended action: {action}") + return " | ".join(parts) + + +def _format_carbon_row(row: pd.Series) -> str: + parts = [] + if "timestamp" in row.index: parts.append(f"Date: {row['timestamp']}") + if "image_filename" in row.index: parts.append(f"Image: {row['image_filename']}") + if "city" in row.index: parts.append(f"City: {row['city']}") + if "panel_power_w" in row.index: parts.append(f"Panel power: {row['panel_power_w']}W") + if "ambient_temp_c" in row.index: parts.append(f"Ambient temperature: {row['ambient_temp_c']}C") + if "irradiance_w_m2" in row.index: parts.append(f"Irradiance: {row['irradiance_w_m2']} W/m2") + if "emission_factor" in row.index: parts.append(f"Emission factor: {row['emission_factor']}") + if "num_defects" in row.index: parts.append(f"Number of defects: {row['num_defects']}") + if "dominant_defect" in row.index: + dom = str(row['dominant_defect']) + sev = DEFECT_SEVERITY.get(dom, "Unknown") + parts.append(f"Dominant defect: {dom} (Severity: {sev})") + if "total_degradation_pct" in row.index: parts.append(f"Total degradation: {row['total_degradation_pct']}%") + if "co2_kg_per_year" in row.index: parts.append(f"CO2 emission per year: {row['co2_kg_per_year']} kg") + if not parts: + parts = [f"{k}: {v}" for k, v in row.items() if pd.notna(v)] + return " | ".join(parts) + + def ingest_csvs(): + """Ingest detection and carbon CSV rows into ChromaDB.""" col = get_collection() total = 0 - for csv_path, prefix in [(DETECTIONS_CSV, "det"), (CARBON_CSV, "carbon")]: + csv_configs = [ + (DETECTIONS_CSV, "det", "detection", _format_detection_row), + (CARBON_CSV, "carbon", "carbon", _format_carbon_row), + ] + for csv_path, prefix, doc_type, formatter in csv_configs: if not os.path.isfile(csv_path): + print(f"[RAG] CSV not found, skipping: {csv_path}") continue try: df = pd.read_csv(csv_path) ids, texts, metas = [], [], [] for i, row in df.iterrows(): + text = formatter(row) + if not text.strip(): + continue + img_fname = str(row.get("image_filename", "")) ids.append(f"{prefix}_row_{i}") - texts.append(" | ".join(f"{k}={v}" for k, v in row.items())) - metas.append({"source": prefix}) + texts.append(text) + metas.append({ + "source": os.path.basename(csv_path), + "type": doc_type, + "row": int(i), + "image_filename": img_fname, + }) if ids: - col.upsert(ids=ids, documents=texts, metadatas=metas) + for bs in range(0, len(ids), 200): + col.upsert( + ids=ids[bs:bs+200], + documents=texts[bs:bs+200], + metadatas=metas[bs:bs+200], + ) total += len(ids) print(f"[RAG] {os.path.basename(csv_path)}: {len(ids)} rows ingested") except Exception as e: print(f"[RAG] CSV error ({csv_path}): {e}") - return total \ No newline at end of file + return total + + +# ── Live per-image ingestion ────────────────────────────────────────────── + +def ingest_image_result(image_filename: str, detections: list, carbon_data: dict = None): + """Ingest a single image's detection + carbon results into ChromaDB.""" + col = get_collection() + ids, texts, metas = [], [], [] + + for i, det in enumerate(detections): + 
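+        # One retrievable chunk per detection: class, severity, confidence,
+        # defect area %, bounding box and recommended action, tagged with
+        # image_filename so metadata-filtered queries can pull everything
+        # known about a single uploaded image.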
defect = str(det.get("defect_class", "unknown")) + severity = DEFECT_SEVERITY.get(defect, "Unknown") + action = DEFECT_ACTIONS.get(defect, "Consult a technician.") + area_pct = float(det.get("area_ratio", 0)) * 100 + conf = float(det.get("confidence", 0)) + text = ( + f"Image: {image_filename} | Defect detected: {defect} | Severity: {severity} | " + f"Confidence: {conf:.2%} | Defect area: {area_pct:.2f}% of panel | " + f"Bounding box: ({det.get('bbox_x1')},{det.get('bbox_y1')}) -> " + f"({det.get('bbox_x2')},{det.get('bbox_y2')}) | " + f"Image size: {det.get('image_width')}x{det.get('image_height')}px | " + f"Recommended action: {action}" + ) + ids.append(f"img_{image_filename}_det_{i}") + texts.append(text) + metas.append({"source": "live_detection", "type": "detection", "image_filename": image_filename}) + + if detections: + defect_counts: dict = {} + for d in detections: + k = d.get("defect_class", "unknown") + defect_counts[k] = defect_counts.get(k, 0) + 1 + total_area = sum(float(d.get("area_ratio", 0)) * 100 for d in detections) + severities = [DEFECT_SEVERITY.get(d.get("defect_class", ""), "Low") for d in detections] + top_sev = ( + "Critical" if "Critical" in severities else + "High" if "High" in severities else + "Medium" if "Medium" in severities else "Low" + ) + breakdown = ", ".join(f"{k} x{v}" for k, v in defect_counts.items()) + summary = ( + f"Image: {image_filename} | Total defects found: {len(detections)} | " + f"Defect breakdown: {breakdown} | Total defect area coverage: {total_area:.2f}% | " + f"Overall severity: {top_sev}" + ) + ids.append(f"img_{image_filename}_summary") + texts.append(summary) + metas.append({"source": "live_detection", "type": "detection_summary", "image_filename": image_filename}) + + if carbon_data: + dom = str(carbon_data.get("dominant_defect", "unknown")) + sev = DEFECT_SEVERITY.get(dom, "Unknown") + c_text = ( + f"Image: {image_filename} | City: {carbon_data.get('city', 'N/A')} | " + f"Panel power: {carbon_data.get('panel_power_w', 'N/A')}W | " + f"Ambient temperature: {carbon_data.get('ambient_temp_c', 'N/A')}C | " + f"Irradiance: {carbon_data.get('irradiance_w_m2', 'N/A')} W/m2 | " + f"Number of defects: {carbon_data.get('num_defects', 'N/A')} | " + f"Dominant defect: {dom} (Severity: {sev}) | " + f"Total degradation: {carbon_data.get('total_degradation_pct', 'N/A')}% | " + f"CO2 emission per year: {carbon_data.get('co2_kg_per_year', 'N/A')} kg" + ) + ids.append(f"img_{image_filename}_carbon") + texts.append(c_text) + metas.append({"source": "live_carbon", "type": "carbon", "image_filename": image_filename}) + + if ids: + col.upsert(ids=ids, documents=texts, metadatas=metas) + print(f"[RAG] Ingested {len(ids)} chunks for image: {image_filename}") + + +def get_stats() -> dict: + """Return collection stats.""" + try: + col = get_collection() + return {"total_documents": col.count(), "collection": COLLECTION_NAME} + except Exception as e: + return {"error": str(e)} \ No newline at end of file diff --git a/modules/detection/detection_utils.py b/modules/detection/detection_utils.py index 004d3ff..72550f1 100644 --- a/modules/detection/detection_utils.py +++ b/modules/detection/detection_utils.py @@ -19,9 +19,13 @@ def draw_boxes(image_path: str, detections: list, out_path: str) -> str: except Exception: font = ImageFont.load_default() + # Blackish Brown Bounding Box + BB_COLOR = "#3D2B1F" + for det in detections: x1, y1, x2, y2 = det["bbox"] - color = COLOR_MAP.get(det["class_name"], "#00ff00") + # Use specific color map if available, else default 
to blackish brown + color = COLOR_MAP.get(det["class_name"], BB_COLOR) for i in range(3): draw.rectangle([x1 - i, y1 - i, x2 + i, y2 + i], outline=color) label = f"{det['class_name']} {det['confidence']:.0%}" diff --git a/modules/detection_engine/engine.py b/modules/detection_engine/engine.py new file mode 100644 index 0000000..6e310aa --- /dev/null +++ b/modules/detection_engine/engine.py @@ -0,0 +1,809 @@ +from __future__ import annotations + +import json, logging, math, time, uuid, warnings +from dataclasses import dataclass +from datetime import datetime, timezone +from typing import Any, Dict, List, Tuple + +import numpy as np +from PIL import Image, ImageDraw, ImageFont +from scipy import stats +from scipy.optimize import Bounds, LinearConstraint, milp +from scipy.special import expit + +try: + from modules.detection.detection_model import run_detection # type: ignore +except ImportError: + warnings.warn("Detection module not found — mock active.", stacklevel=2) + + def run_detection(image_path: str) -> dict: # noqa: F811 + rng = np.random.default_rng(seed=abs(hash(image_path)) % (2 ** 31)) + H, W = 600, 800 + pool = list(DEFECT_CATALOG.keys()) + n = int(rng.integers(2, 7)) + dets = [] + for _ in range(n): + cls = str(rng.choice(pool)) + x1 = int(rng.uniform(0, W * 0.65)) + y1 = int(rng.uniform(0, H * 0.65)) + x2 = int(rng.uniform(x1 + 30, min(W, x1 + W * 0.45))) + y2 = int(rng.uniform(y1 + 30, min(H, y1 + H * 0.45))) + conf = float(rng.beta(9, 2)) + dets.append({"class_name": cls, "confidence": round(conf, 4), + "bbox": [x1, y1, x2, y2]}) + return {"detections": dets, "image_shape": (H, W), "count": len(dets)} + + +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s | %(levelname)-8s | PVEngine | %(message)s", + datefmt="%Y-%m-%dT%H:%M:%S", +) +log = logging.getLogger("PVDecisionEngine") + + +# ── §1 DOMAIN CONSTANTS ────────────────────────────────────────────────────── + +DEFECT_CATALOG: Dict[str, Dict] = { + "fingers": { + "weight": 0.55, + "el_contrast": 0.60, + "string_impact": False, + "power_loss_frac": 0.12, + "iec_severity": 2, + "color": "#f1c40f", + "description": "Finger interruption — series resistance rise, Isc loss", + "el_signature": "dark linear bands along contact fingers", + }, + "star_crack": { + "weight": 0.80, + "el_contrast": 0.75, + "string_impact": False, + "power_loss_frac": 0.25, + "iec_severity": 3, + "color": "#9b59b6", + "description": "Star crack — radial fracture, severe current-path disruption", + "el_signature": "radial dark lines from mechanical impact point", + }, + "black_core": { + "weight": 0.92, + "el_contrast": 0.95, + "string_impact": True, + "power_loss_frac": 0.35, + "iec_severity": 3, + "color": "#e74c3c", + "description": "Black core — fully inactive cell, reverse-bias, string risk", + "el_signature": "uniform dark cell region in EL image", + }, + "crack": { + "weight": 0.65, + "el_contrast": 0.68, + "string_impact": False, + "power_loss_frac": 0.18, + "iec_severity": 2, + "color": "#e67e22", + "description": "Transverse crack — partial conductivity loss", + "el_signature": "dark line across cell body", + }, + "thick_line": { + "weight": 0.40, + "el_contrast": 0.42, + "string_impact": False, + "power_loss_frac": 0.08, + "iec_severity": 1, + "color": "#1abc9c", + "description": "Thick line — bus-bar misfire, minor shading loss", + "el_signature": "bright/dark thick band along bus-bar", + }, +} + +PHYSICS = { + "weibull_shape_beta": 2.2, + "weibull_scale_eta_years": 27.0, +} + +FINANCE = { + "panel_rated_power_wp": 400, + 
"capacity_factor": 0.165, + "electricity_price_usd_kwh": 0.12, + "discount_rate": 0.07, + "analysis_horizon_years": 10, + "grid_emission_factor_kg_kwh": 0.417, + "carbon_price_usd_per_tonne": 65.0, + "panel_replacement_usd": 320.0, + "panel_repair_usd": 110.0, + "panel_cleaning_usd": 18.0, +} + +PSI_TIERS = [ + (0.00, 0.05, "NEGLIGIBLE", 1), + (0.05, 0.15, "LOW", 1), + (0.15, 0.30, "MODERATE", 2), + (0.30, 0.50, "HIGH", 2), + (0.50, 1.01, "CRITICAL", 3), +] + +URGENCY_MAP = { + "NEGLIGIBLE": ("SCHEDULE_ROUTINE", "Re-inspect at next O&M cycle (<=12 months)"), + "LOW": ("SCHEDULE_ROUTINE", "Flag for next maintenance visit"), + "MODERATE": ("PLAN_WITHIN_30D", "Plan corrective action within 30 days"), + "HIGH": ("EXPEDITE_7D", "Expedite maintenance within 7 days"), + "CRITICAL": ("IMMEDIATE_ACTION", "Isolate string; dispatch technician within 24 hours"), +} + +MCMC_SAMPLES = 12_000 +MCMC_BURNIN = 2_000 + + +# ── §2 DATA STRUCTURES ─────────────────────────────────────────────────────── + +@dataclass +class DefectInstance: + class_name: str + confidence: float + bbox: List[int] + area_ratio: float + el_contrast_score: float = 0.0 + omega: float = 0.0 + instance_severity: float = 0.0 + +@dataclass +class PSIResult: + psi: float + tier: str + iec_class: int + dominant_defect: str + max_instance_severity: float + critical_flag: bool + string_risk_flag: bool + defect_summary: Dict[str, int] + +@dataclass +class BayesianRiskResult: + posterior_mean: float + posterior_std: float + credible_interval_95: Tuple[float, float] + prior_alpha: float + prior_beta: float + posterior_alpha: float + posterior_beta: float + mcmc_ess: float + +@dataclass +class WeibullRULResult: + rul_years_p50: float + rul_years_p10: float + rul_years_p90: float + hazard_rate: float + el_acceleration_factor: float + virtual_age_years: float + +@dataclass +class FinancialResult: + annual_energy_loss_kwh: float + annual_revenue_loss_usd: float + npv_loss_usd: float + annual_carbon_loss_kg: float + horizon_carbon_loss_tonnes: float + carbon_cost_usd: float + +@dataclass +class MILPResult: + recommended_action: str + action_binary_vector: Dict[str, int] + optimal_cost: float + solver_status: str + action_costs: Dict[str, float] + action_benefits: Dict[str, float] + net_values: Dict[str, float] + + +# ── §3 DETECTION UTILS (from detection_utils.py) ──────────────────────────── + +def draw_boxes(image_path: str, detections: list, out_path: str) -> str: + img = Image.open(image_path).convert("RGB") + draw = ImageDraw.Draw(img) + try: + font = ImageFont.truetype("arial.ttf", max(14, img.width // 50)) + except Exception: + font = ImageFont.load_default() + for det in detections: + x1, y1, x2, y2 = det["bbox"] + color = DEFECT_CATALOG.get(det["class_name"], {}).get("color", "#00ff00") + for i in range(3): + draw.rectangle([x1 - i, y1 - i, x2 + i, y2 + i], outline=color) + label = f"{det['class_name']} {det['confidence']:.0%}" + try: + bb = draw.textbbox((x1, y1 - 20), label, font=font) + draw.rectangle([bb[0] - 2, bb[1] - 2, bb[2] + 2, bb[3] + 2], fill=color) + draw.text((x1, y1 - 20), label, fill="white", font=font) + except Exception: + draw.text((x1, max(0, y1 - 20)), label, fill=color, font=font) + img.save(out_path) + return out_path + + +def compute_area_ratios(detections: list, img_w: int, img_h: int) -> list: + panel_area = img_w * img_h if img_w * img_h > 0 else 1 + for det in detections: + x1, y1, x2, y2 = det["bbox"] + det["area_ratio"] = round((x2 - x1) * (y2 - y1) / panel_area, 6) + return detections + + +# ── §4 LAYER 0 — EL 
PIXEL CONTRAST SCORING ────────────────────────────────── +# +# EL images encode minority-carrier lifetime as luminescence intensity. +# Defective regions emit less light and appear darker than healthy cells. +# +# For each bounding box: +# raw_contrast = clip((panel_mean - patch_mean) / panel_mean, 0, 1) +# el_score_i = el_contrast_i * raw_contrast +# +# el_contrast_i is the class-specific EL sensitivity from DEFECT_CATALOG. +# black_core has el_contrast=0.95 (fully dark); thick_line has 0.42 (subtle). +# Image is converted to L (luminance) channel regardless of input mode. +# Fallback: if image unreadable, el_score = 0.5 * el_contrast (conservative). + +def compute_el_contrast_scores( + image_path: str, + defects: List[DefectInstance], +) -> List[DefectInstance]: + try: + arr = np.asarray(Image.open(image_path).convert("L"), dtype=float) + p_mean = arr.mean() if arr.mean() > 0 else 1.0 + for d in defects: + x1, y1, x2, y2 = d.bbox + patch = arr[max(0, y1):max(1, y2), max(0, x1):max(1, x2)] + raw = float(np.clip((p_mean - patch.mean()) / p_mean, 0.0, 1.0)) if patch.size > 0 else 0.5 + alpha = DEFECT_CATALOG.get(d.class_name, {}).get("el_contrast", 0.5) + d.el_contrast_score = alpha * raw + except Exception: + for d in defects: + alpha = DEFECT_CATALOG.get(d.class_name, {}).get("el_contrast", 0.5) + d.el_contrast_score = alpha * 0.5 + return defects + + +# ── §5 UTILITY FUNCTIONS ───────────────────────────────────────────────────── + +def _iou(b1: List[int], b2: List[int]) -> float: + xi1 = max(b1[0], b2[0]); yi1 = max(b1[1], b2[1]) + xi2 = min(b1[2], b2[2]); yi2 = min(b1[3], b2[3]) + inter = max(0, xi2 - xi1) * max(0, yi2 - yi1) + a1 = max(1, (b1[2] - b1[0]) * (b1[3] - b1[1])) + a2 = max(1, (b2[2] - b2[0]) * (b2[3] - b2[1])) + return inter / (a1 + a2 - inter) + +def _overlap_penalty(defects: List[DefectInstance]) -> np.ndarray: + n = len(defects) + rho = np.ones(n) + for i in range(n): + s = sum(_iou(defects[i].bbox, defects[j].bbox) for j in range(n) if j != i) + rho[i] = 1.0 / (1.0 + s) + return rho + +def _psi_tier(psi: float) -> Tuple[str, int]: + for lo, hi, label, cls in PSI_TIERS: + if lo <= psi < hi: + return label, cls + return "CRITICAL", 3 + +def _npv_factor(rate: float, years: int) -> float: + return sum(1.0 / (1.0 + rate) ** t for t in range(1, years + 1)) + +def _kish_ess(weights: np.ndarray) -> float: + w = weights / weights.sum() + return float(1.0 / np.sum(w ** 2)) + +def _safe_logit(x: Any) -> Any: + return np.log(np.clip(x, 1e-6, 1 - 1e-6) / (1.0 - np.clip(x, 1e-6, 1 - 1e-6))) + + +# ── §6 LAYER 1 — PANEL SEVERITY INDEX (PSI) ───────────────────────────────── +# +# EL-adapted Chebyshev–L2 Hybrid Norm (IEC 62446-3) +# +# Per instance i: +# raw_i = area_ratio_i * omega_i * confidence_i * (1 + el_score_i) * rho_i +# +# omega_i = IEC class weight +# el_score_i = Layer 0 EL pixel-darkness contrast score +# rho_i = 1 / (1 + sum_j IoU(i,j)) [NMS-aware overlap suppression] +# +# PSI = clip( 0.6 * max(raw) + 0.4 * rms(raw), 0, 1 ) +# 0.6 * max -> Chebyshev: worst-case single defect (dominant failure mode) +# 0.4 * rms -> L2: distributed EL damage accumulation across all instances + +def compute_psi(defects: List[DefectInstance]) -> PSIResult: + if not defects: + return PSIResult(0.0, "NEGLIGIBLE", 1, "none", 0.0, False, False, {}) + + rho = _overlap_penalty(defects) + raws: List[float] = [] + summary: Dict[str, int] = {} + critical_flag = False + string_flag = False + dominant_cls = "" + max_sev = 0.0 + + for i, d in enumerate(defects): + cat = DEFECT_CATALOG.get(d.class_name, 
DEFECT_CATALOG["crack"]) + omega = cat["weight"] + raw = d.area_ratio * omega * d.confidence * (1.0 + d.el_contrast_score) * rho[i] + + d.omega = omega + d.instance_severity = raw + raws.append(raw) + + summary[d.class_name] = summary.get(d.class_name, 0) + 1 + if cat["iec_severity"] == 3: + critical_flag = True + if cat["string_impact"]: + string_flag = True + if raw > max_sev: + max_sev = raw + dominant_cls = d.class_name + + arr = np.asarray(raws, dtype=float) + psi = float(np.clip(0.6 * arr.max() + 0.4 * np.sqrt(np.mean(arr ** 2)), 0.0, 1.0)) + tier, iec_cls = _psi_tier(psi) + + return PSIResult( + psi=round(psi, 6), tier=tier, iec_class=iec_cls, + dominant_defect=dominant_cls, max_instance_severity=round(max_sev, 6), + critical_flag=critical_flag, string_risk_flag=string_flag, + defect_summary=summary, + ) + + +# ── §7 LAYER 2 — HIERARCHICAL BAYESIAN RISK (Beta-Binomial + MH-MCMC) ─────── +# +# Prior elicitation from IEC 62446-3 tier statistics: +# theta ~ Beta(alpha0, beta0) +# NEGLIGIBLE -> Beta(1.0, 9.0) LOW -> Beta(1.5, 7.5) +# MODERATE -> Beta(2.0, 6.0) HIGH -> Beta(4.0, 4.0) +# CRITICAL -> Beta(7.0, 2.0) +# +# Virtual likelihood (pseudo-count conjugate update): +# n_eff = 20 +# k_eff = round(clip(PSI * multiplier * n_eff, 0, n_eff)) +# multiplier: critical_flag -> *1.4, string_flag -> *1.2 +# Posterior: Beta(alpha0 + k_eff, beta0 + n_eff - k_eff) +# +# MCMC refinement — EL image uncertainty is modelled via latent variable: +# eps ~ Beta(2.5, 3.5) [right-skewed: EL tends to understate early damage] +# Metropolis-Hastings with Beta proposal: eps' ~ Beta(eps*phi, (1-eps)*phi), phi=25 +# Combined: theta* = sigmoid(logit(theta_post) + logit(eps)) +# ESS via Kish (1965) + +def compute_bayesian_risk(psi_result: PSIResult) -> BayesianRiskResult: + psi = psi_result.psi + tier = psi_result.tier + + prior_map = { + "NEGLIGIBLE": (1.0, 9.0), "LOW": (1.5, 7.5), "MODERATE": (2.0, 6.0), + "HIGH": (4.0, 4.0), "CRITICAL": (7.0, 2.0), + } + alpha0, beta0 = prior_map.get(tier, (2.0, 6.0)) + + multiplier = 1.0 + if psi_result.critical_flag: multiplier *= 1.40 + if psi_result.string_risk_flag: multiplier *= 1.20 + + n_eff = 20 + k_eff = int(round(np.clip(psi * multiplier * n_eff, 0, n_eff))) + alpha_n = alpha0 + k_eff + beta_n = beta0 + (n_eff - k_eff) + theta_post = alpha_n / (alpha_n + beta_n) + + rng = np.random.default_rng(seed=42) + phi = 25.0 + eps_cur = float(rng.beta(2.5, 3.5)) + samples = [] + + def log_target(e: float) -> float: + return float(stats.beta.logpdf(e, 2.5, 3.5)) if 0 < e < 1 else -np.inf + + for step in range(MCMC_SAMPLES + MCMC_BURNIN): + a_p = max(1e-4, eps_cur * phi) + b_p = max(1e-4, (1.0 - eps_cur) * phi) + eps_pr = float(rng.beta(a_p, b_p)) + la = (log_target(eps_pr) - log_target(eps_cur) + + stats.beta.logpdf(eps_cur, eps_pr * phi, (1 - eps_pr) * phi) + - stats.beta.logpdf(eps_pr, eps_cur * phi, (1 - eps_cur) * phi)) + if np.log(rng.uniform()) < la: + eps_cur = eps_pr + if step >= MCMC_BURNIN: + samples.append(eps_cur) + + samples = np.asarray(samples, dtype=float) + theta_star = expit(_safe_logit(theta_post) + _safe_logit(samples)) + ess = _kish_ess(np.ones(len(theta_star)) / len(theta_star)) + + return BayesianRiskResult( + posterior_mean=round(float(np.mean(theta_star)), 6), + posterior_std=round(float(np.std(theta_star)), 6), + credible_interval_95=( + round(float(np.percentile(theta_star, 2.5)), 4), + round(float(np.percentile(theta_star, 97.5)), 4), + ), + prior_alpha=alpha0, prior_beta=beta0, + posterior_alpha=round(alpha_n, 2), posterior_beta=round(beta_n, 2), + 
mcmc_ess=round(ess, 1), + ) + + +# ── §8 LAYER 3 — WEIBULL RUL WITH EL-DERIVED ACCELERATION FACTOR ──────────── +# +# EL contrast replaces thermal delta-T as the degradation signal. +# EL-AF maps luminescence drop to equivalent accelerated ageing. +# +# EL Acceleration Factor: +# mean_el = mean(el_contrast_score_i * confidence_i) over all detections +# AF_el = 1 + mean_el * 8 [calibrated: full EL drop -> 9x age speed] +# AF_risk = 1 + posterior_mean * 1.5 [Bayesian risk inflation] +# AF_total = clip(AF_el * AF_risk, 1, 20) +# +# Weibull Proportional-Hazard (fleet: beta=2.2, eta=27 yr, NREL/PVPMC data): +# eta* = eta / AF_total [deflated characteristic life] +# t_v = 2 + (AF_total - 1) * 1.5 [Kijima Type-II virtual age, deploy=2yr] +# h(t) = (beta/eta*) * (t_v/eta*)^(beta-1) +# +# Conditional RUL at percentile p: +# base = (t_v / eta*)^beta +# delta = eta* * (base - log(S_target))^(1/beta) - t_v +# P10 -> S=0.9 (pessimistic), P50 -> S=0.5, P90 -> S=0.1 (optimistic) + +def compute_weibull_rul( + psi_result: PSIResult, + risk_result: BayesianRiskResult, + defects: List[DefectInstance], +) -> WeibullRULResult: + beta_w = PHYSICS["weibull_shape_beta"] + eta = PHYSICS["weibull_scale_eta_years"] + + mean_el = float(np.mean([d.el_contrast_score * d.confidence for d in defects])) if defects else 0.0 + AF_el = 1.0 + mean_el * 8.0 + AF_risk = 1.0 + risk_result.posterior_mean * 1.5 + AF_total = float(np.clip(AF_el * AF_risk, 1.0, 20.0)) + + eta_star = eta / AF_total + t_v = 2.0 + (AF_total - 1.0) * 1.5 + h_t = (beta_w / eta_star) * ((t_v / eta_star) ** (beta_w - 1.0)) + base_term = (t_v / eta_star) ** beta_w + + def rul_at(s_target: float) -> float: + return max(0.0, eta_star * ((base_term - math.log(max(s_target, 1e-9))) ** (1.0 / beta_w)) - t_v) + + return WeibullRULResult( + rul_years_p50=round(rul_at(0.50), 2), + rul_years_p10=round(rul_at(0.90), 2), + rul_years_p90=round(rul_at(0.10), 2), + hazard_rate=round(h_t, 6), + el_acceleration_factor=round(AF_total, 3), + virtual_age_years=round(t_v, 2), + ) + + +# ── §9 LAYER 4 — NPV-DISCOUNTED FINANCIAL & CARBON PROJECTION ─────────────── +# +# Efficiency loss (EL-calibrated sensitivity 0.75): +# eta_loss = clip(PSI * 0.75, 0, 0.99) +# +# Annual energy loss: +# dE = P_rated[Wp] * CF * 8760h / 1000 * eta_loss [kWh/yr] +# +# NPV-discounted revenue loss (WACC r over horizon T): +# NPV = dE * price * sum_{t=1}^{T} (1+r)^{-t} +# +# Carbon: +# dCO2_kg = dE * EF_kg_kwh [kg/yr] +# horizon_tonnes = dCO2_kg * T / 1000 +# carbon_cost_usd = horizon_tonnes * carbon_price + +def compute_financial_impact(psi_result: PSIResult) -> FinancialResult: + F = FINANCE + eta_loss = float(np.clip(psi_result.psi * 0.75, 0.0, 0.99)) + annual_kwh_full = F["panel_rated_power_wp"] * F["capacity_factor"] * 8760 / 1000.0 + annual_el_loss = annual_kwh_full * eta_loss + annual_rev_loss = annual_el_loss * F["electricity_price_usd_kwh"] + npv_f = _npv_factor(F["discount_rate"], F["analysis_horizon_years"]) + npv_loss = annual_rev_loss * npv_f + annual_co2_kg = annual_el_loss * F["grid_emission_factor_kg_kwh"] + horizon_co2_t = annual_co2_kg * F["analysis_horizon_years"] / 1000.0 + carbon_cost = horizon_co2_t * F["carbon_price_usd_per_tonne"] + + return FinancialResult( + annual_energy_loss_kwh=round(annual_el_loss, 3), + annual_revenue_loss_usd=round(annual_rev_loss, 3), + npv_loss_usd=round(npv_loss, 2), + annual_carbon_loss_kg=round(annual_co2_kg, 3), + horizon_carbon_loss_tonnes=round(horizon_co2_t, 4), + carbon_cost_usd=round(carbon_cost, 2), + ) + + +# ── §10 LAYER 5 — TRUE MILP 
MAINTENANCE OPTIMISATION (scipy HiGHS) ────────── +# +# Variables: x = [x_monitor, x_clean, x_repair, x_replace] in {0,1}^4 +# +# Objective — minimise total net cost: +# min sum_j c_j * x_j +# c_j = direct_cost_j + risk_penalty_j - NPV_benefit_j +# +# Recovery factors: monitor=0.00 clean=0.15 repair=0.60 replace=0.95 +# Risk penalty: monitor -> risk_mean * NPV * 0.50 +# clean -> risk_mean * NPV * 0.30 +# repair, replace -> 0 +# +# Hard constraints (linear inequalities): +# (C1) sum x_j = 1 [exactly one action selected] +# (C2) x_monitor <= 0 if critical_flag [no passive watch on critical] +# (C3) x_replace <= 0 if PSI < 0.15 [no replace on near-healthy] +# (C4) x_replace = 1 if RUL_p50 < 3 yr [forced replacement near EOL] +# C4 supersedes C3 — RUL guard is encoded first, C3 skipped if force_replace +# +# Solver: scipy.optimize.milp -> HiGHS (same backend as PuLP, OR-Tools, CVXPY) +# Fallback: lexicographic argmin on net cost vector if HiGHS returns infeasible + +def compute_milp_decision( + psi_result: PSIResult, + risk_result: BayesianRiskResult, + fin_result: FinancialResult, + rul_result: WeibullRULResult, +) -> MILPResult: + F = FINANCE + risk_mean = risk_result.posterior_mean + npv_loss = fin_result.npv_loss_usd + psi = psi_result.psi + rul_p50 = rul_result.rul_years_p50 + + ACTIONS = ["monitor", "clean", "repair", "replace"] + n = len(ACTIONS) + direct_costs = np.array([0.0, F["panel_cleaning_usd"], F["panel_repair_usd"], F["panel_replacement_usd"]]) + recovery_factors = np.array([0.00, 0.15, 0.60, 0.95]) + benefits = npv_loss * recovery_factors + risk_penalties = np.array([risk_mean * npv_loss * 0.50, risk_mean * npv_loss * 0.30, 0.0, 0.0]) + c = direct_costs + risk_penalties - benefits + + force_replace = rul_p50 < 3.0 + A_rows = [np.ones((1, n))] + lb_rows = [np.array([1.0])] + ub_rows = [np.array([1.0])] + + if psi_result.critical_flag: + r = np.zeros(n); r[0] = 1.0 + A_rows.append(r[np.newaxis, :]); lb_rows.append(np.array([-np.inf])); ub_rows.append(np.array([0.0])) + log.info("MILP: critical_flag -> monitor disabled") + + if psi < 0.15 and not force_replace: + r = np.zeros(n); r[3] = 1.0 + A_rows.append(r[np.newaxis, :]); lb_rows.append(np.array([-np.inf])); ub_rows.append(np.array([0.0])) + log.info("MILP: low PSI -> replace disabled") + + if force_replace: + r = np.zeros(n); r[3] = 1.0 + A_rows.append(r[np.newaxis, :]); lb_rows.append(np.array([1.0])); ub_rows.append(np.array([1.0])) + log.info("MILP: RUL P50 < 3 yr -> replace forced") + + result = milp( + c=c, + constraints=LinearConstraint(np.vstack(A_rows), np.concatenate(lb_rows), np.concatenate(ub_rows)), + integrality=np.ones(n), + bounds=Bounds(lb=np.zeros(n), ub=np.ones(n)), + ) + + if result.success and result.x is not None: + x_opt = np.round(result.x).astype(int) + chosen = ACTIONS[int(np.argmax(x_opt))] + opt_cost = float(result.fun) + status = "OPTIMAL" + else: + log.warning("MILP did not converge (%s) -> fallback", result.message) + idx = int(np.argmin(c)) + x_opt = np.zeros(n, dtype=int); x_opt[idx] = 1 + chosen = ACTIONS[idx] + opt_cost = float(c[idx]) + status = f"FALLBACK:{result.message}" + + return MILPResult( + recommended_action=chosen.upper(), + action_binary_vector={a: int(x_opt[i]) for i, a in enumerate(ACTIONS)}, + optimal_cost=round(opt_cost, 2), + solver_status=status, + action_costs={a: round(float(direct_costs[i]), 2) for i, a in enumerate(ACTIONS)}, + action_benefits={a: round(float(benefits[i]), 2) for i, a in enumerate(ACTIONS)}, + net_values={a: round(float(c[i]), 2) for i, a in 
enumerate(ACTIONS)}, + ) + + +# ── §11 REPORT ASSEMBLY ────────────────────────────────────────────────────── + +def _build_report( + image_path: str, + det_result: dict, + defects: List[DefectInstance], + psi_result: PSIResult, + risk_result: BayesianRiskResult, + rul_result: WeibullRULResult, + fin_result: FinancialResult, + milp_result: MILPResult, + elapsed_ms: float, +) -> Dict[str, Any]: + urgency_code, urgency_note = URGENCY_MAP.get(psi_result.tier, ("UNKNOWN", "")) + + per_defect = [] + for d in defects: + cat = DEFECT_CATALOG.get(d.class_name, {}) + per_defect.append({ + "class": d.class_name, + "description": cat.get("description", ""), + "el_signature": cat.get("el_signature", ""), + "confidence": d.confidence, + "bbox_xyxy": d.bbox, + "area_ratio": d.area_ratio, + "el_contrast_score": round(d.el_contrast_score, 4), + "iec_severity": cat.get("iec_severity", "?"), + "instance_psi": round(d.instance_severity, 5), + "string_impact": cat.get("string_impact", False), + }) + + return { + "report_metadata": { + "report_id": str(uuid.uuid4()), + "engine_version": "4.1.0-el-highs-milp", + "image_type": "Electroluminescence (EL)", + "standards": ["IEC 62446-3", "IEC 61215", "IEA PVPS T13"], + "timestamp_utc": datetime.now(timezone.utc).isoformat(), + "image_path": image_path, + "processing_ms": round(elapsed_ms, 1), + }, + "detection_summary": { + "total_detections": det_result["count"], + "defect_class_counts": psi_result.defect_summary, + "image_shape_hw": list(det_result["image_shape"]), + }, + "per_defect_analysis": per_defect, + "panel_severity": { + "psi": psi_result.psi, + "tier": psi_result.tier, + "iec_severity_class": psi_result.iec_class, + "dominant_defect": psi_result.dominant_defect, + "critical_defect_present": psi_result.critical_flag, + "string_cascade_risk": psi_result.string_risk_flag, + "norm_method": "EL Chebyshev-L2 hybrid (0.6/0.4)", + }, + "bayesian_risk": { + "posterior_mean_pct": round(risk_result.posterior_mean * 100, 2), + "posterior_std_pct": round(risk_result.posterior_std * 100, 2), + "credible_interval_95_pct": [ + round(risk_result.credible_interval_95[0] * 100, 2), + round(risk_result.credible_interval_95[1] * 100, 2), + ], + "prior": f"Beta({risk_result.prior_alpha}, {risk_result.prior_beta})", + "posterior": f"Beta({risk_result.posterior_alpha}, {risk_result.posterior_beta})", + "mcmc_method": "Metropolis-Hastings | EL uncertainty eps~Beta(2.5,3.5)", + "mcmc_samples": MCMC_SAMPLES, + "mcmc_ess": risk_result.mcmc_ess, + }, + "rul_estimate": { + "rul_years_p50_median": rul_result.rul_years_p50, + "rul_years_p10_pessimistic": rul_result.rul_years_p10, + "rul_years_p90_optimistic": rul_result.rul_years_p90, + "hazard_rate_per_year": rul_result.hazard_rate, + "el_acceleration_factor": rul_result.el_acceleration_factor, + "virtual_age_years": rul_result.virtual_age_years, + "model": f"Weibull PH (beta={PHYSICS['weibull_shape_beta']}, eta={PHYSICS['weibull_scale_eta_years']} yr) + EL-AF", + }, + "financial_impact": { + "annual_energy_loss_kwh": fin_result.annual_energy_loss_kwh, + "annual_revenue_loss_usd": fin_result.annual_revenue_loss_usd, + "npv_revenue_loss_usd": fin_result.npv_loss_usd, + "annual_carbon_loss_kgCO2e": fin_result.annual_carbon_loss_kg, + "horizon_carbon_loss_tonnesCO2e": fin_result.horizon_carbon_loss_tonnes, + "carbon_cost_usd": fin_result.carbon_cost_usd, + "discount_rate_pct": FINANCE["discount_rate"] * 100, + "analysis_horizon_years": FINANCE["analysis_horizon_years"], + }, + "maintenance_decision": { + "recommended_action": 
milp_result.recommended_action, + "solver": "scipy HiGHS MILP (binary integer variables)", + "solver_status": milp_result.solver_status, + "optimal_net_cost_usd": milp_result.optimal_cost, + "action_binary_vector": milp_result.action_binary_vector, + "action_direct_costs_usd": milp_result.action_costs, + "action_npv_benefits_usd": milp_result.action_benefits, + "action_net_costs_usd": milp_result.net_values, + }, + "operational_guidance": { + "urgency_code": urgency_code, + "urgency_note": urgency_note, + "next_inspection_interval": ( + "<=3 months" if psi_result.tier in ("HIGH", "CRITICAL") else + "<=6 months" if psi_result.tier == "MODERATE" else + "<=12 months" + ), + "string_isolation_required": psi_result.string_risk_flag and psi_result.critical_flag, + "confidence_statement": ( + f"EL analysis: {round(risk_result.posterior_mean * 100, 1)}% " + f"[95% CI {round(risk_result.credible_interval_95[0]*100,1)}%-" + f"{round(risk_result.credible_interval_95[1]*100,1)}%] " + f"probability of accelerated degradation. " + f"Median RUL: {rul_result.rul_years_p50} yr. " + f"Action: {milp_result.recommended_action}." + ), + }, + } + + +# ── §12 PUBLIC ENTRY POINT ─────────────────────────────────────────────────── + +def analyze_panel(image_path: str, annotated_out_path: str | None = None) -> Dict[str, Any]: + t0 = time.perf_counter() + log.info("Starting EL analysis: %s", image_path) + + det_result = run_detection(image_path) + raw_dets = det_result.get("detections", []) + height, width = det_result["image_shape"] + panel_area = height * width + + raw_dets = compute_area_ratios(raw_dets, width, height) + + defects: List[DefectInstance] = [] + for d in raw_dets: + cls = str(d.get("class_name", "crack")).lower().strip() + if cls not in DEFECT_CATALOG: + log.warning("Unknown class '%s' -> remapped to 'crack'", cls) + cls = "crack" + x1, y1, x2, y2 = d["bbox"] + defects.append(DefectInstance( + class_name=cls, + confidence=float(np.clip(d.get("confidence", 0.8), 0.0, 1.0)), + bbox=[x1, y1, x2, y2], + area_ratio=float(d.get("area_ratio", max(1, (x2-x1)*(y2-y1)) / panel_area)), + )) + + log.info("Layer 0: EL contrast scoring") + defects = compute_el_contrast_scores(image_path, defects) + + log.info("Layer 1: PSI") + psi_result = compute_psi(defects) + log.info(" PSI=%.4f Tier=%s", psi_result.psi, psi_result.tier) + + log.info("Layer 2: Bayesian MCMC risk (%d samples)", MCMC_SAMPLES) + risk_result = compute_bayesian_risk(psi_result) + log.info(" Risk=%.2f%% ESS=%.0f", risk_result.posterior_mean * 100, risk_result.mcmc_ess) + + log.info("Layer 3: Weibull RUL + EL-AF") + rul_result = compute_weibull_rul(psi_result, risk_result, defects) + log.info(" RUL_P50=%.1f yr AF=%.2f", rul_result.rul_years_p50, rul_result.el_acceleration_factor) + + log.info("Layer 4: NPV financial + carbon") + fin_result = compute_financial_impact(psi_result) + log.info(" NPV_loss=USD %.2f CO2=%.3f t", fin_result.npv_loss_usd, fin_result.horizon_carbon_loss_tonnes) + + log.info("Layer 5: HiGHS MILP optimisation") + milp_result = compute_milp_decision(psi_result, risk_result, fin_result, rul_result) + log.info(" Action=%s Status=%s", milp_result.recommended_action, milp_result.solver_status) + + if annotated_out_path: + draw_boxes(image_path, raw_dets, annotated_out_path) + log.info(" Annotated EL image saved: %s", annotated_out_path) + + elapsed_ms = (time.perf_counter() - t0) * 1000 + report = _build_report( + image_path, det_result, defects, + psi_result, risk_result, rul_result, fin_result, milp_result, elapsed_ms, + ) + 
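+    # Rough magnitude check for report["financial_impact"] (assumed PSI = 0.30,
+    # constants taken from FINANCE above, not computed from this image):
+    # eta_loss = 0.225; annual energy loss = 400 W * 0.165 CF * 8760 h / 1000
+    # * 0.225 ≈ 130.1 kWh; revenue loss ≈ USD 15.6 per year; NPV loss over
+    # 10 yr at 7% ≈ USD 110; carbon loss ≈ 54.3 kg CO2e per year
+    # (≈ USD 35 of carbon cost over the horizon).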
log.info("Complete in %.1f ms -> %s", elapsed_ms, milp_result.recommended_action) + return report + + +# ── §13 CLI SELF-TEST ──────────────────────────────────────────────────────── + +if __name__ == "__main__": + import sys + img_path = sys.argv[1] if len(sys.argv) > 1 else "test_el_panel.jpg" + out_path = sys.argv[2] if len(sys.argv) > 2 else None + print(json.dumps(analyze_panel(img_path, annotated_out_path=out_path), indent=2, default=str)) \ No newline at end of file diff --git a/modules/llm_summary/pdf_report.py b/modules/llm_summary/pdf_report.py new file mode 100644 index 0000000..840ef89 --- /dev/null +++ b/modules/llm_summary/pdf_report.py @@ -0,0 +1,230 @@ +"""modules/llm_summary/pdf_report.py +Generates a styled PDF from the summary text using reportlab. +""" +from __future__ import annotations + +import io +import re +import textwrap +from datetime import datetime + +from reportlab.lib import colors +from reportlab.lib.pagesizes import A4 +from reportlab.lib.styles import ParagraphStyle, getSampleStyleSheet +from reportlab.lib.units import mm +from reportlab.platypus import ( + SimpleDocTemplate, + Paragraph, + Spacer, + HRFlowable, + Table, + TableStyle, +) +from reportlab.lib.enums import TA_LEFT, TA_CENTER + + +# ── Colour palette ──────────────────────────────────────────────────────── +AMBER = colors.HexColor("#f59e0b") +AMBER_DARK = colors.HexColor("#d97706") +DARK_BG = colors.HexColor("#0f172a") +SLATE = colors.HexColor("#334155") +TEXT_CLR = colors.HexColor("#1e293b") +MUTED = colors.HexColor("#64748b") + + +def _build_styles(): + """Return a dict of named ParagraphStyles for the PDF.""" + base = getSampleStyleSheet() + + return { + "title": ParagraphStyle( + "PDFTitle", + parent=base["Heading1"], + fontSize=18, + leading=22, + textColor=DARK_BG, + spaceAfter=4, + fontName="Helvetica-Bold", + ), + "subtitle": ParagraphStyle( + "PDFSubtitle", + parent=base["Normal"], + fontSize=9, + leading=12, + textColor=MUTED, + spaceAfter=14, + ), + "section_heading": ParagraphStyle( + "PDFSectionH", + parent=base["Heading2"], + fontSize=12, + leading=15, + textColor=AMBER_DARK, + spaceBefore=16, + spaceAfter=6, + fontName="Helvetica-Bold", + borderPadding=(0, 0, 0, 4), + ), + "sub_heading": ParagraphStyle( + "PDFSubH", + parent=base["Heading3"], + fontSize=10, + leading=13, + textColor=TEXT_CLR, + spaceBefore=10, + spaceAfter=4, + fontName="Helvetica-Bold", + ), + "body": ParagraphStyle( + "PDFBody", + parent=base["Normal"], + fontSize=9, + leading=14, + textColor=TEXT_CLR, + spaceAfter=5, + ), + "bullet": ParagraphStyle( + "PDFBullet", + parent=base["Normal"], + fontSize=9, + leading=13, + textColor=TEXT_CLR, + leftIndent=14, + spaceAfter=3, + bulletIndent=4, + ), + } + + +def _escape_xml(text: str) -> str: + """Escape XML entities for reportlab Paragraph.""" + text = text.replace("&", "&") + text = text.replace("<", "<") + text = text.replace(">", ">") + return text + + +def _inline_format(text: str) -> str: + """Convert **bold** and *italic* to reportlab XML tags.""" + text = _escape_xml(text) + text = re.sub(r"\*\*(.+?)\*\*", r"\1", text) + text = re.sub(r"__(.+?)__", r"\1", text) + text = re.sub(r"(?\1", text) + text = re.sub(r"(?\1", text) + return text + + +def generate_pdf(summary_text: str, filename: str = "SolarPV_Report") -> io.BytesIO: + """ + Generate a styled A4 PDF from the summary text. + Returns a BytesIO buffer containing the PDF data. 
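+
+    Typical use, as an illustrative sketch (the calling Flask route is not
+    shown here and the exact arguments are assumptions):
+        buf = generate_pdf(summary_text, filename="SolarPV_Report")
+        send_file(buf, mimetype="application/pdf", as_attachment=True,
+                  download_name="SolarPV_Report.pdf")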
+ """ + buf = io.BytesIO() + styles = _build_styles() + + doc = SimpleDocTemplate( + buf, + pagesize=A4, + leftMargin=20 * mm, + rightMargin=20 * mm, + topMargin=20 * mm, + bottomMargin=18 * mm, + title=filename, + author="SolaritivityPlus", + ) + + story = [] + + # ── Header ──────────────────────────────────────────────────────────── + story.append(Paragraph("☀ Solar Panel Inspection Report", styles["title"])) + story.append( + Paragraph( + f"Generated on {datetime.now().strftime('%d %B %Y at %H:%M')} • SolaritivityPlus AI Engine", + styles["subtitle"], + ) + ) + story.append( + HRFlowable( + width="100%", thickness=1.5, color=AMBER, spaceAfter=12, spaceBefore=2 + ) + ) + + # ── Parse the summary text into sections ────────────────────────────── + text = summary_text.replace("\r\n", "\n").replace("\r", "\n") + # Remove separator lines + text = re.sub(r"^={3,}.*$", "", text, flags=re.MULTILINE) + text = re.sub(r"^-{3,}\s*$", "", text, flags=re.MULTILINE) + + # Split by SECTION N: + section_pattern = re.compile( + r"(?:^|\n)\s*SECTION\s+\d+\s*:\s*(.*)", re.IGNORECASE + ) + matches = list(section_pattern.finditer(text)) + + if not matches: + # No sections found — dump as body text + for line in text.strip().split("\n"): + line = line.strip() + if not line: + continue + story.append(Paragraph(_inline_format(line), styles["body"])) + else: + for i, m in enumerate(matches): + title = m.group(1).strip().rstrip("=-").strip() + start = text.index("\n", m.start() + 1) if "\n" in text[m.start() + 1 :] else m.end() + end = matches[i + 1].start() if i + 1 < len(matches) else len(text) + body = text[start:end].strip() + + # Section heading + story.append(Paragraph(_inline_format(title.upper()), styles["section_heading"])) + + # Process body lines + for line in body.split("\n"): + stripped = line.strip() + if not stripped or re.match(r"^[=\-]{3,}$", stripped): + continue + if re.match( + r"^(SOLAR PANEL INSPECTION REPORT|Image Reference:|END OF REPORT|Rules:|\[DEFECT BLOCK)", + stripped, + re.IGNORECASE, + ): + continue + + # Sub-headings + if re.match( + r"^(?:\d+[a-z]?[.)\s]|Defect Name|What This Defect|Detection Evidence|Root Causes|Consequences|Defect Priority|Immediate Actions|Short-Term|Preventive|Carbon Emission|Risk Level|Risk Justification|CO2|Power and Efficiency|Operational)", + stripped, + re.IGNORECASE, + ): + story.append(Paragraph(_inline_format(stripped), styles["sub_heading"])) + continue + + # Bullet items + if re.match(r"^\s*[-•]\s+", line): + content = re.sub(r"^[-•]\s+", "", stripped) + story.append( + Paragraph(f"▸ {_inline_format(content)}", styles["bullet"]) + ) + continue + + # Regular paragraph + story.append(Paragraph(_inline_format(stripped), styles["body"])) + + # ── Footer separator ────────────────────────────────────────────────── + story.append(Spacer(1, 16)) + story.append( + HRFlowable( + width="100%", thickness=1, color=SLATE, spaceAfter=6, spaceBefore=4 + ) + ) + story.append( + Paragraph( + "This report was generated by SolaritivityPlus using Gemini AI. " + "Data sourced from detections.csv and carbon.csv.", + styles["subtitle"], + ) + ) + + doc.build(story) + buf.seek(0) + return buf diff --git a/modules/llm_summary/prompt_templates.py b/modules/llm_summary/prompt_templates.py index 8382d51..e8d4823 100644 --- a/modules/llm_summary/prompt_templates.py +++ b/modules/llm_summary/prompt_templates.py @@ -1,18 +1,146 @@ """modules/llm_summary/prompt_templates.py""" -SUMMARY_PROMPT = """You are an expert solar PV panel analyst. 
Based on the detection and carbon emission data below, generate a concise professional report. +SYSTEM_INSTRUCTION = """ +You are a senior solar PV reliability engineer and decarbonization analyst with 15+ years of field experience. +Your task is to generate a detailed, professional, and easy-to-understand inspection report for a solar panel technician or plant operator. -=== DETECTION DATA === -{detection_data} +Use ONLY the provided CSV context for the target image. Do not invent values. +If a value is missing from the CSV, state "Not available in CSV" — never guess or fabricate numbers. -=== CARBON EMISSION DATA === -{carbon_data} +Writing style rules: +- Write in clear, plain English that a non-engineer plant operator can understand. +- Use full sentences and paragraphs for explanations — not just one-liners. +- For lists, use '-' as the bullet symbol only. +- No markdown formatting, no code fences, no asterisks, no bold/italic symbols. +- Numbers from CSV must be quoted exactly as they appear. +- Each section must be clearly headed and substantive — minimum 3-5 sentences per section. +""" -Write a structured report with these sections: -1. Executive Summary (2-3 sentences) -2. Defect Analysis (what defects found, severity, frequency) -3. Carbon Impact Assessment (CO₂ implications, degradation impact) -4. Recommendations (maintenance actions, priority order) -5. Risk Level: [LOW / MEDIUM / HIGH / CRITICAL] +USER_PROMPT_TEMPLATE = """ +Generate a comprehensive solar panel inspection report for image: {target_image} -Be concise, factual, and actionable. Use plain text, no markdown.""" \ No newline at end of file +The data below comes from two CSV sources: +1) Defect Detection CSV — contains defect class names, confidence scores, severity, bounding box areas, and detection metadata. +2) Carbon Emission CSV — contains CO2 emission estimates, power degradation percentages, panel capacity, and operational parameters. + +=== DETECTION ROWS (JSON) === +{detection_rows_json} + +=== CARBON ROWS (JSON) === +{carbon_rows_json} + +=== DETECTION NUMERIC SUMMARY (JSON) === +{detection_numeric_json} + +=== CARBON NUMERIC SUMMARY (JSON) === +{carbon_numeric_json} + +--- + +Write the full report using EXACTLY the section headings and structure below. +Each section must be detailed, informative, and written for a plant operator audience. +Do not skip any section. Do not shorten sections to one line. + +=========================================================== +SOLAR PANEL INSPECTION REPORT +Image Reference: {target_image} +=========================================================== + +SECTION 1: EXECUTIVE SUMMARY +Write 4 to 6 sentences covering: +- Overall health status of the panel based on the detected defects. +- The most critical issue found and why it demands attention. +- The carbon and power impact in plain numbers from the CSV. +- The urgency level and consequence of delayed action. + +SECTION 2: DEFECT ANALYSIS +For EACH unique defect type found in the detection CSV, write a full block with all of the following fields. +If only one defect type is present, write one block. If multiple, write one block per defect. + +[DEFECT BLOCK START] +Defect Name: + State the exact defect class name from the CSV (e.g., black_core, crack, finger, star_crack, thick_line). + +What This Defect Is: + Explain in 2-3 sentences what this defect physically looks like on a solar panel and what it represents technically. 
+ +Detection Evidence from CSV: + - Number of detections: (from CSV) + - Confidence score: (from CSV, e.g., 0.91 means 91% certainty) + - Severity level: (from CSV if present, else state Not available in CSV) + - Defect area or bounding box coverage: (from CSV if present, else state Not available in CSV) + Interpret what these numbers mean — e.g., high confidence means the model is very certain this defect exists. + +Root Causes: + List at least 3 probable engineering causes for this specific defect type: + - Cause 1 (explain in 1-2 sentences why this causes the defect) + - Cause 2 (explain in 1-2 sentences) + - Cause 3 (explain in 1-2 sentences) + - Additional causes if applicable + +Consequences If Left Untreated: + Write 3-5 sentences describing what will happen to the panel over the next weeks, months, and years if this defect is not addressed. Include effects on power output, panel lifespan, safety risks, and cascade failures to adjacent cells or panels. + +Defect Priority: + Assign one of: CRITICAL / HIGH / MEDIUM / LOW + Justify the priority in 2-3 sentences using specific values from the CSV such as confidence, severity, and area. +[DEFECT BLOCK END] + +SECTION 3: SEVERITY, AREA, AND CONFIDENCE INTERPRETATION +Write 4-6 sentences that: +- Explain the overall severity pattern across all detected defects using the numeric summary values. +- Describe what the confidence score range means in practical terms (e.g., is the model very certain or uncertain?). +- Describe what the area or bounding box coverage values mean for how much of the panel is affected. +- If severity, area, or confidence columns are absent from the CSV, clearly state which ones are missing and what that means for interpretation. +- Conclude with whether the numerical evidence supports a conservative or aggressive maintenance response. + +SECTION 4: CARBON EMISSION AND POWER DEGRADATION IMPACT +Write this section in three clearly labeled sub-sections: + +4a. CO2 Emission Impact: +Write 3-4 sentences using the exact CO2 values from the carbon CSV. Explain what the annual CO2 figure means in practical environmental terms — for example, compare it to equivalent car trips or trees needed to offset it if possible from data. State whether the emission level is within acceptable range or alarming. + +4b. Power and Efficiency Degradation: +Write 3-4 sentences using the power degradation percentage and panel capacity from the carbon CSV. Explain what percentage degradation means for daily energy generation loss in practical kWh terms if calculable from the data. Describe how the detected defect type directly causes this degradation mechanism. + +4c. Operational and Financial Risk: +Write 3-4 sentences about what happens to operations if no action is taken. Include the cumulative effect on yearly energy yield, increased carbon liability, and potential cost implications of delayed maintenance versus early intervention. + +SECTION 5: RECOMMENDED REMEDIATION ACTIONS +Write detailed, actionable steps organized into three time horizons. Each action must be specific — not generic advice. 
+ +Immediate Actions (Within 0 to 7 Days): +- Action 1: (specific step with reason) +- Action 2: (specific step with reason) +- Action 3: (specific step with reason) + +Short-Term Actions (Within 1 to 4 Weeks): +- Action 1: (specific step with reason) +- Action 2: (specific step with reason) +- Action 3: (specific step with reason) + +Preventive and Ongoing Actions (Monthly / Quarterly / Annually): +- Action 1: (specific step with reason) +- Action 2: (specific step with reason) +- Action 3: (specific step with reason) + +Carbon Emission Reduction Measures: +Write 3-5 sentences specifically addressing how repairing or replacing the defective panel will reduce CO2 output, restore efficiency, and contribute to the plant's decarbonization targets. Mention any monitoring or reporting practices that should be adopted. + +SECTION 6: FINAL RISK ASSESSMENT +Risk Level: (Choose exactly one: LOW / MEDIUM / HIGH / CRITICAL) + +Risk Justification: +Write 4-5 sentences that synthesize evidence from all sections — defect severity, confidence, area affected, power degradation percentage, and CO2 impact — to justify the assigned risk level. Be specific with numbers. Conclude with a clear statement on whether the panel should remain in operation, be throttled, or be taken offline immediately. + +=========================================================== +END OF REPORT +=========================================================== + +Rules: +- Use ONLY values from the provided CSV data. Do not invent numbers. +- Every section must be written in full sentences. No one-word answers. +- Plain text only. No markdown, no asterisks, no bold symbols. +- Use '-' for all bullet points only inside lists. +- Minimum total report length: 600 words. +""" \ No newline at end of file diff --git a/modules/llm_summary/summary_engine.py b/modules/llm_summary/summary_engine.py index ad9023e..a47dbb5 100644 --- a/modules/llm_summary/summary_engine.py +++ b/modules/llm_summary/summary_engine.py @@ -1,91 +1,225 @@ """modules/llm_summary/summary_engine.py -Uses local GGUF model (orca-mini-3b) via ctransformers for report generation. +Gemini-based summary generation using Google AI Studio API key. +Uses per-image rows from detections.csv and carbon.csv. """ -import re +from __future__ import annotations + +import json +import os +from typing import Dict, List, Tuple + import pandas as pd -from ctransformers import AutoModelForCausalLM -from config import (LOCAL_LLM_PATH, LOCAL_LLM_TYPE, - LOCAL_LLM_MAX_TOKENS, LOCAL_LLM_TEMP, - DETECTIONS_CSV, CARBON_CSV) -from .prompt_templates import SUMMARY_PROMPT - -_llm = None - - -def _get_llm(): - global _llm - if _llm is None: - print(f"[LLM] Loading local model from: {LOCAL_LLM_PATH}") - _llm = AutoModelForCausalLM.from_pretrained( - LOCAL_LLM_PATH, - model_type=LOCAL_LLM_TYPE, - max_new_tokens=LOCAL_LLM_MAX_TOKENS, - temperature=LOCAL_LLM_TEMP, - local_files_only=True, +import requests + +from config import DETECTIONS_CSV, CARBON_CSV +from .prompt_templates import SYSTEM_INSTRUCTION, USER_PROMPT_TEMPLATE + +# REPLACE with a function so key is always read fresh: +def _get_api_key() -> str: + key = os.environ.get("GEMINI_API_KEY") or os.environ.get("GOOGLE_API_KEY") + if not key: + raise ValueError( + "Missing GEMINI_API_KEY (or GOOGLE_API_KEY) in environment. " + "Add it to your .env file without spaces: GEMINI_API_KEY=AIza..." 
) - print("[LLM] Model loaded.") - return _llm + return key +GEMINI_MODEL = os.environ.get("GEMINI_MODEL", "").strip() +GEMINI_TIMEOUT_SEC = int(os.environ.get("GEMINI_TIMEOUT_SEC", "90")) -def _preprocess(text: str) -> str: - text = re.sub(r"[^a-zA-Z0-9.,!?%:/()\-\s]", "", text) - text = re.sub(r"\s+", " ", text).strip() - return text +_PREFERRED_MODELS = [ + "gemini-2.5-flash", + "gemini-2.5-flash-lite", + "gemini-2.0-flash", + "gemini-2.0-flash-lite", + "gemini-1.5-flash", + "gemini-1.5-flash-latest", + "gemini-1.5-pro", + "gemini-1.5-pro-latest", +] -def _read_csv(path: str, max_rows: int = 25) -> str: +def _read_csv_safe(path: str) -> pd.DataFrame: try: - df = pd.read_csv(path) - return df.tail(max_rows).to_string(index=False) + if os.path.exists(path): + return pd.read_csv(path) except Exception: - return "No data available." + pass + return pd.DataFrame() + + +def _sanitize_records(df: pd.DataFrame, max_rows: int = 30) -> List[Dict]: + if df.empty: + return [] + tail_df = df.tail(max_rows).copy() + tail_df = tail_df.where(pd.notna(tail_df), None) + return tail_df.to_dict(orient="records") + + +def _numeric_summary(df: pd.DataFrame) -> Dict: + if df.empty: + return {} + numeric_cols = df.select_dtypes(include="number").columns.tolist() + if not numeric_cols: + return {} + out = {} + for col in numeric_cols: + s = pd.to_numeric(df[col], errors="coerce").dropna() + if s.empty: + continue + out[col] = { + "count": int(s.shape[0]), + "min": float(s.min()), + "max": float(s.max()), + "mean": float(s.mean()), + "median": float(s.median()), + } + return out + + +def _normalize_filename(v: str) -> str: + return os.path.basename(str(v)).strip().lower() + + +def _filter_by_image(df: pd.DataFrame, filename: str | None) -> Tuple[pd.DataFrame, str]: + if df.empty: + return df, filename or "unknown" + + if "image_filename" not in df.columns: + if filename: + return df, filename + return df.tail(1), "latest_record" + + work = df.copy() + work["_norm_image"] = work["image_filename"].astype(str).map(_normalize_filename) + + if filename and filename.strip(): + target = _normalize_filename(filename) + exact = work[work["_norm_image"] == target] + if not exact.empty: + return exact.drop(columns=["_norm_image"]), target + + partial = work[work["_norm_image"].str.contains(target, na=False)] + if not partial.empty: + return partial.drop(columns=["_norm_image"]), target + + return work.iloc[0:0].drop(columns=["_norm_image"]), target + + latest = work.tail(1) + resolved = str(latest["image_filename"].iloc[0]) + return latest.drop(columns=["_norm_image"]), resolved + + +def list_available_models() -> List[str]: + api_key = _get_api_key() + url = f"https://generativelanguage.googleapis.com/v1beta/models?key={api_key}" + resp = requests.get(url, timeout=GEMINI_TIMEOUT_SEC) + + if resp.status_code == 403: + error_body = resp.json().get("error", {}) + raise ValueError( + f"API key rejected (403): {error_body.get('message', resp.text)}\n" + f"Your key may be leaked or invalid. Generate a new one at https://aistudio.google.com" + ) + if resp.status_code != 200: + raise ValueError(f"Failed to list models: {resp.status_code} {resp.text}") + data = resp.json() + names = [] + for m in data.get("models", []): + if "generateContent" in m.get("supportedGenerationMethods", []): + names.append(m.get("name", "").replace("models/", "")) + return names -def _build_prompt(det_text: str, carbon_text: str) -> str: - system = ( - "You are a solar PV inspection analyst. 
" - "Read the defect detection and carbon emission data below, " - "then write a concise structured report." - ) - data_block = ( - f"=== DETECTION DATA ===\n{_preprocess(det_text)}\n\n" - f"=== CARBON EMISSION DATA ===\n{_preprocess(carbon_text)}" - ) - instruction = SUMMARY_PROMPT.format( - detection_data=_preprocess(det_text), - carbon_data=_preprocess(carbon_text), - ) - prompt = ( - f"### System:\n{system}\n\n" - f"### User:\n{instruction}\n\n" - f"### Response:\n" + +def _resolve_model() -> str: + available = list_available_models() + + if not available: + raise ValueError( + "No generateContent-capable Gemini models found for this API key. " + "Check your key at https://aistudio.google.com" + ) + + if GEMINI_MODEL: + if GEMINI_MODEL in available: + return GEMINI_MODEL + raise ValueError( + f"Configured GEMINI_MODEL '{GEMINI_MODEL}' is not available for your key.\n" + f"Available models for your key: {available}" + ) + + for m in _PREFERRED_MODELS: + if m in available: + return m + + # Use first available if none of preferred match + return available[0] + + +def _call_gemini(system_instruction: str, user_prompt: str) -> str: + api_key = _get_api_key() + model_name = _resolve_model() + url = ( + f"https://generativelanguage.googleapis.com/v1beta/models/" + f"{model_name}:generateContent?key={api_key}" ) - return prompt + payload = { + "system_instruction": {"parts": [{"text": system_instruction}]}, + "contents": [{"role": "user", "parts": [{"text": user_prompt}]}], + "generationConfig": { + "temperature": 0.2, + "maxOutputTokens": 4096, + }, + } + + resp = requests.post(url, json=payload, timeout=GEMINI_TIMEOUT_SEC) + + if resp.status_code == 403: + error_body = resp.json().get("error", {}) + raise ValueError( + f"Gemini API key rejected (403): {error_body.get('message', resp.text)}\n" + f"Generate a new key at https://aistudio.google.com" + ) + if resp.status_code != 200: + raise ValueError(f"Gemini API error {resp.status_code}: {resp.text}") + + data = resp.json() + candidates = data.get("candidates", []) + if not candidates: + raise ValueError("Gemini returned no candidates.") + + parts = candidates[0].get("content", {}).get("parts", []) + text = "".join(p.get("text", "") for p in parts).strip() + if not text: + raise ValueError("Gemini returned empty text.") + return text -def generate_summary(filename_filter: str = None) -> str: - det_text = _read_csv(DETECTIONS_CSV) - carbon_text = _read_csv(CARBON_CSV) +def generate_summary(filename_filter: str | None = None) -> str: + det_df = _read_csv_safe(DETECTIONS_CSV) + car_df = _read_csv_safe(CARBON_CSV) - if filename_filter: - try: - d = pd.read_csv(DETECTIONS_CSV) - d = d[d["image_filename"].str.contains(filename_filter, na=False)] - if not d.empty: - det_text = d.to_string(index=False) + if det_df.empty and car_df.empty: + raise ValueError("No data found in detections.csv or carbon.csv.") - c = pd.read_csv(CARBON_CSV) - c = c[c["image_filename"].str.contains(filename_filter, na=False)] - if not c.empty: - carbon_text = c.to_string(index=False) - except Exception: - pass + det_filtered, resolved_image = _filter_by_image(det_df, filename_filter) + car_filtered, _ = _filter_by_image( + car_df, resolved_image if not filename_filter else filename_filter + ) - prompt = _build_prompt(det_text, carbon_text) - llm = _get_llm() + if filename_filter and det_filtered.empty and car_filtered.empty: + raise ValueError(f"No records found for image filename filter: {filename_filter}") - response = "" - for token in llm(prompt, stream=True): - 
response += token + detection_rows = _sanitize_records(det_filtered) + carbon_rows = _sanitize_records(car_filtered) + + prompt = USER_PROMPT_TEMPLATE.format( + target_image=resolved_image, + detection_rows_json=json.dumps(detection_rows, indent=2, ensure_ascii=True), + carbon_rows_json=json.dumps(carbon_rows, indent=2, ensure_ascii=True), + detection_numeric_json=json.dumps(_numeric_summary(det_filtered), indent=2, ensure_ascii=True), + carbon_numeric_json=json.dumps(_numeric_summary(car_filtered), indent=2, ensure_ascii=True), + ) - return response.strip() \ No newline at end of file + return _call_gemini(SYSTEM_INSTRUCTION, prompt) \ No newline at end of file diff --git a/modules/llm_summary/summary_routes.py b/modules/llm_summary/summary_routes.py index b026f45..6258b6a 100644 --- a/modules/llm_summary/summary_routes.py +++ b/modules/llm_summary/summary_routes.py @@ -1,19 +1,79 @@ """modules/llm_summary/summary_routes.py""" -from flask import Blueprint, request, jsonify +from flask import Blueprint, request, jsonify, send_file from auth import require_auth -from .summary_engine import generate_summary +from .summary_engine import generate_summary, list_available_models +from .thermal_summary_engine import generate_thermal_summary +from .pdf_report import generate_pdf llm_bp = Blueprint("llm_summary", __name__, url_prefix="/api/summary") + @llm_bp.route("/generate", methods=["POST"]) @require_auth def generate(): body = request.get_json(silent=True) or {} - filename_filter = body.get("filename") + filename_filter = body.get("filename") or body.get("image_filename") try: text = generate_summary(filename_filter) return jsonify({"success": True, "summary": text}), 200 except ValueError as e: return jsonify({"success": False, "error": str(e)}), 503 + except Exception as e: + return jsonify({"success": False, "error": str(e)}), 500 + + +@llm_bp.route("/thermal/generate", methods=["POST"]) +@require_auth +def generate_thermal(): + """Generate a summary for the thermal flow.""" + body = request.get_json(silent=True) or {} + filename = body.get("image_filename", "thermal_image") + detections = body.get("detections", []) + lime_features = body.get("lime_features", []) + + try: + text = generate_thermal_summary(filename, detections, lime_features) + return jsonify({"success": True, "summary": text}), 200 + except ValueError as e: + return jsonify({"success": False, "error": str(e)}), 503 + except Exception as e: + return jsonify({"success": False, "error": str(e)}), 500 + + +@llm_bp.route("/models", methods=["GET"]) +@require_auth +def available_models(): + """Diagnostic endpoint: returns all generateContent models your API key can access.""" + try: + models = list_available_models() + return jsonify({"success": True, "models": models}), 200 + except ValueError as e: + return jsonify({"success": False, "error": str(e)}), 503 + except Exception as e: + return jsonify({"success": False, "error": str(e)}), 500 + + +@llm_bp.route("/pdf", methods=["POST"]) +@require_auth +def download_pdf(): + """Generate a PDF from the provided summary text.""" + body = request.get_json(silent=True) or {} + summary_text = body.get("summary", "") + filename = body.get("filename", "SolarPV_Report") + + if not summary_text: + return jsonify({"success": False, "error": "No summary text provided."}), 400 + + try: + pdf_buf = generate_pdf(summary_text, filename) + safe_name = "".join( + c for c in filename if c.isalnum() or c in ("_", "-", " ") + ).strip() or "SolarPV_Report" + return send_file( + pdf_buf, + 
mimetype="application/pdf", + as_attachment=True, + download_name=f"{safe_name}.pdf", + ) except Exception as e: return jsonify({"success": False, "error": str(e)}), 500 \ No newline at end of file diff --git a/modules/llm_summary/thermal_prompt_templates.py b/modules/llm_summary/thermal_prompt_templates.py new file mode 100644 index 0000000..b0110b3 --- /dev/null +++ b/modules/llm_summary/thermal_prompt_templates.py @@ -0,0 +1,76 @@ +"""modules/llm_summary/thermal_prompt_templates.py""" + +SYSTEM_INSTRUCTION_THERMAL = """ +You are a senior solar thermal inspector and thermography expert with 15+ years of experience in PV hot-spot analysis. +Your task is to generate a detailed, professional, and easy-to-understand thermal inspection report for a solar panel technician or plant operator. + +Use ONLY the provided detection and LIME context. Do not invent values. +If a value is missing, state "Data not available" — never guess or fabricate numbers. + +Writing style rules: +- Write in clear, plain English that a non-expert plant operator can understand. +- Use full sentences and paragraphs for explanations. +- For lists, use '-' as the bullet symbol only. +- No markdown formatting, no code fences, no asterisks, no bold/italic symbols. +- Each section must be clearly headed and substantive. +""" + +USER_PROMPT_THERMAL_TEMPLATE = """ +Generate a comprehensive thermal solar panel inspection report for image: {target_image} + +The data below comes from thermal hotspot detection and LIME explainability analysis. + +=== THERMAL DETECTION DATA (JSON) === +{thermal_detections_json} + +=== LIME EXPLAINABILITY DATA (JSON) === +{lime_features_json} + +--- + +Write the full report using EXACTLY the section headings and structure below. +Do not skip any section. Do not shorten sections to one line. + +=========================================================== +THERMAL SOLAR PANEL INSPECTION REPORT +Image Reference: {target_image} +=========================================================== + +SECTION 1: EXECUTIVE SUMMARY +Write 4 to 6 sentences covering: +- Overall thermal health status based on hotspot counts and intensities. +- The most significant hotspot detected and its potential risk. +- Brief mention of the urgency level (Immediate Action vs. Routine Monitoring). + +SECTION 2: HOTSPOT ANALYSIS +For EACH unique hotspot detected, describe: +- Severity class (e.g., High, Medium, Low based on area and confidence). +- Location metadata (bounding box coordinates or relative area). +- Technical interpretation: what a temperature anomaly of this size usually implies (e.g., cell mismatch, bypass diode failure, or shading). + +SECTION 3: EXPLAINABILITY (LIME) INTERPRETATION +Write 4-6 sentences explaining the LIME features provided: +- Which image regions or features are driving the detection model. +- How the model identifies these hotspots as "defective" versus background noise. +- The confidence the AI has in this specific thermal signature. + +SECTION 4: OPERATIONAL IMPACT +Describe path to failure: +- How these hotspots affect the long-term integrity of the module. +- Potential for fire hazards if temperature differentials are extreme. +- Expected power loss mechanisms (localized overheating reducing efficiency). + +SECTION 5: RECOMMENDED REMEDIATION +Organize into: +- Immediate Actions: (e.g., onsite verification with IR camera, bypass diode check). +- Short-Term Actions: (e.g., cleaning, electrical testing). +- Preventive Measures: (e.g., quarterly thermal scans). 
+ +SECTION 6: FINAL RISK ASSESSMENT +Risk Level: (Choose exactly one: LOW / MEDIUM / HIGH / CRITICAL) +Justify the risk based on the evidence provided above. + +=========================================================== +END OF REPORT +=========================================================== +""" \ No newline at end of file diff --git a/modules/llm_summary/thermal_summary_engine.py b/modules/llm_summary/thermal_summary_engine.py new file mode 100644 index 0000000..27d573b --- /dev/null +++ b/modules/llm_summary/thermal_summary_engine.py @@ -0,0 +1,24 @@ +"""modules/llm_summary/thermal_summary_engine.py +Gemini-based thermal summary generation. +""" +from __future__ import annotations +import json +import os +from typing import Dict, List +from .summary_engine import _call_gemini +from .thermal_prompt_templates import SYSTEM_INSTRUCTION_THERMAL, USER_PROMPT_THERMAL_TEMPLATE + +def generate_thermal_summary(image_filename: str, detections: List[Dict], lime_features: List[Dict] = None) -> str: + """ + Generate a thermal report using LLM. + """ + if not detections and not lime_features: + raise ValueError("No thermal detection or XAI data provided for summary.") + + prompt = USER_PROMPT_THERMAL_TEMPLATE.format( + target_image=image_filename, + thermal_detections_json=json.dumps(detections, indent=2), + lime_features_json=json.dumps(lime_features or [], indent=2) + ) + + return _call_gemini(SYSTEM_INSTRUCTION_THERMAL, prompt) diff --git a/modules/thermal/thermal_detect_model.py b/modules/thermal/thermal_detect_model.py new file mode 100644 index 0000000..757bc29 --- /dev/null +++ b/modules/thermal/thermal_detect_model.py @@ -0,0 +1,41 @@ +"""modules/thermal/thermal_detect_model.py — YOLO detection on thermal images.""" +from ultralytics import YOLO +from config import THERMAL_YOLO_MODEL_PATH + +_model = None + + +def _load(): + global _model + if _model is None: + _model = YOLO(THERMAL_YOLO_MODEL_PATH) + return _model + + +def run_thermal_detection(image_path: str, conf: float = 0.3) -> dict: + model = _load() + results = model.predict(source=image_path, conf=conf, save=False, verbose=False) + r = results[0] + + # Read class names from the model itself + names = r.names # {0: 'class_a', 1: 'class_b', ...} + + detections = [] + boxes = r.boxes + if boxes is not None: + for box in boxes: + cls_id = int(box.cls[0]) + conf_val = float(box.conf[0]) + x1, y1, x2, y2 = [round(float(v)) for v in box.xyxy[0]] + detections.append({ + "class_id": cls_id, + "class_name": names.get(cls_id, f"class_{cls_id}"), + "confidence": round(conf_val, 4), + "bbox": [x1, y1, x2, y2], + }) + + return { + "detections": detections, + "count": len(detections), + "image_shape": list(r.orig_shape), + } diff --git a/modules/thermal/thermal_routes.py b/modules/thermal/thermal_routes.py index aa2ed66..41f0977 100644 --- a/modules/thermal/thermal_routes.py +++ b/modules/thermal/thermal_routes.py @@ -1,13 +1,17 @@ """modules/thermal/thermal_routes.py""" import os, uuid from flask import Blueprint, request, jsonify +from PIL import Image from auth import require_auth -from config import THERMAL_UPLOAD_FOLDER, RESULT_FOLDER +from config import THERMAL_UPLOAD_FOLDER, RESULT_FOLDER, UPLOAD_FOLDER, EXPLAIN_FOLDER from modules.el_upload.upload_utils import allowed_file from .thermal_model import generate_hotspot_overlay +from .thermal_detect_model import run_thermal_detection thermal_bp = Blueprint("thermal", __name__, url_prefix="/api/thermal") + +# ── Original hotspot overlay route ───────────────────────────────────────── 
@thermal_bp.route("/predict", methods=["POST"]) @require_auth def predict(): @@ -19,7 +23,6 @@ def predict(): uid = uuid.uuid4().hex ext = file.filename.rsplit(".", 1)[1].lower() - # Save original orig_path = os.path.join(THERMAL_UPLOAD_FOLDER, f"{uid}.{ext}") with open(orig_path, "wb") as fp: fp.write(image_bytes) @@ -38,4 +41,77 @@ def predict(): }), 200 except Exception as e: + return jsonify({"success": False, "error": str(e)}), 500 + + +# ── YOLO thermal detection ───────────────────────────────────────────────── +@thermal_bp.route("/detect", methods=["POST"]) +@require_auth +def detect(): + file = request.files.get("image") + if not file or not allowed_file(file.filename): + return jsonify({"success": False, "error": "Valid image required."}), 400 + + ext = file.filename.rsplit(".", 1)[1].lower() + uid = uuid.uuid4().hex + img_path = os.path.join(THERMAL_UPLOAD_FOLDER, f"{uid}.{ext}") + file.save(img_path) + + try: + result = run_thermal_detection(img_path) + img = Image.open(img_path) + w, h = img.size + + dets = result["detections"] + # Compute area ratios + panel_area = w * h if w * h > 0 else 1 + for d in dets: + x1, y1, x2, y2 = d["bbox"] + d["area_ratio"] = round((x2 - x1) * (y2 - y1) / panel_area, 6) + + # Draw boxes and save result image + from modules.detection.detection_utils import draw_boxes + result_name = f"thdet_{uid}.{ext}" + result_path = os.path.join(RESULT_FOLDER, result_name) + draw_boxes(img_path, dets, result_path) + + return jsonify({ + "success": True, + "filename": f"{uid}.{ext}", + "detections": dets, + "count": len(dets), + "result_image": f"/results/{result_name}", + "image_size": {"width": w, "height": h}, + }), 200 + + except Exception as e: + import traceback; traceback.print_exc() + return jsonify({"success": False, "error": str(e)}), 500 + + +# ── Thermal LIME ──────────────────────────────────────────────────────── +@thermal_bp.route("/lime", methods=["POST"]) +@require_auth +def thermal_lime(): + file = request.files.get("image") + if not file or not allowed_file(file.filename): + return jsonify({"success": False, "error": "Valid image required."}), 400 + + ext = file.filename.rsplit(".", 1)[1].lower() + uid = uuid.uuid4().hex + img_path = os.path.join(UPLOAD_FOLDER, f"thxai_{uid}.{ext}") + file.save(img_path) + + out_name = f"thlime_{uid}.jpg" + out_path = os.path.join(EXPLAIN_FOLDER, out_name) + + try: + from modules.xai.lime_xai import generate_lime + generate_lime(img_path, out_path) + return jsonify({ + "success": True, + "explanation_image": f"/explanations/{out_name}", + }), 200 + except Exception as e: + import traceback; traceback.print_exc() return jsonify({"success": False, "error": str(e)}), 500 \ No newline at end of file diff --git a/modules/weather/weather_routes.py b/modules/weather/weather_routes.py index fe258a6..a20728c 100644 --- a/modules/weather/weather_routes.py +++ b/modules/weather/weather_routes.py @@ -20,8 +20,8 @@ def get_location(): # Fallback for localhost / private IPs return jsonify({ "success": True, - "city": "Chennai", "region": "Tamil Nadu", "country": "India", - "lat": 13.0827, "lon": 80.2707, + "city": "Thalavapalayam, Karur", "region": "Tamil Nadu", "country": "India", + "lat": 11.0630, "lon": 78.0466, "note": "Fallback location (localhost detected)", }), 200 return jsonify({ diff --git a/modules/xai/gradcam.py b/modules/xai/gradcam.py index f553e9f..08b6006 100644 --- a/modules/xai/gradcam.py +++ b/modules/xai/gradcam.py @@ -64,7 +64,7 @@ def generate_gradcam(image_path: str, out_path: str) -> str: orig = 
cv2.imread(image_path) cam_resized = cv2.resize(cam, (orig.shape[1], orig.shape[0])) heatmap = cv2.applyColorMap(np.uint8(255 * cam_resized), cv2.COLORMAP_JET) - overlay = cv2.addWeighted(orig, 0.55, heatmap, 0.45, 0) + overlay = cv2.addWeighted(orig, 0.4, heatmap, 0.6, 0) cv2.imwrite(out_path, overlay) return out_path \ No newline at end of file diff --git a/modules/xai/lime_xai.py b/modules/xai/lime_xai.py new file mode 100644 index 0000000..351784d --- /dev/null +++ b/modules/xai/lime_xai.py @@ -0,0 +1,134 @@ +"""modules/xai/lime_xai.py""" +import torch, torch.nn as nn +import numpy as np +import cv2 +from torchvision import transforms, models +from PIL import Image +import os +from config import INTEGRITY_MODEL_PATH # reuse same backbone for XAI; swap if needed + +from lime import lime_image +from skimage.segmentation import mark_boundaries + +TRANSFORM = transforms.Compose([ + transforms.Resize((224, 224)), + transforms.ToTensor(), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), +]) + +_model = None + +def _get_model(): + global _model + if _model: return _model + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + net = models.resnet18(weights=None) + net.fc = nn.Linear(net.fc.in_features, 2) + try: + state = torch.load(INTEGRITY_MODEL_PATH, map_location=device) + net.load_state_dict(state if not isinstance(state, dict) or "model" not in state else state["model"], strict=False) + except Exception: + pass + net.to(device).eval() + _model = (net, device) + return _model + +def batch_predict(images: np.ndarray) -> np.ndarray: + """ + LIME requires a batch prediction function that takes numpy array of images + (N, H, W, C) in [0, 255] range and returns probabilities (N, num_classes). + """ + net, device = _get_model() + + # Process batch + batch_tensor = [] + for img_arr in images: + pil_img = Image.fromarray(img_arr.astype("uint8")).convert("RGB") + tensor = TRANSFORM(pil_img) + batch_tensor.append(tensor) + + batch_tensor = torch.stack(batch_tensor).to(device) + + with torch.no_grad(): + logits = net(batch_tensor) + # Apply softmax to get probabilities + probs = torch.nn.functional.softmax(logits, dim=1) + + return probs.cpu().numpy() + +def generate_lime(image_path: str, out_path: str, num_samples: int = 250) -> str: + """ + Generate LIME explanation for the given thermal image and save the heatmap overlay. 
+ """ + net, device = _get_model() + + # Read original image via OpenCV to get array in [0, 255] RGB format + orig_bgr = cv2.imread(image_path) + if orig_bgr is None: + raise ValueError(f"Could not read image from {image_path}") + + orig_rgb = cv2.cvtColor(orig_bgr, cv2.COLOR_BGR2RGB) + + # Get a single prediction to know which class we're explaining + probs = batch_predict(np.expand_dims(orig_rgb, axis=0)) + pred_class = int(np.argmax(probs[0])) + + # Set up LIME Explainer + explainer = lime_image.LimeImageExplainer() + + # Generate explanation + # Note: num_samples can be increased for more stability, but 250-500 is good for web speed + explanation = explainer.explain_instance( + orig_rgb, + batch_predict, + top_labels=1, + hide_color=0, + num_samples=num_samples + ) + + # Get mask of superpixels with weights for the predicted class + # We want a heatmap representation of these weights + dict_heatmap = dict(explanation.local_exp[pred_class]) + segments = explanation.segments # (H, W) array of superpixel segments + + # Create the weight heatmap corresponding to segment mask + heatmap = np.zeros_like(segments, dtype=float) + for k, v in dict_heatmap.items(): + heatmap[segments == k] = v + + # Scale heatmap properly for coloring + # LIME gives positive (warm) and negative (cool) weights + # Normalize weights so that 0 is at mapped value 0.5 + max_val = np.max(np.abs(heatmap)) + if max_val > 1e-8: + heatmap_norm = 0.5 + (heatmap / (2.0 * max_val)) + else: + heatmap_norm = np.full_like(heatmap, 0.5) + + heatmap_norm = np.clip(heatmap_norm, 0, 1) + + # Resize heatmap to match original image if needed (Lime segmenter works on original resolution) + heatmap_resized = cv2.resize(heatmap_norm, (orig_bgr.shape[1], orig_bgr.shape[0])) + + # Apply JET color map + # 0 -> blue (strong negative influence) + # 0.5 -> yellow-green (neutral influence) + # 1.0 -> red (strong positive influence) + heatmap_color = cv2.applyColorMap(np.uint8(255 * heatmap_resized), cv2.COLORMAP_JET) + + # Optionally draw superpixel boundaries subtly + temp, mask = explanation.get_image_and_mask(pred_class, positive_only=False, num_features=5, hide_rest=False) + boundaries = mark_boundaries(np.zeros_like(orig_rgb), mask) + + # Overlay heatmap over original BGR + # Weighting: orig 60%, heatmap 40% + overlay = cv2.addWeighted(orig_bgr, 0.60, heatmap_color, 0.40, 0) + + # Add boundary lines to overlay + # mark_boundaries returns float [0, 1]. Get border pixels and color them yellow + border_mask = np.any(boundaries > 0, axis=-1) + overlay[border_mask] = [0, 255, 255] # Yellow boundaries + + cv2.imwrite(out_path, overlay) + + return out_path diff --git a/requirements.txt b/requirements.txt index 7c721c3..b7cccaf 100644 --- a/requirements.txt +++ b/requirements.txt @@ -37,5 +37,7 @@ langchain>=0.2.0 langchain-chroma>=0.1.0 langchain-google-genai>=1.0.0 pypdf>=4.0.0 -sentence-transformers>=2.7.0 +sentence-transformers==2.6.0 +transformers==4.35.2 ctransformers>=0.2.27 +reportlab>=4.0.0 diff --git a/templates/dashboard.html b/templates/dashboard.html index 7e25c23..94f6f86 100644 --- a/templates/dashboard.html +++ b/templates/dashboard.html @@ -1,895 +1,83 @@ - - -Solar PV — Dashboard - + {% include 'partials/css.html' %} + - - +
-
- - -
-
- - -
-
🔍 EL Image — Defect Detection
- -
- -
Drop EL image here or click to browse
-
PNG · JPG · BMP · up to 50 MB
-
+ {% include 'partials/panel_detect.html' %} -
-
- -
- - -
- - -
- - -
-
📊 Detection Result
-
- Upload an EL image and run detection to see results here. -
-
- -
- - - -
- - - -
-
- - -
-
🌍 Carbon Emission Prediction
- - -
- 📍 - - | - 🌡 - — °C - -
- -
- ⚠ Run detection first, then predict carbon emission. -
- -
-
- - -
-
- - -
-
- -
-
- - -
-
- - -
-
+ {% include 'partials/panel_carbon.html' %} - -
- -
-
📈 Carbon Results
-
- Carbon prediction will appear here. -
-
- -
-
- - - -
-
- -
-
🧠 GradCAM Explainability
-
- -
Select EL image for GradCAM analysis
-
Visualises which regions drove the model's decision
-
-
- -
- -
-
🔬 Activation Heatmap
-
- Heatmap will appear here. -
-
- -
-
+ {% include 'partials/panel_xai.html' %} - -
+ {% include 'partials/panel_thermal.html' %} -
- 🌡 Thermal Mode — Independent hotspot detection workflow. - Does not affect EL detection or carbon prediction. -
-
- -
-
- 🌡 Thermal / IR Image - SEPARATE -
-
- -
Upload thermal or infrared image
-
PNG · JPG · BMP · TIFF
-
-
- -
+ + {% include 'partials/panel_detect_thermal.html' %} -
-
🔥 Hotspot Overlay
-
- Hotspot overlay will appear here. -
-
-
-
+ + {% include 'partials/panel_xai_thermal.html' %} - -
-
-
- 📋 AI Report Summary - (local LLM — orca-mini-3b) -
- -
-
- - -
- - -
- -
+ {% include 'partials/panel_summary.html' %} -
Summary will appear here after generation. -⏳ Note: First generation may take 30–60 seconds while the local model loads into memory.
-
-
- - - -
-
-
- 💬 RAG Chatbot - (local LLM + ChromaDB · domain docs + CSV data) -
- -
- Ask anything about solar PV defects, inspection results, or carbon emissions. - Answers are grounded in your uploaded documents and real inspection CSV data. - First message loads the model (~30s). -
+ {% include 'partials/panel_summary_thermal.html' %} -
-
Hello! I'm your Solar PV assistant powered by a local AI model. -Ask me about defects, carbon emissions, or your inspection results.
-
+
-
- - - -
- - - -
+ + {% include 'partials/panel_chat.html' %} - - + {% include 'partials/js.html' %} + \ No newline at end of file diff --git a/templates/login.html b/templates/login.html index 445ca65..271cfc0 100644 --- a/templates/login.html +++ b/templates/login.html @@ -1,90 +1,297 @@ - -Solar PV — Login + + +Solaritivity -
- - -
- -
- -
+ - + \ No newline at end of file diff --git a/templates/partials/css.html b/templates/partials/css.html new file mode 100644 index 0000000..556a21a --- /dev/null +++ b/templates/partials/css.html @@ -0,0 +1,1252 @@ + \ No newline at end of file diff --git a/templates/partials/js.html b/templates/partials/js.html new file mode 100644 index 0000000..36eed0f --- /dev/null +++ b/templates/partials/js.html @@ -0,0 +1,940 @@ + \ No newline at end of file diff --git a/templates/partials/nav.html b/templates/partials/nav.html new file mode 100644 index 0000000..c984bb0 --- /dev/null +++ b/templates/partials/nav.html @@ -0,0 +1,361 @@ + + + + + \ No newline at end of file diff --git a/templates/partials/panel_carbon.html b/templates/partials/panel_carbon.html new file mode 100644 index 0000000..986511d --- /dev/null +++ b/templates/partials/panel_carbon.html @@ -0,0 +1,934 @@ + + + + + +
+ +
+ + + + +
+ + + + +
+
+ + +
+
+ Initialising
+
Preparing + carbon pipeline...
+
+ + +
+
+
+
+ + +
+ + +
+
+ + + + +
+
+
Detecting Location
+
Waiting...
+
+
+ + +
+
+ + + +
+
+
Fetching Temperature
+
Waiting...
+
+
+ + +
+
+ + + + +
+
+
Predicting Emission
+
Waiting...
+
+
+ + +
+
+ + + + +
+
+
Finalising
+
Waiting...
+
+
+ +
+ + +
+ + + +
+
+ + +
+ + +
+
+ Carbon Emission Prediction +
+
+ CO₂ · Climate Model
+
+ + +
+ + + + +
+ +
+
+
+ Model + Input +
+ + +
+ + +
+ + + + + + Detection + Summary +
+ + +
+
No detection data yet. Run detection + first.
+
+ + +
+
+ + +
+ + + + + Parameters +
+ + +
+
+
+ City
+
Auto-detecting... +
+
+
+
+ Temperature
+
Auto-detecting... +
+
+
+
+ Panel Power
+
380 W
+
+
+
+ Irradiance
+
900 W/m²
+
+
+ + +
+
+
+ Resolution
+
--
+
+
+
+ Filename
+
+ --
+
+
+ +
+
+ + + + +
+ +
+
+
+ Carbon + Results +
+ +
+ + +
+ + + +

+ Awaiting Prediction

+
+ + + + +
+
+ +
+ + + + + + + + + + + + +
+ +
+ +
+ + + \ No newline at end of file diff --git a/templates/partials/panel_chat.html b/templates/partials/panel_chat.html new file mode 100644 index 0000000..2677bf8 --- /dev/null +++ b/templates/partials/panel_chat.html @@ -0,0 +1,41 @@ + +
+ +
+ +
+ +
+
+ Ask Helio +
+ +
+ + +
+
Hello! I'm your Solar PV assistant. + How can I help you today?
+ +
+ + + +
+
+ + +
+ + +
+
+ \ No newline at end of file diff --git a/templates/partials/panel_detect.html b/templates/partials/panel_detect.html new file mode 100644 index 0000000..dd3e65c --- /dev/null +++ b/templates/partials/panel_detect.html @@ -0,0 +1,692 @@ + + + + + +
+ + +
+ + +
+ + + + + +
+ + + + + + + + +
+
+ + +
+
+ Initialising
+
Preparing + pipeline…
+
+ + +
+
+
+
+ + +
+ + + +
+
+ + + + +
+ + +
+ + +
+
+ EL Image — Defect Detection +
+
+ Neural Pipeline
+
+ + +
+ + + + +
+ +
+
+
+ Input +
+ + +
+ + +
+ + +
+ + + + + +
+
+ Drop EL image here or click to browse +
+
+ PNG · JPG · BMP · up to 50 MB
+
+ + + + + + + + +
+
+ + + + + + + +
+
+ + + + +
+ +
+
+
+ Detection + Result +
+ + +
+ + +
+ + + + + +

+ Awaiting Detection

+
+ + + + + + + +
+ + + + +
+ +
+ + + + + +
+ +
+ +
+ + + \ No newline at end of file diff --git a/templates/partials/panel_detect_thermal.html b/templates/partials/panel_detect_thermal.html new file mode 100644 index 0000000..8942fab --- /dev/null +++ b/templates/partials/panel_detect_thermal.html @@ -0,0 +1,548 @@ + + + + + +
+
+ + + + +
+ + + +
+
+ +
+
+ Running Detection
+
Processing + thermal image...
+
+ +
+
+
+
+ + +
+
+
+ + + + + +
+
+
+ Uploading Image
+
Waiting... +
+
+
+
+
+ + + + +
+
+
+ Detecting Defects
+
Waiting... +
+
+
+
+
+ + + + +
+
+
+ Rendering Results
+
Waiting... +
+
+
+
+ +
+ + + +
+
+ + +
+ + +
+
+ Thermal Defect Detection +
+
+ Detection Model
+
+ + +
+ + + + +
+
+
+
+ Thermal + Image Input +
+ +
+ + +
+ + + + +
Upload Thermal Image +
+
PNG · JPG · BMP · TIFF
+
+ + + + + + + +
+
+ + + + +
+
+
+
+ Detection + Results +
+ +
+ +
+ + + + +

+ Awaiting Detection

+
+ + + +
+
+ +
+ + +
+ +
+ +
+ + + \ No newline at end of file diff --git a/templates/partials/panel_summary.html b/templates/partials/panel_summary.html new file mode 100644 index 0000000..e1d57c9 --- /dev/null +++ b/templates/partials/panel_summary.html @@ -0,0 +1,128 @@ + +
+ + + + + +
+
+
AI-Powered Inspection Report
+ +
Click to generate a comprehensive AI inspection report
+ +
+ + +
+
+ + + + + + + + + + + +
+ +
+
\ No newline at end of file diff --git a/templates/partials/panel_summary_thermal.html b/templates/partials/panel_summary_thermal.html new file mode 100644 index 0000000..51728f6 --- /dev/null +++ b/templates/partials/panel_summary_thermal.html @@ -0,0 +1,86 @@ + +
+ + + + + +
+
+
AI-Powered Thermal Inspection Report
+ +
Based on hotspots and LIME analysis
+ +
+ +
+
+ + + + +
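(Usage note, not part of the patch.) The thermal report card above is wired to the new /api/summary/thermal/generate route added earlier in this diff (modules/llm_summary/summary_routes.py). A minimal client-side sketch of that call follows; it assumes the Flask app is running on localhost:5000 and that the requests session already satisfies @require_auth (the login step is omitted). The filename, class name, and box values are hypothetical placeholders.

import requests

session = requests.Session()
# ... perform whatever login @require_auth expects before calling the API ...

payload = {
    "image_filename": "thermal_panel_01.jpg",   # hypothetical thermal image name
    "detections": [                             # same shape as /api/thermal/detect output
        {"class_name": "hotspot", "confidence": 0.91,
         "bbox": [120, 80, 210, 160], "area_ratio": 0.0123},
    ],
    "lime_features": [],                        # optional LIME output, may be empty
}

resp = session.post(
    "http://localhost:5000/api/summary/thermal/generate",
    json=payload,
    timeout=120,
)
resp.raise_for_status()
print(resp.json()["summary"][:400])            # response is {"success": true, "summary": "..."}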
diff --git a/templates/partials/panel_thermal.html b/templates/partials/panel_thermal.html new file mode 100644 index 0000000..c483dfc --- /dev/null +++ b/templates/partials/panel_thermal.html @@ -0,0 +1,44 @@ + +
+ +
+ 🌡 Thermal Mode — Independent hotspot detection workflow. + Does not affect EL detection or carbon prediction. +
+ +
+ +
+
+ 🌡 Thermal / IR Image + SEPARATE +
+
+ +
Upload thermal or infrared image
+
PNG · JPG · BMP · TIFF
+
+
+ +
+ +
+
🔥 Hotspot Overlay
+
+ Hotspot overlay will appear here. +
+
+ +
+ + +
+ +
+
\ No newline at end of file diff --git a/templates/partials/panel_xai.html b/templates/partials/panel_xai.html new file mode 100644 index 0000000..1f4945f --- /dev/null +++ b/templates/partials/panel_xai.html @@ -0,0 +1,719 @@ + + + + + +
+ +
+ + + + +
+ + + + + +
+
+ + +
+
+ Initialising
+
Preparing + explainability pipeline...
+
+ + +
+
+
+
+ + +
+ + +
+
+ + + + + +
+
+
Preparing Image
+
Waiting...
+
+
+ + +
+
+ + + + +
+
+
Generating Heatmap
+
Waiting...
+
+
+ + +
+
+ + + + +
+
+
Rendering Results
+
Waiting...
+
+
+ +
+ + +
+ + + +
+
+ + +
+ + +
+
+ GradCAM Explainability +
+
+ XAI · Neural Attention
+
+ + +
+ + + + +
+ +
+
+
+ Activation + Heatmap +
+ + +
+ + +
+ + + + + +

+ Awaiting GradCAM

+
+ + + + +
+ + + +
+ + + + +
+ +
+
+
+ Explainability + Analysis +
+ + +
+ + +
+ +

+ Awaiting Analysis

+
+ + + + +
+
+ +
+ + + + + + + + +
+ +
+ +
+ + + \ No newline at end of file diff --git a/templates/partials/panel_xai1.html b/templates/partials/panel_xai1.html new file mode 100644 index 0000000..d261407 --- /dev/null +++ b/templates/partials/panel_xai1.html @@ -0,0 +1,513 @@ + + + + + + GradCAM Explainability · XAI + + + + + + + + + + + + +
+ + +
+
+ GradCAM Explainability +
+
+ XAI · Neural Attention +
+
+ + +
+ + +
+
+
+ Activation Heatmap +
+
+ + + +
+ +
+
+
+
+ Warm = high activation +
+
+
+ Cool = low activation +
+
+
+
+ + +
+
+
+ Explainability Analysis +
+
+
+ + +
+
+ + + + What is GradCAM? +
+

Gradient-weighted Class Activation Mapping (GradCAM) uses the gradients flowing into the final convolutional layer to produce a coarse localisation map highlighting important regions the model focused on for its prediction.

+
+ +
+ + +
+
+ + + + How to Read +
+
+
+
+ Warm regions (red/orange) indicate high neural activation — these are the features that most influenced the model's classification decision. +
+
+
+ Cool regions (blue/cyan) indicate low activation — these areas had minimal impact on the prediction. +
+
+
+ +
+ + +
+
+ + + + + + + Detection Context +
+ +
+
+
+ black_core + 94.3% +
+
+
+ crack + 87.1% +
+
+
+ finger + 72.6% +
+
+
+ + +
+ + + + Generated at --:--:-- -- +
+ +
+
+
+ +
+ +
+ + + + + + \ No newline at end of file diff --git a/templates/partials/panel_xai_thermal.html b/templates/partials/panel_xai_thermal.html new file mode 100644 index 0000000..1e3f62e --- /dev/null +++ b/templates/partials/panel_xai_thermal.html @@ -0,0 +1,539 @@ + + + + + +
+
+ + + + +
+ + + + +
+
+ +
+
+ Initialising
+
Preparing + thermal explainability...
+
+ +
+
+
+
+ +
+
+
+ + + + + +
+
+
+ Preparing Image
+
Waiting... +
+
+
+
+
+ + + + +
+
+
+ Generating Explanation
+
Waiting... +
+
+
+
+
+ + + + +
+
+
+ Rendering Results
+
Waiting... +
+
+
+
+ +
+ + + +
+
+ + +
+ + +
+
+ Thermal LIME +
+
+ XAI · Local Interpretable Explanations
+
+ + +
+ + +
+
+
+
+ Thermal + LIME Explanation +
+
+
+ + + + + +

+ Awaiting LIME

+
+ +
+ +
+ + +
+
+
+
+ Explainability + Analysis +
+
+
+

+ Awaiting Analysis

+
+ +
+
+ +
+ + +
+ +
+ +
+
+
+
\ No newline at end of file
diff --git a/valid_test_images/img000001.jpg b/valid_test_images/img000001.jpg
new file mode 100644
index 0000000..2cf3732
Binary files /dev/null and b/valid_test_images/img000001.jpg differ
diff --git a/valid_test_images/img000002.jpg b/valid_test_images/img000002.jpg
new file mode 100644
index 0000000..8d77bed
Binary files /dev/null and b/valid_test_images/img000002.jpg differ
diff --git a/valid_test_images/img000003.jpg b/valid_test_images/img000003.jpg
new file mode 100644
index 0000000..f424683
Binary files /dev/null and b/valid_test_images/img000003.jpg differ
diff --git a/valid_test_images/img000004.jpg b/valid_test_images/img000004.jpg
new file mode 100644
index 0000000..57e3f4d
Binary files /dev/null and b/valid_test_images/img000004.jpg differ
diff --git a/valid_test_images/img000005.jpg b/valid_test_images/img000005.jpg
new file mode 100644
index 0000000..bbe73cf
Binary files /dev/null and b/valid_test_images/img000005.jpg differ
diff --git a/valid_test_images/img000006.jpg b/valid_test_images/img000006.jpg
new file mode 100644
index 0000000..7ec9b71
Binary files /dev/null and b/valid_test_images/img000006.jpg differ
diff --git a/valid_test_images/img000007.jpg b/valid_test_images/img000007.jpg
new file mode 100644
index 0000000..78982cf
Binary files /dev/null and b/valid_test_images/img000007.jpg differ
diff --git a/valid_test_images/img000008.jpg b/valid_test_images/img000008.jpg
new file mode 100644
index 0000000..a2cfdd4
Binary files /dev/null and b/valid_test_images/img000008.jpg differ
diff --git a/valid_test_images/img000009.jpg b/valid_test_images/img000009.jpg
new file mode 100644
index 0000000..f42ccdb
Binary files /dev/null and b/valid_test_images/img000009.jpg differ
diff --git a/valid_test_images/img000010.jpg b/valid_test_images/img000010.jpg
new file mode 100644
index 0000000..9cdb23f
Binary files /dev/null and b/valid_test_images/img000010.jpg differ
diff --git a/valid_test_images/img000011.jpg b/valid_test_images/img000011.jpg
new file mode 100644
index 0000000..86e079b
Binary files /dev/null and b/valid_test_images/img000011.jpg differ
diff --git a/valid_test_images/img000012.jpg b/valid_test_images/img000012.jpg
new file mode 100644
index 0000000..26c93bf
Binary files /dev/null and b/valid_test_images/img000012.jpg differ
diff --git a/valid_test_images/img000013.jpg b/valid_test_images/img000013.jpg
new file mode 100644
index 0000000..21a498a
Binary files /dev/null and b/valid_test_images/img000013.jpg differ
diff --git a/valid_test_images/img000014.jpg b/valid_test_images/img000014.jpg
new file mode 100644
index 0000000..9a5b8a1
Binary files /dev/null and b/valid_test_images/img000014.jpg differ
diff --git a/valid_test_images/img000015.jpg b/valid_test_images/img000015.jpg
new file mode 100644
index 0000000..201c46e
Binary files /dev/null and b/valid_test_images/img000015.jpg differ
diff --git a/valid_test_images/img000016.jpg b/valid_test_images/img000016.jpg
new file mode 100644
index 0000000..cb1f251
Binary files /dev/null and b/valid_test_images/img000016.jpg differ
diff --git a/valid_test_images/img000017.jpg b/valid_test_images/img000017.jpg
new file mode 100644
index 0000000..5ff3244
Binary files /dev/null and b/valid_test_images/img000017.jpg differ
diff --git a/valid_test_images/img000018.jpg b/valid_test_images/img000018.jpg
new file mode 100644
index 0000000..565984f
Binary files /dev/null and b/valid_test_images/img000018.jpg differ
diff --git a/valid_test_images/img000019.jpg b/valid_test_images/img000019.jpg
new file mode 100644
index 0000000..41dcc6e
Binary files /dev/null and b/valid_test_images/img000019.jpg differ
diff --git a/valid_test_images/img000020.jpg b/valid_test_images/img000020.jpg
new file mode 100644
index 0000000..a28fe6f
Binary files /dev/null and b/valid_test_images/img000020.jpg differ
diff --git a/valid_test_images/img000021.jpg b/valid_test_images/img000021.jpg
new file mode 100644
index 0000000..b755416
Binary files /dev/null and b/valid_test_images/img000021.jpg differ
diff --git a/valid_test_images/img000022.jpg b/valid_test_images/img000022.jpg
new file mode 100644
index 0000000..4495235
Binary files /dev/null and b/valid_test_images/img000022.jpg differ
diff --git a/valid_test_images/img000023.jpg b/valid_test_images/img000023.jpg
new file mode 100644
index 0000000..571fe5c
Binary files /dev/null and b/valid_test_images/img000023.jpg differ
diff --git a/valid_test_images/img000024.jpg b/valid_test_images/img000024.jpg
new file mode 100644
index 0000000..b224ea5
Binary files /dev/null and b/valid_test_images/img000024.jpg differ
diff --git a/valid_test_images/img000025.jpg b/valid_test_images/img000025.jpg
new file mode 100644
index 0000000..e94efb5
Binary files /dev/null and b/valid_test_images/img000025.jpg differ
diff --git a/valid_test_images/img000026.jpg b/valid_test_images/img000026.jpg
new file mode 100644
index 0000000..14e5c51
Binary files /dev/null and b/valid_test_images/img000026.jpg differ
diff --git a/valid_test_images/img000027.jpg b/valid_test_images/img000027.jpg
new file mode 100644
index 0000000..56b35f0
Binary files /dev/null and b/valid_test_images/img000027.jpg differ
diff --git a/valid_test_images/img000028.jpg b/valid_test_images/img000028.jpg
new file mode 100644
index 0000000..5bf9071
Binary files /dev/null and b/valid_test_images/img000028.jpg differ
diff --git a/valid_test_images/img000029.jpg b/valid_test_images/img000029.jpg
new file mode 100644
index 0000000..4e82197
Binary files /dev/null and b/valid_test_images/img000029.jpg differ
diff --git a/valid_test_images/img000030.jpg b/valid_test_images/img000030.jpg
new file mode 100644
index 0000000..8253cad
Binary files /dev/null and b/valid_test_images/img000030.jpg differ
diff --git a/valid_test_images/img000031.jpg b/valid_test_images/img000031.jpg
new file mode 100644
index 0000000..2ec6ab6
Binary files /dev/null and b/valid_test_images/img000031.jpg differ
diff --git a/valid_test_images/img000032.jpg b/valid_test_images/img000032.jpg
new file mode 100644
index 0000000..86b4a81
Binary files /dev/null and b/valid_test_images/img000032.jpg differ
diff --git a/valid_test_images/img000033.jpg b/valid_test_images/img000033.jpg
new file mode 100644
index 0000000..62b9122
Binary files /dev/null and b/valid_test_images/img000033.jpg differ
diff --git a/valid_test_images/img000034.jpg b/valid_test_images/img000034.jpg
new file mode 100644
index 0000000..ba998d9
Binary files /dev/null and b/valid_test_images/img000034.jpg differ
diff --git a/valid_test_images/img000035.jpg b/valid_test_images/img000035.jpg
new file mode 100644
index 0000000..4d4a16f
Binary files /dev/null and b/valid_test_images/img000035.jpg differ
diff --git a/valid_test_images/img000036.jpg b/valid_test_images/img000036.jpg
new file mode 100644
index 0000000..f874928
Binary files /dev/null and b/valid_test_images/img000036.jpg differ
diff --git a/valid_test_images/img000037.jpg b/valid_test_images/img000037.jpg
new file mode 100644
index 0000000..4830c71
Binary files /dev/null and b/valid_test_images/img000037.jpg differ
diff --git a/valid_test_images/img000038.jpg b/valid_test_images/img000038.jpg
new file mode 100644
index 0000000..e7ec120
Binary files /dev/null and b/valid_test_images/img000038.jpg differ
diff --git a/valid_test_images/img000039.jpg b/valid_test_images/img000039.jpg
new file mode 100644
index 0000000..ac85dba
Binary files /dev/null and b/valid_test_images/img000039.jpg differ
diff --git a/valid_test_images/img000040.jpg b/valid_test_images/img000040.jpg
new file mode 100644
index 0000000..3df4135
Binary files /dev/null and b/valid_test_images/img000040.jpg differ
diff --git a/valid_test_images/img000041.jpg b/valid_test_images/img000041.jpg
new file mode 100644
index 0000000..a69bba8
Binary files /dev/null and b/valid_test_images/img000041.jpg differ
diff --git a/valid_test_images/img000042.jpg b/valid_test_images/img000042.jpg
new file mode 100644
index 0000000..698681d
Binary files /dev/null and b/valid_test_images/img000042.jpg differ
diff --git a/valid_test_images/img000043.jpg b/valid_test_images/img000043.jpg
new file mode 100644
index 0000000..867eee5
Binary files /dev/null and b/valid_test_images/img000043.jpg differ
diff --git a/valid_test_images/img000044.jpg b/valid_test_images/img000044.jpg
new file mode 100644
index 0000000..dfe0fa2
Binary files /dev/null and b/valid_test_images/img000044.jpg differ
diff --git a/valid_test_images/img000045.jpg b/valid_test_images/img000045.jpg
new file mode 100644
index 0000000..8c2222c
Binary files /dev/null and b/valid_test_images/img000045.jpg differ
diff --git a/valid_test_images/img000046.jpg b/valid_test_images/img000046.jpg
new file mode 100644
index 0000000..b884fc4
Binary files /dev/null and b/valid_test_images/img000046.jpg differ
diff --git a/valid_test_images/img000047.jpg b/valid_test_images/img000047.jpg
new file mode 100644
index 0000000..252cf94
Binary files /dev/null and b/valid_test_images/img000047.jpg differ
diff --git a/valid_test_images/img000048.jpg b/valid_test_images/img000048.jpg
new file mode 100644
index 0000000..e3ee5ca
Binary files /dev/null and b/valid_test_images/img000048.jpg differ
diff --git a/valid_test_images/img000049.jpg b/valid_test_images/img000049.jpg
new file mode 100644
index 0000000..b755385
Binary files /dev/null and b/valid_test_images/img000049.jpg differ
diff --git a/valid_test_images/img000050.jpg b/valid_test_images/img000050.jpg
new file mode 100644
index 0000000..41cfa33
Binary files /dev/null and b/valid_test_images/img000050.jpg differ
diff --git a/valid_test_images/img000051.jpg b/valid_test_images/img000051.jpg
new file mode 100644
index 0000000..3240d75
Binary files /dev/null and b/valid_test_images/img000051.jpg differ
diff --git a/valid_test_images/img000052.jpg b/valid_test_images/img000052.jpg
new file mode 100644
index 0000000..778960a
Binary files /dev/null and b/valid_test_images/img000052.jpg differ
diff --git a/valid_test_images/img000053.jpg b/valid_test_images/img000053.jpg
new file mode 100644
index 0000000..d3e3abd
Binary files /dev/null and b/valid_test_images/img000053.jpg differ
diff --git a/valid_test_images/img000054.jpg b/valid_test_images/img000054.jpg
new file mode 100644
index 0000000..5f43e96
Binary files /dev/null and b/valid_test_images/img000054.jpg differ
diff --git a/valid_test_images/img000055.jpg b/valid_test_images/img000055.jpg
new file mode 100644
index 0000000..fa37d75
Binary files /dev/null and b/valid_test_images/img000055.jpg differ
diff --git a/valid_test_images/img000056.jpg b/valid_test_images/img000056.jpg
new file mode 100644
index 0000000..0c8cbb6
Binary files /dev/null and b/valid_test_images/img000056.jpg differ
diff --git a/valid_test_images/img000057.jpg b/valid_test_images/img000057.jpg
new file mode 100644
index 0000000..ed76336
Binary files /dev/null and b/valid_test_images/img000057.jpg differ
diff --git a/valid_test_images/img000058.jpg b/valid_test_images/img000058.jpg
new file mode 100644
index 0000000..3d49dfd
Binary files /dev/null and b/valid_test_images/img000058.jpg differ
diff --git a/valid_test_images/img000059.jpg b/valid_test_images/img000059.jpg
new file mode 100644
index 0000000..a4fdc34
Binary files /dev/null and b/valid_test_images/img000059.jpg differ
diff --git a/valid_test_images/img000060.jpg b/valid_test_images/img000060.jpg
new file mode 100644
index 0000000..db17f16
Binary files /dev/null and b/valid_test_images/img000060.jpg differ
diff --git a/valid_test_images/img000061.jpg b/valid_test_images/img000061.jpg
new file mode 100644
index 0000000..5cf1799
Binary files /dev/null and b/valid_test_images/img000061.jpg differ
diff --git a/valid_test_images/img000062.jpg b/valid_test_images/img000062.jpg
new file mode 100644
index 0000000..7f0f109
Binary files /dev/null and b/valid_test_images/img000062.jpg differ
diff --git a/valid_test_images/img000063.jpg b/valid_test_images/img000063.jpg
new file mode 100644
index 0000000..d7a26b8
Binary files /dev/null and b/valid_test_images/img000063.jpg differ
diff --git a/valid_test_images/img000064.jpg b/valid_test_images/img000064.jpg
new file mode 100644
index 0000000..c19f423
Binary files /dev/null and b/valid_test_images/img000064.jpg differ
diff --git a/valid_test_images/img000065.jpg b/valid_test_images/img000065.jpg
new file mode 100644
index 0000000..3477a86
Binary files /dev/null and b/valid_test_images/img000065.jpg differ
diff --git a/valid_test_images/img000066.jpg b/valid_test_images/img000066.jpg
new file mode 100644
index 0000000..bbede2d
Binary files /dev/null and b/valid_test_images/img000066.jpg differ
diff --git a/valid_test_images/img000067.jpg b/valid_test_images/img000067.jpg
new file mode 100644
index 0000000..64aead1
Binary files /dev/null and b/valid_test_images/img000067.jpg differ
diff --git a/valid_test_images/img000068.jpg b/valid_test_images/img000068.jpg
new file mode 100644
index 0000000..e430828
Binary files /dev/null and b/valid_test_images/img000068.jpg differ
diff --git a/valid_test_images/img000069.jpg b/valid_test_images/img000069.jpg
new file mode 100644
index 0000000..9ab4da4
Binary files /dev/null and b/valid_test_images/img000069.jpg differ
diff --git a/valid_test_images/img000070.jpg b/valid_test_images/img000070.jpg
new file mode 100644
index 0000000..c127a00
Binary files /dev/null and b/valid_test_images/img000070.jpg differ
diff --git a/valid_test_images/img000071.jpg b/valid_test_images/img000071.jpg
new file mode 100644
index 0000000..f88dbd0
Binary files /dev/null and b/valid_test_images/img000071.jpg differ
diff --git a/valid_test_images/img000072.jpg b/valid_test_images/img000072.jpg
new file mode 100644
index 0000000..76393f8
Binary files /dev/null and b/valid_test_images/img000072.jpg differ
diff --git a/valid_test_images/img000073.jpg b/valid_test_images/img000073.jpg
new file mode 100644
index 0000000..ffa06ca
Binary files /dev/null and b/valid_test_images/img000073.jpg differ
diff --git a/valid_test_images/img000074.jpg b/valid_test_images/img000074.jpg
new file mode 100644
index 0000000..6b24400
Binary files /dev/null and b/valid_test_images/img000074.jpg differ
diff --git a/valid_test_images/img000075.jpg b/valid_test_images/img000075.jpg
new file mode 100644
index 0000000..3b3d476
Binary files /dev/null and b/valid_test_images/img000075.jpg differ
diff --git a/valid_test_images/img000076.jpg b/valid_test_images/img000076.jpg
new file mode 100644
index 0000000..1cdbb1e
Binary files /dev/null and b/valid_test_images/img000076.jpg differ
diff --git a/valid_test_images/img000077.jpg b/valid_test_images/img000077.jpg
new file mode 100644
index 0000000..9944cc0
Binary files /dev/null and b/valid_test_images/img000077.jpg differ
diff --git a/valid_test_images/img000078.jpg b/valid_test_images/img000078.jpg
new file mode 100644
index 0000000..16fe35a
Binary files /dev/null and b/valid_test_images/img000078.jpg differ
diff --git a/valid_test_images/img000079.jpg b/valid_test_images/img000079.jpg
new file mode 100644
index 0000000..d1f63ce
Binary files /dev/null and b/valid_test_images/img000079.jpg differ
diff --git a/valid_test_images/img000080.jpg b/valid_test_images/img000080.jpg
new file mode 100644
index 0000000..1d8d1ae
Binary files /dev/null and b/valid_test_images/img000080.jpg differ
diff --git a/valid_test_images/img000081.jpg b/valid_test_images/img000081.jpg
new file mode 100644
index 0000000..642f63b
Binary files /dev/null and b/valid_test_images/img000081.jpg differ
diff --git a/valid_test_images/img000082.jpg b/valid_test_images/img000082.jpg
new file mode 100644
index 0000000..af820a7
Binary files /dev/null and b/valid_test_images/img000082.jpg differ
diff --git a/valid_test_images/img000083.jpg b/valid_test_images/img000083.jpg
new file mode 100644
index 0000000..73fb4d8
Binary files /dev/null and b/valid_test_images/img000083.jpg differ
diff --git a/valid_test_images/img000084.jpg b/valid_test_images/img000084.jpg
new file mode 100644
index 0000000..5b2be22
Binary files /dev/null and b/valid_test_images/img000084.jpg differ
diff --git a/valid_test_images/img000085.jpg b/valid_test_images/img000085.jpg
new file mode 100644
index 0000000..e4a5653
Binary files /dev/null and b/valid_test_images/img000085.jpg differ
diff --git a/valid_test_images/img000086.jpg b/valid_test_images/img000086.jpg
new file mode 100644
index 0000000..fa98e2a
Binary files /dev/null and b/valid_test_images/img000086.jpg differ
diff --git a/valid_test_images/img000087.jpg b/valid_test_images/img000087.jpg
new file mode 100644
index 0000000..a5c6952
Binary files /dev/null and b/valid_test_images/img000087.jpg differ
diff --git a/valid_test_images/img000088.jpg b/valid_test_images/img000088.jpg
new file mode 100644
index 0000000..a9091e1
Binary files /dev/null and b/valid_test_images/img000088.jpg differ
diff --git a/valid_test_images/img000089.jpg b/valid_test_images/img000089.jpg
new file mode 100644
index 0000000..d266588
Binary files /dev/null and b/valid_test_images/img000089.jpg differ
diff --git a/valid_test_images/img000090.jpg b/valid_test_images/img000090.jpg
new file mode 100644
index 0000000..5809cc4
Binary files /dev/null and b/valid_test_images/img000090.jpg differ
diff --git a/valid_test_images/img000091.jpg b/valid_test_images/img000091.jpg
new file mode 100644
index 0000000..1d4c78b
Binary files /dev/null and b/valid_test_images/img000091.jpg differ
diff --git a/valid_test_images/img000092.jpg b/valid_test_images/img000092.jpg
new file mode 100644
index 0000000..96cb1c2
Binary files /dev/null and b/valid_test_images/img000092.jpg differ
diff --git a/valid_test_images/img000093.jpg b/valid_test_images/img000093.jpg
new file mode 100644
index 0000000..642f63b
Binary files /dev/null and b/valid_test_images/img000093.jpg differ
diff --git a/valid_test_images/img000094.jpg b/valid_test_images/img000094.jpg
new file mode 100644
index 0000000..4d72fde
Binary files /dev/null and b/valid_test_images/img000094.jpg differ
diff --git a/valid_test_images/img000095.jpg b/valid_test_images/img000095.jpg
new file mode 100644
index 0000000..cb57604
Binary files /dev/null and b/valid_test_images/img000095.jpg differ
diff --git a/valid_test_images/img000096.jpg b/valid_test_images/img000096.jpg
new file mode 100644
index 0000000..2d465a6
Binary files /dev/null and b/valid_test_images/img000096.jpg differ
diff --git a/valid_test_images/img000097.jpg b/valid_test_images/img000097.jpg
new file mode 100644
index 0000000..514a3c6
Binary files /dev/null and b/valid_test_images/img000097.jpg differ
diff --git a/valid_test_images/img000098.jpg b/valid_test_images/img000098.jpg
new file mode 100644
index 0000000..a83b0ae
Binary files /dev/null and b/valid_test_images/img000098.jpg differ
diff --git a/valid_test_images/img000099.jpg b/valid_test_images/img000099.jpg
new file mode 100644
index 0000000..275a089
Binary files /dev/null and b/valid_test_images/img000099.jpg differ
diff --git a/valid_test_images/img000100.jpg b/valid_test_images/img000100.jpg
new file mode 100644
index 0000000..7a67704
Binary files /dev/null and b/valid_test_images/img000100.jpg differ