@@ -528,9 +528,7 @@ def upload_command(args: argparse.Namespace) -> int:
         fw_api_key_value = get_fireworks_api_key()
         if not fw_account_id and fw_api_key_value:
             # Attempt to verify and resolve account id from server headers
-            resolved = verify_api_key_and_get_account_id(
-                api_key=fw_api_key_value, api_base=get_fireworks_api_base()
-            )
+            resolved = verify_api_key_and_get_account_id(api_key=fw_api_key_value, api_base=get_fireworks_api_base())
             if resolved:
                 fw_account_id = resolved
                 # Propagate to environment so downstream calls use it if needed
@@ -593,31 +591,17 @@ def upload_command(args: argparse.Namespace) -> int:

     print(f"\nUploading evaluator '{evaluator_id}' for {qualname.split('.')[-1]}...")
     try:
-        # Upload full directory of the test as multi-metric if the dir contains multiple files
+        # Always treat as a single evaluator (single-metric) even if folder has helper modules
         test_dir = os.path.dirname(source_file_path) if source_file_path else root
-        # Use multi_metrics if multiple .py files exist at the root dir; otherwise treat as single metric dir
-        py_files = [f for f in os.listdir(test_dir) if f.endswith(".py")]
-        if len(py_files) > 1:
-            result = create_evaluation(
-                evaluator_id=evaluator_id,
-                multi_metrics=True,
-                folder=test_dir,
-                display_name=display_name or evaluator_id,
-                description=description or f"Evaluator for {qualname}",
-                force=force,
-                entry_point=entry_point,
-            )
-        else:
-            # Single metric mode: metric name derived from folder name; include all files recursively
-            metric_name = os.path.basename(test_dir) or "metric"
-            result = create_evaluation(
-                evaluator_id=evaluator_id,
-                metric_folders=[f"{metric_name}={test_dir}"],
-                display_name=display_name or evaluator_id,
-                description=description or f"Evaluator for {qualname}",
-                force=force,
-                entry_point=entry_point,
-            )
+        metric_name = os.path.basename(test_dir) or "metric"
+        result = create_evaluation(
+            evaluator_id=evaluator_id,
+            metric_folders=[f"{metric_name}={test_dir}"],
+            display_name=display_name or evaluator_id,
+            description=description or f"Evaluator for {qualname}",
+            force=force,
+            entry_point=entry_point,
+        )
         name = result.get("name", evaluator_id) if isinstance(result, dict) else evaluator_id

         # Print success message with Fireworks dashboard link
0 commit comments