diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7b230a81..45198534 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -55,7 +55,7 @@ jobs: CLOUDOS_WORKSPACE_ID: ${{ secrets.CLOUDOS_WORKSPACE_ID_ADAPT }} CLOUDOS_URL: "https://cloudos.lifebit.ai" run: | - cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID + echo 'q' | cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID job_list_filtering: needs: job_run_and_status runs-on: ubuntu-latest @@ -81,15 +81,15 @@ jobs: run: | JOB_ID="${{ needs.job_run_and_status.outputs.job_id }}" # Test filtering by status, project and workflow name - cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-status completed --filter-project cloudos-cli-tests --filter-workflow GH-rnatoy --last --last-n-jobs 10 + echo 'q' | cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-status completed --filter-project cloudos-cli-tests --filter-workflow GH-rnatoy --last --last-n-jobs 10 # Test filtering job id - cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-job-id $JOB_ID + echo 'q' | cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-job-id $JOB_ID # Test filtering job name - cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-job-name "cloudos-cli-CI-test" --last-n-jobs 10 + echo 'q' | cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-job-name "cloudos-cli-CI-test" --last-n-jobs 10 # Test filtering by only mine - cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID 
--filter-only-mine --last-n-jobs 10 + echo 'q' | cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-only-mine --last-n-jobs 10 # Test filtering by queue - cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-queue "cost_saving_standard_nextflow" --last-n-jobs 10 + echo 'q' | cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-queue "cost_saving_standard_nextflow" --last-n-jobs 10 job_details: needs: job_run_and_status runs-on: ubuntu-latest diff --git a/.github/workflows/ci_az.yml b/.github/workflows/ci_az.yml index 2ccc8063..b8e5b893 100644 --- a/.github/workflows/ci_az.yml +++ b/.github/workflows/ci_az.yml @@ -36,7 +36,7 @@ jobs: CLOUDOS_WORKSPACE_ID: ${{ secrets.CLOUDOS_WORKSPACE_ID_AZURE }} CLOUDOS_URL: "https://dev.sdlc.lifebit.ai" run: | - cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID + echo q | cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID job_list_filtering_az: needs: job_run_and_status_az runs-on: ubuntu-latest @@ -62,15 +62,15 @@ jobs: run: | JOB_ID="${{ needs.job_run_and_status_az.outputs.job_id }}" # Test filtering by status, project and workflow name - cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-status completed --filter-project cloudos-cli-tests --filter-workflow GH-rnatoy --last --last-n-jobs 10 + echo q | cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-status completed --filter-project cloudos-cli-tests --filter-workflow GH-rnatoy --last --last-n-jobs 10 # Test filtering job id - cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-job-id $JOB_ID + echo q | 
cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-job-id $JOB_ID # Test filtering job name - cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-job-name "cloudos-cli-CI-test" --last-n-jobs 10 + echo q | cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-job-name "cloudos-cli-CI-test" --last-n-jobs 10 # Test filtering by only mine - cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-only-mine --last-n-jobs 10 + echo q | cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-only-mine --last-n-jobs 10 # Test filtering by queue - #cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-queue "cost_saving_standard_nextflow" --last-n-jobs 10 + #echo q | cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-queue "cost_saving_standard_nextflow" --last-n-jobs 10 job_details_az: needs: job_run_and_status_az runs-on: ubuntu-latest diff --git a/.github/workflows/ci_dev.yml b/.github/workflows/ci_dev.yml index fb576ae4..026d4fa0 100644 --- a/.github/workflows/ci_dev.yml +++ b/.github/workflows/ci_dev.yml @@ -36,7 +36,7 @@ jobs: CLOUDOS_WORKSPACE_ID: ${{ secrets.CLOUDOS_WORKSPACE_ID_DEV }} CLOUDOS_URL: "https://dev.sdlc.lifebit.ai" run: | - cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID + echo q | cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID job_list_filtering_dev: needs: job_run_and_status_dev runs-on: ubuntu-latest @@ -62,15 +62,15 @@ jobs: run: | JOB_ID="${{ needs.job_run_and_status_dev.outputs.job_id }}" # Test filtering by status, 
project and workflow name - cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-status completed --filter-project cloudos-cli-tests --filter-workflow GH-rnatoy --last --last-n-jobs 10 + echo q | cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-status completed --filter-project cloudos-cli-tests --filter-workflow GH-rnatoy --last --last-n-jobs 10 # Test filtering job id - cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-job-id $JOB_ID + echo q | cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-job-id $JOB_ID # Test filtering job name - cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-job-name "cloudos-cli-CI-test" --last-n-jobs 10 + echo q | cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-job-name "cloudos-cli-CI-test" --last-n-jobs 10 # Test filtering by only mine - cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-only-mine --last-n-jobs 10 + echo q | cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-only-mine --last-n-jobs 10 # Test filtering by queue - #cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-queue "cost_saving_standard_nextflow" --last-n-jobs 10 + #echo q | cloudos job list --cloudos-url $CLOUDOS_URL --apikey $CLOUDOS_TOKEN --workspace-id $CLOUDOS_WORKSPACE_ID --filter-queue "cost_saving_standard_nextflow" --last-n-jobs 10 job_details_dev: needs: job_run_and_status_dev runs-on: ubuntu-latest diff --git a/CHANGELOG.md b/CHANGELOG.md index 3da37d7c..dbc1ca3b 100644 --- a/CHANGELOG.md 
+++ b/CHANGELOG.md @@ -1,5 +1,11 @@ ## lifebit-ai/cloudos-cli: changelog +## v2.87.0 (2026-03-30) + +### Feat + +- Adds system queues + ## v2.86.0 (2026-03-23) ### Feat diff --git a/README.md b/README.md index ec56aac1..b998f00d 100644 --- a/README.md +++ b/README.md @@ -401,7 +401,7 @@ Job queues are required for running jobs using AWS batch executor. The available #### List Queues -This command allows you to view available computational queues and their configurations. You can get a summary of all available workspace job queues in three different output formats using the `--output-format` option: +This command allows you to view available computational queues and their configurations. By default, both regular workspace queues and system queues are displayed. You can get a summary of all available job queues in three different output formats using the `--output-format` option: - **stdout** (default): Displays a rich formatted table directly in the terminal with pagination and visual formatting - **csv**: Saves queue data to a CSV file with a selection of available queue information, or all information using the `--all-fields` flag @@ -415,6 +415,12 @@ cloudos queue list --profile my_profile cloudos queue list --profile my_profile --output-format stdout ``` +To exclude system queues and show only workspace queues: + +```bash +cloudos queue list --profile my_profile --exclude-system-queues +``` + To save all available job queues in JSON format: ```bash @@ -437,7 +443,7 @@ cloudos queue list --profile my_profile --output-format csv **Job queues for platform workflows** -Platform workflows (those provided by CloudOS in your workspace as modules) run on separate and specific AWS batch queues. Therefore, CloudOS will automatically assign the valid queue and you should not specify any queue using the `--job-queue` parameter. Any attempt to use this parameter will be ignored. Examples of such platform workflows are "System Tools" and "Data Factory" workflows. 
+Platform workflows (those provided by CloudOS in your workspace as modules) run on separate and specific AWS batch queues (system queues). Therefore, CloudOS will automatically assign the valid queue and you should not specify any queue using the `--job-queue` parameter. Any attempt to use this parameter will be ignored. Examples of such platform workflows are "System Tools" and "Data Factory" workflows. ### Workflow @@ -851,7 +857,7 @@ You can find specific jobs within your workspace using the filtering options. Fi - **`--filter-job-id`**: Filter jobs by specific job ID (exact match required) - **`--filter-only-mine`**: Show only jobs belonging to the current user - **`--filter-owner`**: Show only jobs for the specified owner (exact match required, e.g., "John Doe") -- **`--filter-queue`**: Filter jobs by queue name (only applies to batch jobs) +- **`--filter-queue`**: Filter jobs by queue name (works with both regular and system queues; only applies to batch jobs) **Filtering Examples** diff --git a/cloudos_cli/_version.py b/cloudos_cli/_version.py index c27892cb..46c521e6 100644 --- a/cloudos_cli/_version.py +++ b/cloudos_cli/_version.py @@ -1 +1 @@ -__version__ = '2.86.0' +__version__ = '2.87.0' diff --git a/cloudos_cli/clos.py b/cloudos_cli/clos.py index acee04d0..166504c1 100644 --- a/cloudos_cli/clos.py +++ b/cloudos_cli/clos.py @@ -1257,6 +1257,16 @@ def get_job_list(self, workspace_id, last_n_jobs=None, page=None, page_size=None if use_pagination_mode and target_job_count != 'all' and isinstance(target_job_count, int) and target_job_count > 0: all_jobs = all_jobs[:target_job_count] + # --- Adjust pagination metadata for client-side filtering --- + # When filter_queue is applied, we've fetched multiple API pages but we're showing + # the filtered results. Reset pagination to reflect the filtered view. 
+ if filter_queue and last_pagination_metadata: + last_pagination_metadata = { + 'Pagination-Count': len(all_jobs), # Total filtered jobs collected + 'Pagination-Page': current_page, # Use current_page (guaranteed to be int) instead of page + 'Pagination-Limit': current_page_size # Page size + } + return {'jobs': all_jobs, 'pagination_metadata': last_pagination_metadata} @staticmethod diff --git a/cloudos_cli/jobs/cli.py b/cloudos_cli/jobs/cli.py index 53bac81b..e6e1250d 100644 --- a/cloudos_cli/jobs/cli.py +++ b/cloudos_cli/jobs/cli.py @@ -2,6 +2,7 @@ import rich_click as click import cloudos_cli.jobs.job as jb +from cloudos_cli.jobs.job import fetch_job_page from cloudos_cli.clos import Cloudos from cloudos_cli.utils.errors import BadRequestException from cloudos_cli.utils.resources import ssl_selector @@ -1221,7 +1222,7 @@ def job_details(ctx, help='Filter to show only jobs belonging to the current user.', is_flag=True) @click.option('--filter-queue', - help='Filter jobs by queue name. Only applies to jobs running in batch environment. Non-batch jobs are preserved in results.') + help='Filter jobs by queue name. Only applies to jobs running in batch environment. 
Non-batch jobs are preserved in results.') @click.option('--filter-owner', help='Filter jobs by owner username.') @click.option('--verbose', @@ -1335,7 +1336,13 @@ def list_jobs(ctx, ]) if output_format == 'stdout': # For stdout, always show a user-friendly message - create_job_list_table([], cloudos_url, pagination_metadata, selected_columns) + # Create callback for interactive pagination + fetch_page = lambda page_num: fetch_job_page( + cl, workspace_id, page_num, page_size, None, archived, verify_ssl, + filter_status, filter_job_name, filter_project, filter_workflow, + filter_job_id, filter_only_mine, filter_owner, filter_queue, last + ) + create_job_list_table([], cloudos_url, pagination_metadata, selected_columns, fetch_page_callback=fetch_page) else: if filters_used: print('A total of 0 jobs collected.') @@ -1347,8 +1354,14 @@ def list_jobs(ctx, 'does not exist. Please, try a smaller number for --page or collect all the jobs by not ' + 'using --page parameter.') elif output_format == 'stdout': - # Display as table - create_job_list_table(my_jobs_r, cloudos_url, pagination_metadata, selected_columns) + # Display as table with interactive pagination + # Create callback for interactive pagination + fetch_page = lambda page_num: fetch_job_page( + cl, workspace_id, page_num, page_size, None, archived, verify_ssl, + filter_status, filter_job_name, filter_project, filter_workflow, + filter_job_id, filter_only_mine, filter_owner, filter_queue, last + ) + create_job_list_table(my_jobs_r, cloudos_url, pagination_metadata, selected_columns, fetch_page_callback=fetch_page) elif output_format == 'csv': my_jobs = cl.process_job_list(my_jobs_r, all_fields) cl.save_job_list_to_csv(my_jobs, outfile) diff --git a/cloudos_cli/jobs/job.py b/cloudos_cli/jobs/job.py index 9c28d453..09eddc6b 100644 --- a/cloudos_cli/jobs/job.py +++ b/cloudos_cli/jobs/job.py @@ -1727,3 +1727,68 @@ def get_branches(self, repository_identifier, owner, workflow_owner_id, return {"branches": 
all_branches, "total": total or len(all_branches)} + +def fetch_job_page(cl, workspace_id, page_num, page_size, last_n_jobs, archived, verify_ssl, + filter_status, filter_job_name, filter_project, filter_workflow, + filter_job_id, filter_only_mine, filter_owner, filter_queue, last): + """Helper function to fetch a specific page of jobs. + + Parameters + ---------- + cl : Cloudos + CloudOS API client instance + workspace_id : str + The CloudOS workspace ID + page_num : int + Page number to fetch (1-indexed) + page_size : int + Number of jobs per page + last_n_jobs : int or None + Last N jobs parameter (should be None for pagination mode) + archived : bool + Whether to include archived jobs + verify_ssl : bool or str + SSL verification setting + filter_status : str or None + Status filter + filter_job_name : str or None + Job name filter + filter_project : str or None + Project filter + filter_workflow : str or None + Workflow filter + filter_job_id : str or None + Job ID filter + filter_only_mine : bool + Filter for user's own jobs + filter_owner : str or None + Owner filter + filter_queue : str or None + Queue filter + last : bool + Use latest workflow for duplicates + + Returns + ------- + dict + Dictionary with 'jobs' list and 'pagination_metadata' dict + """ + result = cl.get_job_list( + workspace_id, + last_n_jobs=last_n_jobs, + page=page_num, + page_size=page_size, + archived=archived, + verify=verify_ssl, + filter_status=filter_status, + filter_job_name=filter_job_name, + filter_project=filter_project, + filter_workflow=filter_workflow, + filter_job_id=filter_job_id, + filter_only_mine=filter_only_mine, + filter_owner=filter_owner, + filter_queue=filter_queue, + last=last + ) + return result + diff --git a/cloudos_cli/queue/cli.py b/cloudos_cli/queue/cli.py index 269ae3ff..99f0052f 100644 --- a/cloudos_cli/queue/cli.py +++ b/cloudos_cli/queue/cli.py @@ -47,6 +47,9 @@ def queue(): 'just the preconfigured selected fields. 
Only applicable ' + 'when --output-format=csv'), is_flag=True) +@click.option('--exclude-system-queues', + help='Exclude system job queues from the list.', + is_flag=True) @click.option('--disable-ssl-verification', help=('Disable SSL certificate verification. Please, remember that this option is ' + 'not generally recommended for security reasons.'), @@ -63,6 +66,7 @@ def list_queues(ctx, output_basename, output_format, all_fields, + exclude_system_queues, disable_ssl_verification, ssl_cert, profile): @@ -72,7 +76,7 @@ def list_queues(ctx, verify_ssl = ssl_selector(disable_ssl_verification, ssl_cert) print('Executing list...') j_queue = Queue(cloudos_url, apikey, None, workspace_id, verify=verify_ssl) - my_queues = j_queue.get_job_queues() + my_queues = j_queue.get_job_queues(exclude_system_queues=exclude_system_queues) if len(my_queues) == 0: raise ValueError('No AWS batch queues found. Please, make sure that your CloudOS supports AWS batch queues') if output_format == 'stdout': diff --git a/cloudos_cli/queue/queue.py b/cloudos_cli/queue/queue.py index a5bb832c..cdc9932c 100644 --- a/cloudos_cli/queue/queue.py +++ b/cloudos_cli/queue/queue.py @@ -33,9 +33,14 @@ class Queue(Cloudos): workspace_id: str verify: Union[bool, str] = True - def get_job_queues(self): + def get_job_queues(self, exclude_system_queues=False): """Get all the job queues from a CloudOS workspace. + Parameters + ---------- + exclude_system_queues : bool, default=False + Whether to exclude system job queues from the result. + Returns ------- r : list @@ -47,6 +52,30 @@ def get_job_queues(self): headers=headers, verify=self.verify) if r.status_code >= 400: raise BadRequestException(r) + + queues = json.loads(r.content) + + # By default, include system queues unless excluded + if not exclude_system_queues: + system_queues = self.get_system_job_queues() + queues.extend(system_queues) + + return queues + + def get_system_job_queues(self): + """Get all the system job queues from CloudOS. 
+ + Returns + ------- + r : list + A list of dicts, each corresponding to a system job queue. + """ + headers = {"apikey": self.apikey} + r = requests.get("{}/api/v1/teams/aws/v2/system-job-queues?teamId={}".format(self.cloudos_url, + self.workspace_id), + headers=headers, verify=self.verify) + if r.status_code >= 400: + raise BadRequestException(r) return json.loads(r.content) @staticmethod @@ -118,7 +147,7 @@ def fetch_job_queue_id(self, workflow_type, batch=True, job_queue=None): if len(available_queues) == 0: raise Exception(f'There are no available job queues for {workflow_type} ' + 'workflows. Consider creating one using CloudOS UI.') - default_queue = [q for q in available_queues if q['isDefault']] + default_queue = [q for q in available_queues if q.get('isDefault', False)] if len(default_queue) > 0: default_queue_id = default_queue[0]['id'] default_queue_name = default_queue[0]['label'] diff --git a/cloudos_cli/utils/details.py b/cloudos_cli/utils/details.py index 288d9a99..26ccb159 100644 --- a/cloudos_cli/utils/details.py +++ b/cloudos_cli/utils/details.py @@ -337,9 +337,9 @@ def create_job_details(j_details_h, job_id, output_format, output_basename, para print(f"\tJob details have been saved to '{output_basename}.csv'") -def create_job_list_table(jobs, cloudos_url, pagination_metadata=None, selected_columns=None): +def create_job_list_table(jobs, cloudos_url, pagination_metadata=None, selected_columns=None, fetch_page_callback=None): """ - Creates a formatted job list table for stdout output with responsive design. + Creates a formatted job list table for stdout output with responsive design and interactive pagination. 
The table automatically adapts to terminal width by showing different column sets: - Very narrow (<60 chars): Essential columns only (status, name, pipeline, id) @@ -373,6 +373,10 @@ def create_job_list_table(jobs, cloudos_url, pagination_metadata=None, selected_ - List: List of column names Valid columns: 'status', 'name', 'project', 'owner', 'pipeline', 'id', 'submit_time', 'end_time', 'run_time', 'commit', 'cost', 'resources', 'storage_type' + fetch_page_callback : callable, optional + Callback function to fetch a specific page of results for interactive pagination. + Should accept page number (1-indexed) and return dict with 'jobs' and 'pagination_metadata' keys. + If provided, enables interactive navigation (n=next, p=previous, q=quit). Returns ------- @@ -637,16 +641,371 @@ def create_job_list_table(jobs, cloudos_url, pagination_metadata=None, selected_ row_values = [column_values[col] for col in columns_to_show] table.add_row(*row_values) - console.print(table) + # If no fetch_page_callback, display static table + if not fetch_page_callback or not pagination_metadata: + console.print(table) + + # Display pagination info at the bottom + if pagination_metadata: + total_jobs = pagination_metadata.get('Pagination-Count', 0) + current_page = pagination_metadata.get('Pagination-Page', 1) + page_size = pagination_metadata.get('Pagination-Limit', 10) + total_pages = (total_jobs + page_size - 1) // page_size if total_jobs > 0 else 1 - # Display pagination info at the bottom - if pagination_metadata: - total_jobs = pagination_metadata.get('Pagination-Count', 0) - current_page = pagination_metadata.get('Pagination-Page', 1) - page_size = pagination_metadata.get('Pagination-Limit', 10) - total_pages = (total_jobs + page_size - 1) // page_size if total_jobs > 0 else 1 + console.print(f"\n[cyan]Showing {len(jobs)} of {total_jobs} total jobs | Page {current_page} of {total_pages}[/cyan]") + return - console.print(f"\n[cyan]Showing {len(jobs)} of {total_jobs} total jobs | 
Page {current_page} of {total_pages}[/cyan]") + # Interactive pagination mode + import sys + current_page = pagination_metadata.get('Pagination-Page', 1) or 1 # Ensure never None + total_jobs = pagination_metadata.get('Pagination-Count', 0) + page_size_value = pagination_metadata.get('Pagination-Limit', 10) + total_pages = (total_jobs + page_size_value - 1) // page_size_value if total_jobs > 0 else 1 + + show_error = None + + while True: + # Clear console and display table + console.clear() + console.print(table) + + # Display pagination info + console.print(f"\n[cyan]Total jobs:[/cyan] {total_jobs}") + if total_pages > 1: + console.print(f"[cyan]Page:[/cyan] {current_page} of {total_pages}") + console.print(f"[cyan]Jobs on this page:[/cyan] {len(jobs)}") + + # Show error message if any + if show_error: + console.print(show_error) + show_error = None + + # Show pagination controls only if there are multiple pages + if total_pages > 1: + # Check if we're in an interactive environment + if not sys.stdin.isatty(): + console.print("\n[yellow]Note: Pagination not available in non-interactive mode. 
Showing page 1 of {0}.[/yellow]".format(total_pages)) + console.print("[yellow]Run in an interactive terminal to navigate through all pages.[/yellow]") + break + + console.print(f"\n[bold cyan]n[/] = next, [bold cyan]p[/] = prev, [bold cyan]q[/] = quit") + + # Get user input for navigation + try: + choice = input(">>> ").strip().lower() + except (EOFError, KeyboardInterrupt): + console.print("\n[yellow]Pagination interrupted.[/yellow]") + break + + if choice in ("q", "quit"): + break + elif choice in ("n", "next"): + if current_page < total_pages: + try: + result = fetch_page_callback(current_page + 1) + jobs = result.get('jobs', []) + pagination_metadata = result.get('pagination_metadata', {}) + current_page = pagination_metadata.get('Pagination-Page', current_page + 1) + total_pages = pagination_metadata.get('totalPages', + (pagination_metadata.get('Pagination-Count', 0) + page_size_value - 1) // page_size_value + if pagination_metadata.get('Pagination-Count', 0) > 0 else 1) + + # Rebuild table with new jobs + table = Table(title="Job List") + for col_key in columns_to_show: + col_config = all_columns[col_key] + table.add_column( + col_config["header"], + style=col_config.get("style"), + no_wrap=col_config.get("no_wrap", False), + overflow=col_config.get("overflow"), + min_width=col_config.get("min_width"), + max_width=col_config.get("max_width") + ) + + # Rebuild rows + for job in jobs: + # (Rebuild the same row logic - will reference the variables from above) + status_raw = str(job.get("status", "N/A")) + status_symbol_map = { + "completed": "[bold green]✓[/bold green]", + "running": "[bold bright_black]◐[/bold bright_black]", + "failed": "[bold red]✗[/bold red]", + "aborted": "[bold orange3]■[/bold orange3]", + "initialising": "[bold bright_black]○[/bold bright_black]", + "N/A": "[bold bright_black]?[/bold bright_black]" + } + status = status_symbol_map.get(status_raw.lower(), status_raw) + + name = str(job.get("name", "N/A")) + project = 
str(job.get("project", {}).get("name", "N/A")) + + user_info = job.get("user", {}) + name_part = user_info.get('name', '') + surname_part = user_info.get('surname', '') + if terminal_width < 90: + if name_part and surname_part: + owner = f"{name_part[0]}.{surname_part[0]}." + elif name_part or surname_part: + owner = (name_part or surname_part)[:8] + else: + owner = "N/A" + else: + if name_part and surname_part: + owner = f"{name_part}\n{surname_part}" + elif name_part or surname_part: + owner = name_part or surname_part + else: + owner = "N/A" + + pipeline = str(job.get("workflow", {}).get("name", "N/A")).split('\n')[0].strip() + if len(pipeline) > 25: + pipeline = pipeline[:22] + "..." + + job_id = str(job.get("_id", "N/A")) + job_url = f"{cloudos_url}/app/advanced-analytics/analyses/{job_id}" + job_id_with_link = f"[link={job_url}]{job_id}[/link]" + + created_at = job.get("createdAt") + if created_at: + try: + dt = datetime.fromisoformat(created_at.replace('Z', '+00:00')) + submit_time = dt.strftime('%m-%d\n%H:%M') if terminal_width < 90 else dt.strftime('%Y-%m-%d\n%H:%M:%S') + except: + submit_time = "N/A" + else: + submit_time = "N/A" + + end_time_raw = job.get("endTime") + if end_time_raw: + try: + dt = datetime.fromisoformat(end_time_raw.replace('Z', '+00:00')) + end_time = dt.strftime('%m-%d\n%H:%M') if terminal_width < 90 else dt.strftime('%Y-%m-%d\n%H:%M:%S') + except: + end_time = "N/A" + else: + end_time = "N/A" + + start_time_raw = job.get("startTime") + if start_time_raw and end_time_raw: + try: + start_dt = datetime.fromisoformat(start_time_raw.replace('Z','+00:00')) + end_dt = datetime.fromisoformat(end_time_raw.replace('Z', '+00:00')) + duration = end_dt - start_dt + total_seconds = int(duration.total_seconds()) + hours = total_seconds // 3600 + minutes = (total_seconds % 3600) // 60 + seconds = total_seconds % 60 + if hours > 0: + run_time = f"{hours}h {minutes}m {seconds}s" + elif minutes > 0: + run_time = f"{minutes}m {seconds}s" + else: + 
run_time = f"{seconds}s" + except: + run_time = "N/A" + else: + run_time = "N/A" + + revision = job.get("revision", {}) + if job.get("jobType") == "dockerAWS": + commit = str(revision.get("digest", "N/A")) + else: + commit = str(revision.get("commit", "N/A")) + if commit != "N/A" and len(commit) > 7: + commit = commit[:7] + + cost_raw = job.get("computeCostSpent") or job.get("realInstancesExecutionCost") + if cost_raw is not None: + try: + cost = f"${float(cost_raw) / 100:.4f}" + except: + cost = "N/A" + else: + cost = "N/A" + + master_instance = job.get("masterInstance", {}) + used_instance = master_instance.get("usedInstance", {}) + instance_type = used_instance.get("type", "N/A") + resources = instance_type if instance_type else "N/A" + + storage_mode = job.get("storageMode", "N/A") + if storage_mode == "regular": + storage_type = "Regular" + elif storage_mode == "lustre": + storage_type = "Lustre" + else: + storage_type = str(storage_mode).capitalize() if storage_mode != "N/A" else "N/A" + + column_values = { + 'status': status, 'name': name, 'project': project, 'owner': owner, + 'pipeline': pipeline, 'id': job_id_with_link, 'submit_time': submit_time, + 'end_time': end_time, 'run_time': run_time, 'commit': commit, + 'cost': cost, 'resources': resources, 'storage_type': storage_type + } + + row_values = [column_values[col] for col in columns_to_show] + table.add_row(*row_values) + + except Exception as e: + show_error = f"[red]Error fetching page: {str(e)}[/red]" + else: + show_error = "[yellow]Already on last page[/yellow]" + elif choice in ("p", "prev", "previous"): + if current_page > 1: + try: + result = fetch_page_callback(current_page - 1) + jobs = result.get('jobs', []) + pagination_metadata = result.get('pagination_metadata', {}) + current_page = pagination_metadata.get('Pagination-Page', current_page - 1) + total_pages = pagination_metadata.get('totalPages', + (pagination_metadata.get('Pagination-Count', 0) + page_size_value - 1) // page_size_value + 
if pagination_metadata.get('Pagination-Count', 0) > 0 else 1) + + # Rebuild table (same logic as next) + table = Table(title="Job List") + for col_key in columns_to_show: + col_config = all_columns[col_key] + table.add_column( + col_config["header"], + style=col_config.get("style"), + no_wrap=col_config.get("no_wrap", False), + overflow=col_config.get("overflow"), + min_width=col_config.get("min_width"), + max_width=col_config.get("max_width") + ) + + # Rebuild rows with same logic + for job in jobs: + status_raw = str(job.get("status", "N/A")) + status_symbol_map = { + "completed": "[bold green]✓[/bold green]", + "running": "[bold bright_black]◐[/bold bright_black]", + "failed": "[bold red]✗[/bold red]", + "aborted": "[bold orange3]■[/bold orange3]", + "initialising": "[bold bright_black]○[/bold bright_black]", + "N/A": "[bold bright_black]?[/bold bright_black]" + } + status = status_symbol_map.get(status_raw.lower(), status_raw) + name = str(job.get("name", "N/A")) + project = str(job.get("project", {}).get("name", "N/A")) + + user_info = job.get("user", {}) + name_part = user_info.get('name', '') + surname_part = user_info.get('surname', '') + if terminal_width < 90: + if name_part and surname_part: + owner = f"{name_part[0]}.{surname_part[0]}." + elif name_part or surname_part: + owner = (name_part or surname_part)[:8] + else: + owner = "N/A" + else: + if name_part and surname_part: + owner = f"{name_part}\n{surname_part}" + elif name_part or surname_part: + owner = name_part or surname_part + else: + owner = "N/A" + + pipeline = str(job.get("workflow", {}).get("name", "N/A")).split('\n')[0].strip() + if len(pipeline) > 25: + pipeline = pipeline[:22] + "..." 
+ + job_id = str(job.get("_id", "N/A")) + job_url = f"{cloudos_url}/app/advanced-analytics/analyses/{job_id}" + job_id_with_link = f"[link={job_url}]{job_id}[/link]" + + created_at = job.get("createdAt") + if created_at: + try: + dt = datetime.fromisoformat(created_at.replace('Z', '+00:00')) + submit_time = dt.strftime('%m-%d\n%H:%M') if terminal_width < 90 else dt.strftime('%Y-%m-%d\n%H:%M:%S') + except: + submit_time = "N/A" + else: + submit_time = "N/A" + + end_time_raw = job.get("endTime") + if end_time_raw: + try: + dt = datetime.fromisoformat(end_time_raw.replace('Z', '+00:00')) + end_time = dt.strftime('%m-%d\n%H:%M') if terminal_width < 90 else dt.strftime('%Y-%m-%d\n%H:%M:%S') + except: + end_time = "N/A" + else: + end_time = "N/A" + + start_time_raw = job.get("startTime") + if start_time_raw and end_time_raw: + try: + start_dt = datetime.fromisoformat(start_time_raw.replace('Z', '+00:00')) + end_dt = datetime.fromisoformat(end_time_raw.replace('Z', '+00:00')) + duration = end_dt - start_dt + total_seconds = int(duration.total_seconds()) + hours = total_seconds // 3600 + minutes = (total_seconds % 3600) // 60 + seconds = total_seconds % 60 + if hours > 0: + run_time = f"{hours}h {minutes}m {seconds}s" + elif minutes > 0: + run_time = f"{minutes}m {seconds}s" + else: + run_time = f"{seconds}s" + except: + run_time = "N/A" + else: + run_time = "N/A" + + revision = job.get("revision", {}) + if job.get("jobType") == "dockerAWS": + commit = str(revision.get("digest", "N/A")) + else: + commit = str(revision.get("commit", "N/A")) + if commit != "N/A" and len(commit) > 7: + commit = commit[:7] + + cost_raw = job.get("computeCostSpent") or job.get("realInstancesExecutionCost") + if cost_raw is not None: + try: + cost = f"${float(cost_raw) / 100:.4f}" + except: + cost = "N/A" + else: + cost = "N/A" + + master_instance = job.get("masterInstance", {}) + used_instance = master_instance.get("usedInstance", {}) + instance_type = used_instance.get("type", "N/A") + 
resources = instance_type if instance_type else "N/A" + + storage_mode = job.get("storageMode", "N/A") + if storage_mode == "regular": + storage_type = "Regular" + elif storage_mode == "lustre": + storage_type = "Lustre" + else: + storage_type = str(storage_mode).capitalize() if storage_mode != "N/A" else "N/A" + + column_values = { + 'status': status, 'name': name, 'project': project, 'owner': owner, + 'pipeline': pipeline, 'id': job_id_with_link, 'submit_time': submit_time, + 'end_time': end_time, 'run_time': run_time, 'commit': commit, + 'cost': cost, 'resources': resources, 'storage_type': storage_type + } + + row_values = [column_values[col] for col in columns_to_show] + table.add_row(*row_values) + + except Exception as e: + show_error = f"[red]Error fetching page: {str(e)}[/red]" + else: + show_error = "[yellow]Already on first page[/yellow]" + else: + show_error = "[yellow]Invalid choice. Use 'n' (next), 'p' (previous), or 'q' (quit)[/yellow]" + else: + # Only one page, exit after displaying + break def create_workflow_list_table(workflows, cloudos_url="https://cloudos.lifebit.ai", page_size=10): diff --git a/tests/test_clos/test_get_job_list_filtering.py b/tests/test_clos/test_get_job_list_filtering.py index 6232e5bd..26747f8b 100644 --- a/tests/test_clos/test_get_job_list_filtering.py +++ b/tests/test_clos/test_get_job_list_filtering.py @@ -162,4 +162,53 @@ def test_filter_by_queue(mock_get_queues): assert isinstance(jobs, list) assert len(jobs) == 1 assert jobs[0]["_id"] == "job1" + # Verify that get_job_queues was called (system queues included by default) + mock_get_queues.assert_called_once() + + +@responses.activate +@mock.patch('cloudos_cli.queue.queue.Queue.get_job_queues') +def test_filter_by_system_queue(mock_get_queues): + """Test filtering by system queue name""" + system_queue_id = 'sys_queue_id' + system_queue_name = 'system-v41' + + # Include a system queue in the mock list + mock_get_queues.return_value = MOCK_QUEUE_LIST + [ + {"id": 
system_queue_id, "name": system_queue_name, "label": system_queue_name, "resourceType": "system"} + ] + + # Mock job list with a job using system queue + mock_job_with_system_queue = { + "jobs": [ + { + "_id": "job3", + "name": "test-job-3", + "status": "running", + "user": {"id": USER_ID, "name": "Test User"}, + "project": {"id": PROJECT_ID, "name": "test-project"}, + "workflow": {"id": WORKFLOW_ID, "name": "test-workflow"}, + "batch": {"jobQueue": {"id": system_queue_id, "name": system_queue_name}} + } + ] + } + + responses.add( + responses.GET, + url=f"{CLOUDOS_URL}/api/v2/jobs", + json=mock_job_with_system_queue, + status=200 + ) + clos = setup_clos() + result = clos.get_job_list(WORKSPACE_ID, filter_queue=system_queue_name, page=1, page_size=10) + assert isinstance(result, dict) + assert 'jobs' in result + jobs = result['jobs'] + assert isinstance(jobs, list) + assert len(jobs) == 1 + assert jobs[0]["_id"] == "job3" + assert jobs[0]["batch"]["jobQueue"]["id"] == system_queue_id + # Verify that get_job_queues was called (system queues included by default) + mock_get_queues.assert_called_once() + diff --git a/tests/test_data/queue/system_queues.json b/tests/test_data/queue/system_queues.json new file mode 100644 index 00000000..aabd7bb4 --- /dev/null +++ b/tests/test_data/queue/system_queues.json @@ -0,0 +1 @@ +[{"id":"sys_xxxxx","resource":"sys_xxxx","name":"system_test_queue","label":"system_test_queue_label","description":"System managed queue","isDefault": false,"resourceType":"system","executor":"nextflow","arn":"","computeEnvironments":[{"label":"System Default","environment":{},"status":"Ready","createdAt":"2023-04-13T12:23:47.339Z", "updatedAt":"2023-04-13T12:23:54.032Z"}],"status":"Ready"}] \ No newline at end of file diff --git a/tests/test_queue/test_cli_queue_list.py b/tests/test_queue/test_cli_queue_list.py index f26d9b14..64bd0461 100644 --- a/tests/test_queue/test_cli_queue_list.py +++ b/tests/test_queue/test_cli_queue_list.py @@ -19,6 +19,11 @@ 
QUEUES_JSON_STR = f.read() QUEUES_JSON_DICT = json.loads(QUEUES_JSON_STR) +# Load test system queue data +with open("tests/test_data/queue/system_queues.json") as f: + SYSTEM_QUEUES_JSON_STR = f.read() + SYSTEM_QUEUES_JSON_DICT = json.loads(SYSTEM_QUEUES_JSON_STR) + def test_queue_list_command_exists(): """Test that the queue list command exists in the queue group.""" @@ -65,6 +70,12 @@ def test_queue_list_csv_output(): text=QUEUES_JSON_STR, status_code=200 ) + # Mock the system queues API endpoint + m.get( + f"{CLOUDOS_URL}/api/v1/teams/aws/v2/system-job-queues", + text=SYSTEM_QUEUES_JSON_STR, + status_code=200 + ) with tempfile.TemporaryDirectory() as tmpdir: output_file = os.path.join(tmpdir, 'test_queues.csv') @@ -110,6 +121,12 @@ def test_queue_list_json_output(): text=QUEUES_JSON_STR, status_code=200 ) + # Mock the system queues API endpoint + m.get( + f"{CLOUDOS_URL}/api/v1/teams/aws/v2/system-job-queues", + text=SYSTEM_QUEUES_JSON_STR, + status_code=200 + ) with tempfile.TemporaryDirectory() as tmpdir: output_file = os.path.join(tmpdir, 'test_queues.json') @@ -145,6 +162,12 @@ def test_queue_list_stdout_output(): text=QUEUES_JSON_STR, status_code=200 ) + # Mock the system queues API endpoint + m.get( + f"{CLOUDOS_URL}/api/v1/teams/aws/v2/system-job-queues", + text=SYSTEM_QUEUES_JSON_STR, + status_code=200 + ) result = runner.invoke(run_cloudos_cli, [ 'queue', 'list', @@ -171,6 +194,12 @@ def test_queue_list_default_output_is_stdout(): text=QUEUES_JSON_STR, status_code=200 ) + # Mock the system queues API endpoint + m.get( + f"{CLOUDOS_URL}/api/v1/teams/aws/v2/system-job-queues", + text=SYSTEM_QUEUES_JSON_STR, + status_code=200 + ) result = runner.invoke(run_cloudos_cli, [ 'queue', 'list', @@ -198,6 +227,12 @@ def test_queue_list_empty_queues(): json=empty_response, status_code=200 ) + # Mock the system queues API endpoint with empty response + m.get( + f"{CLOUDOS_URL}/api/v1/teams/aws/v2/system-job-queues", + json=empty_response, + status_code=200 + ) 
result = runner.invoke(run_cloudos_cli, [ 'queue', 'list', @@ -258,6 +293,12 @@ def test_queue_list_with_all_fields(): text=QUEUES_JSON_STR, status_code=200 ) + # Mock the system queues API endpoint + m.get( + f"{CLOUDOS_URL}/api/v1/teams/aws/v2/system-job-queues", + text=SYSTEM_QUEUES_JSON_STR, + status_code=200 + ) with tempfile.TemporaryDirectory() as tmpdir: output_file = os.path.join(tmpdir, 'test_queues_full.csv') @@ -295,6 +336,12 @@ def test_queue_list_with_profile(): text=QUEUES_JSON_STR, status_code=200 ) + # Mock the system queues API endpoint + m.get( + f"{CLOUDOS_URL}/api/v1/teams/aws/v2/system-job-queues", + text=SYSTEM_QUEUES_JSON_STR, + status_code=200 + ) result = runner.invoke(run_cloudos_cli, [ 'queue', 'list', @@ -320,6 +367,12 @@ def test_queue_list_custom_output_basename(): text=QUEUES_JSON_STR, status_code=200 ) + # Mock the system queues API endpoint + m.get( + f"{CLOUDOS_URL}/api/v1/teams/aws/v2/system-job-queues", + text=SYSTEM_QUEUES_JSON_STR, + status_code=200 + ) with tempfile.TemporaryDirectory() as tmpdir: custom_basename = 'my_custom_queues' @@ -350,6 +403,12 @@ def test_queue_list_with_ssl_options(): text=QUEUES_JSON_STR, status_code=200 ) + # Mock the system queues API endpoint + m.get( + f"{CLOUDOS_URL}/api/v1/teams/aws/v2/system-job-queues", + text=SYSTEM_QUEUES_JSON_STR, + status_code=200 + ) # Test with --disable-ssl-verification result = runner.invoke(run_cloudos_cli, [ @@ -376,6 +435,12 @@ def test_queue_list_all_output_formats(output_format): text=QUEUES_JSON_STR, status_code=200 ) + # Mock the system queues API endpoint + m.get( + f"{CLOUDOS_URL}/api/v1/teams/aws/v2/system-job-queues", + text=SYSTEM_QUEUES_JSON_STR, + status_code=200 + ) with tempfile.TemporaryDirectory() as tmpdir: output_basename = os.path.join(tmpdir, 'test_queues') @@ -445,6 +510,12 @@ def test_queue_list_multiple_queues(): json=multiple_queues, status_code=200 ) + # Mock the system queues API endpoint + m.get( + 
f"{CLOUDOS_URL}/api/v1/teams/aws/v2/system-job-queues", + text=SYSTEM_QUEUES_JSON_STR, + status_code=200 + ) result = runner.invoke(run_cloudos_cli, [ 'queue', 'list', @@ -488,6 +559,12 @@ def test_queue_list_table_formatting(): json=test_queues, status_code=200 ) + # Mock the system queues API endpoint + m.get( + f"{CLOUDOS_URL}/api/v1/teams/aws/v2/system-job-queues", + text=SYSTEM_QUEUES_JSON_STR, + status_code=200 + ) result = runner.invoke(run_cloudos_cli, [ 'queue', 'list', @@ -536,6 +613,12 @@ def test_queue_list_status_icons(): json=test_queues, status_code=200 ) + # Mock the system queues API endpoint + m.get( + f"{CLOUDOS_URL}/api/v1/teams/aws/v2/system-job-queues", + text=SYSTEM_QUEUES_JSON_STR, + status_code=200 + ) result = runner.invoke(run_cloudos_cli, [ 'queue', 'list', diff --git a/tests/test_queue/test_fetch_job_queue_id.py b/tests/test_queue/test_fetch_job_queue_id.py index f5cc4088..39a3cdc2 100644 --- a/tests/test_queue/test_fetch_job_queue_id.py +++ b/tests/test_queue/test_fetch_job_queue_id.py @@ -5,6 +5,7 @@ from tests.functions_for_pytest import load_json_file INPUT = 'tests/test_data/queue/queues.json' +SYSTEM_QUEUES_INPUT = 'tests/test_data/queue/system_queues.json' APIKEY = 'vnoiweur89u2ongs' CLOUDOS_URL = 'http://cloudos.lifebit.ai' WORKSPACE_ID = 'lv89ufc838sdig' @@ -31,18 +32,26 @@ def test_fetch_job_queue_id_batch_true_job_queue_none(): should fall back to the default one. 
""" create_json = load_json_file(INPUT) + system_json = load_json_file(SYSTEM_QUEUES_INPUT) header = { "Accept": "application/json, text/plain, */*", "Content-Type": "application/json;charset=UTF-8", "apikey": APIKEY } - # mock GET method with the .json + # mock GET method for regular queues responses.add( responses.GET, url=f"{CLOUDOS_URL}/api/v1/teams/aws/v2/job-queues?teamId={WORKSPACE_ID}", body=create_json, headers=header, status=200) + # mock GET method for system queues + responses.add( + responses.GET, + url=f"{CLOUDOS_URL}/api/v1/teams/aws/v2/system-job-queues?teamId={WORKSPACE_ID}", + body=system_json, + headers=header, + status=200) # Initialise Queue j_queue = Queue(cloudos_url=CLOUDOS_URL, apikey=APIKEY, cromwell_token=None, workspace_id=WORKSPACE_ID) @@ -59,18 +68,26 @@ def test_fetch_job_queue_id_batch_true_job_queue_correct(): Tests fetch_job_queue_id when batch=True and a correct job_queue is provided. """ create_json = load_json_file(INPUT) + system_json = load_json_file(SYSTEM_QUEUES_INPUT) header = { "Accept": "application/json, text/plain, */*", "Content-Type": "application/json;charset=UTF-8", "apikey": APIKEY } - # mock GET method with the .json + # mock GET method for regular queues responses.add( responses.GET, url=f"{CLOUDOS_URL}/api/v1/teams/aws/v2/job-queues?teamId={WORKSPACE_ID}", body=create_json, headers=header, status=200) + # mock GET method for system queues + responses.add( + responses.GET, + url=f"{CLOUDOS_URL}/api/v1/teams/aws/v2/system-job-queues?teamId={WORKSPACE_ID}", + body=system_json, + headers=header, + status=200) # Initialise Queue j_queue = Queue(cloudos_url=CLOUDOS_URL, apikey=APIKEY, cromwell_token=None, workspace_id=WORKSPACE_ID) @@ -88,18 +105,26 @@ def test_fetch_job_queue_id_batch_true_job_queue_wrong(): it will fall back to the default one. 
""" create_json = load_json_file(INPUT) + system_json = load_json_file(SYSTEM_QUEUES_INPUT) header = { "Accept": "application/json, text/plain, */*", "Content-Type": "application/json;charset=UTF-8", "apikey": APIKEY } - # mock GET method with the .json + # mock GET method for regular queues responses.add( responses.GET, url=f"{CLOUDOS_URL}/api/v1/teams/aws/v2/job-queues?teamId={WORKSPACE_ID}", body=create_json, headers=header, status=200) + # mock GET method for system queues + responses.add( + responses.GET, + url=f"{CLOUDOS_URL}/api/v1/teams/aws/v2/system-job-queues?teamId={WORKSPACE_ID}", + body=system_json, + headers=header, + status=200) # Initialise Queue j_queue = Queue(cloudos_url=CLOUDOS_URL, apikey=APIKEY, cromwell_token=None, workspace_id=WORKSPACE_ID) diff --git a/tests/test_queue/test_get_job_queues.py b/tests/test_queue/test_get_job_queues.py index f949067b..a193eeac 100644 --- a/tests/test_queue/test_get_job_queues.py +++ b/tests/test_queue/test_get_job_queues.py @@ -7,6 +7,7 @@ from tests.functions_for_pytest import load_json_file INPUT = "tests/test_data/queue/queues.json" +SYSTEM_QUEUES_INPUT = "tests/test_data/queue/system_queues.json" APIKEY = 'vnoiweur89u2ongs' CLOUDOS_URL = 'http://cloudos.lifebit.ai' WORKSPACE_ID = 'lv89ufc838sdig' @@ -16,30 +17,38 @@ @responses.activate def test_get_job_queues_correct_response(): """ - Test 'get_job_queues' to work as intended + Test 'get_job_queues' to work as intended (includes system queues by default) API request is mocked and replicated with json files """ create_json = load_json_file(INPUT) + system_json = load_json_file(SYSTEM_QUEUES_INPUT) header = { "Accept": "application/json, text/plain, */*", "Content-Type": "application/json;charset=UTF-8", "apikey": APIKEY } - # mock GET method with the .json + # mock GET method for regular queues responses.add( responses.GET, url=f"{CLOUDOS_URL}/api/v1/teams/aws/v2/job-queues?teamId={WORKSPACE_ID}", body=create_json, headers=header, status=200) + # mock GET 
method for system queues + responses.add( + responses.GET, + url=f"{CLOUDOS_URL}/api/v1/teams/aws/v2/system-job-queues?teamId={WORKSPACE_ID}", + body=system_json, + headers=header, + status=200) # Initialise Queue j_queue = Queue(cloudos_url=CLOUDOS_URL, apikey=APIKEY, cromwell_token=None, workspace_id=WORKSPACE_ID) - # get mock response + # get mock response (should include system queues by default) response = j_queue.get_job_queues() # check the response assert isinstance(response, list) - assert len(response) == 1 + assert len(response) == 2 # 1 regular + 1 system queue @mock.patch('cloudos_cli.queue', mock.MagicMock()) @@ -71,3 +80,70 @@ def test_get_job_queues_incorrect_response(): workspace_id=WORKSPACE_ID) j_queue.get_job_queues() assert "Server returned status 400." in (str(error)) + + +@mock.patch('cloudos_cli.queue', mock.MagicMock()) +@responses.activate +def test_get_system_job_queues_correct_response(): + """ + Test 'get_system_job_queues' to work as intended + API request is mocked and replicated with json files + """ + create_json = load_json_file(SYSTEM_QUEUES_INPUT) + header = { + "Accept": "application/json, text/plain, */*", + "Content-Type": "application/json;charset=UTF-8", + "apikey": APIKEY + } + # mock GET method with the .json + responses.add( + responses.GET, + url=f"{CLOUDOS_URL}/api/v1/teams/aws/v2/system-job-queues?teamId={WORKSPACE_ID}", + body=create_json, + headers=header, + status=200) + # Initialise Queue + j_queue = Queue(cloudos_url=CLOUDOS_URL, apikey=APIKEY, cromwell_token=None, + workspace_id=WORKSPACE_ID) + # get mock response + response = j_queue.get_system_job_queues() + # check the response + assert isinstance(response, list) + assert len(response) == 1 + assert response[0]['resourceType'] == 'system' + + +@mock.patch('cloudos_cli.queue', mock.MagicMock()) +@responses.activate +def test_get_job_queues_exclude_system_queues(): + """ + Test 'get_job_queues' with exclude_system_queues=True + Should return only regular queues 
(excluding system queues) + """ + regular_queues_json = load_json_file(INPUT) + header = { + "Accept": "application/json, text/plain, */*", + "Content-Type": "application/json;charset=UTF-8", + "apikey": APIKEY + } + + # Mock regular queues endpoint only (system queues should not be called) + responses.add( + responses.GET, + url=f"{CLOUDOS_URL}/api/v1/teams/aws/v2/job-queues?teamId={WORKSPACE_ID}", + body=regular_queues_json, + headers=header, + status=200) + + # Initialise Queue + j_queue = Queue(cloudos_url=CLOUDOS_URL, apikey=APIKEY, cromwell_token=None, + workspace_id=WORKSPACE_ID) + # get mock response without system queues + response = j_queue.get_job_queues(exclude_system_queues=True) + # check the response + assert isinstance(response, list) + assert len(response) == 1 # Only regular queue + + # Verify system queue endpoint was not called + assert len(responses.calls) == 1 + assert "system-job-queues" not in responses.calls[0].request.url