diff --git a/.github/workflows/dash-bmv2-ci.yml b/.github/workflows/dash-bmv2-ci.yml index cf3aadc63..4b622ece0 100644 --- a/.github/workflows/dash-bmv2-ci.yml +++ b/.github/workflows/dash-bmv2-ci.yml @@ -12,6 +12,7 @@ on: - 'test/**.yml' - 'dash-pipeline/**' - '!dash-pipeline/dockerfiles/Dockerfile.*' + - '!dash-pipeline/py_model*' - 'dash-pipeline/dockerfiles/*.env' - '!dash-pipeline/.dockerignore' - '!dash-pipeline/**.md' diff --git a/.github/workflows/dash-pymodel-ci.yml b/.github/workflows/dash-pymodel-ci.yml new file mode 100644 index 000000000..15cddd696 --- /dev/null +++ b/.github/workflows/dash-pymodel-ci.yml @@ -0,0 +1,103 @@ +name: DASH-PYMODEL-CI + +on: + push: + branches: [ "**" ] + paths: + - '.gitmodules' + - '.github/workflows/dash-pymodel-ci.yml' + - 'test/**.py' + - 'test/**requirements.txt' + - 'test/**.sh' + - 'test/**.yml' + - 'dash-pipeline/**' + - '!dash-pipeline/dockerfiles/Dockerfile.*' + - '!dash-pipeline/bmv2*' + - 'dash-pipeline/dockerfiles/*.env' + - '!dash-pipeline/.dockerignore' + - '!dash-pipeline/**.md' + - '!dash-pipeline/**.svg' + - '!dash-pipeline/**.png' + - '!dash-pipeline/**.txt' + pull_request: + branches: [ "**" ] + paths: + - '.gitmodules' + - '.github/workflows/dash-pymodel-ci.yml' + - 'test/**.py' + - 'test/**requirements.txt' + - 'test/**.sh' + - 'test/**.yml' + - 'dash-pipeline/**' + - '!dash-pipeline/dockerfiles/Dockerfile.*' + - '!dash-pipeline/bmv2*' + - 'dash-pipeline/dockerfiles/*.env' + - '!dash-pipeline/.dockerignore' + - '!dash-pipeline/**.md' + - '!dash-pipeline/**.svg' + - '!dash-pipeline/**.png' + - '!dash-pipeline/**.txt' + workflow_dispatch: + +jobs: + build: + name: Build and Test Python DASH Pipeline + runs-on: ubuntu-22.04 + env: + docker_fg_flags: --privileged + docker_fg_root_flags: --privileged -u root + docker_bg_root_flags: -d --privileged -u root + docker_bg_flags: -d --privileged + defaults: + run: + working-directory: ./dash-pipeline + steps: + - uses: actions/checkout@v3 + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.12' + - name: Upgrade pip tooling + run: python -m pip install --upgrade pip setuptools wheel + - name: Install pymodel Python dependencies + run: python -m pip install -r py_model/requirements.txt + - name: Build python based pymodel artifacts + run: make py-artifacts + - name: Update SAI submodule + run: git submodule update --init + - name: Pull/Build docker saithrift-bldr image + run: make docker-saithrift-bldr + - name: Pull/Build docker pymodel-bldr image + run: make docker-pymodel-bldr + - name: Generate SAI API + run: DOCKER_FLAGS=$docker_fg_flags make sai TARGET=pymodel + - name: Run pymodel + run: DOCKER_FLAGS=$docker_bg_root_flags make run-pymodel HAVE_DPAPP=y + - name: Pull/Build docker dpapp image + run: make docker-dash-dpapp + - name: Build dpapp + run: DOCKER_FLAGS=$docker_fg_flags make dpapp TARGET=pymodel + - name: Check if SAI spec is updated + run: DOCKER_FLAGS=$docker_fg_flags make check-sai-spec + - name: Prepare network + run: DOCKER_FLAGS=$docker_fg_flags make network HAVE_DPAPP=y + - name: Run dpapp + run: DOCKER_FLAGS=$docker_bg_flags make run-dpapp TARGET=pymodel + - name: Generate saithrift-server + run: DOCKER_FLAGS=$docker_fg_flags make saithrift-server + - name: Generate saithrift-client local docker + run: DOCKER_FLAGS=$docker_fg_flags make docker-saithrift-client + - name: Run saithrift server + run: DOCKER_FLAGS=$docker_bg_flags make run-saithrift-server TARGET=pymodel + - name: Run PTF Tests + run: DOCKER_FLAGS=$docker_fg_root_flags 
make run-saithrift-ptftests + + - uses: azure/docker-login@v1 + if: ${{ github.event_name != 'pull_request' && github.repository == 'sonic-net/DASH' }} + with: + login-server: sonicdash.azurecr.io + username: ${{ secrets.DASH_ACR_USERNAME }} + password: ${{ secrets.DASH_ACR_PASSWORD }} + - name: Publish DASH pymodel builder base docker image + run: make docker-publish-pymodel-bldr + if: ${{ github.event_name != 'pull_request' && github.repository == 'sonic-net/DASH' }} diff --git a/.gitignore b/.gitignore index 175df9f4f..90e7db8f0 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,9 @@ __pycache__/ .pytest_cache/ dash-pipeline/bmv2/dash_pipeline.bmv2/ dash-pipeline/dpdk-pna/dash_pipeline.dpdk +dash-pipeline/py_model/dash_pipeline.py_model/ +dash-pipeline/py_model/p4_helper/v1/__pycache__/ +dash-pipeline/py_model/p4_helper/config/__pycache__/ dash-pipeline/SAI/lib/ dash-pipeline/SAI/rpc/ dash-pipeline/dpapp/build diff --git a/.wordlist.txt b/.wordlist.txt index e824528e7..6ef6037ed 100644 --- a/.wordlist.txt +++ b/.wordlist.txt @@ -96,6 +96,7 @@ CloudStorm cloudstorm CNIP codebase +codegen collaterals compat Compat @@ -105,7 +106,9 @@ confgen config configs configurated -Conntrack +conntrack +ConntrackIn +ConntrackOut Containerlab CP CPUs @@ -236,6 +239,7 @@ ENIs ENI's enqueue enqueues +entrypoint entrypoints enum EPUs @@ -504,6 +508,8 @@ PTF ptf ptftests py +py_model +pymodel PyPi pytest PyTest @@ -545,6 +551,7 @@ RJ Roadmap roadmap routable +rpc RPC RPCs RPF @@ -665,6 +672,7 @@ TEP testability testbed testbeds +textproto TGen Tgen Tgens diff --git a/README.md b/README.md index 41a6b280c..7fe73ddd1 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@ [![DASH-bmv2-CI](https://github.com/sonic-net/DASH/workflows/DASH-BMV2-CI/badge.svg?branch=main)](https://github.com/sonic-net/DASH/actions/workflows/dash-bmv2-ci.yml) +[![DASH-pymodel-CI](https://github.com/sonic-net/DASH/workflows/DASH-pymodel-CI/badge.svg?branch=main)](https://github.com/sonic-net/DASH/actions/workflows/dash-pymodel-ci.yml) [![Spellcheck](https://github.com/sonic-net/DASH/actions/workflows/dash-md-spellcheck.yml/badge.svg)](https://github.com/sonic-net/DASH/actions/workflows/dash-md-spellcheck.yml) # SONiC-DASH - Disaggregated API for SONiC Hosts - extending functionality to stateful workloads! 
diff --git a/assets/CI-badge-pymodel-failing.svg b/assets/CI-badge-pymodel-failing.svg new file mode 100644 index 000000000..14fa5e3f1 --- /dev/null +++ b/assets/CI-badge-pymodel-failing.svg @@ -0,0 +1,60 @@ + + DASH-PYMODEL-CI: failing + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + DASH-PYMODEL-CI + + + + + failing + + + diff --git a/assets/CI-badge-pymodel-passing.svg b/assets/CI-badge-pymodel-passing.svg new file mode 100644 index 000000000..b74168c92 --- /dev/null +++ b/assets/CI-badge-pymodel-passing.svg @@ -0,0 +1,60 @@ + + DASH-PYMODEL-CI: passing + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + DASH-PYMODEL-CI + + + + + passing + + + diff --git a/dash-pipeline/Makefile b/dash-pipeline/Makefile index 99bd1a99a..beafd6a2b 100644 --- a/dash-pipeline/Makefile +++ b/dash-pipeline/Makefile @@ -1,5 +1,10 @@ SHELL = /bin/bash +TARGET ?= bmv2 + +IFACE0 = veth0 +IFACE1 = veth2 + HAVE_DPAPP ?= ifeq ($(HAVE_DPAPP),y) DPAPP_LINK = veth4 @@ -53,6 +58,13 @@ include dockerfiles/DOCKER_BMV2_BLDR_IMG.env # Runs bmv2 process - consider slimmer if don't need sai/p4rt clients inside DOCKER_BMV2_RUN_IMG ?=$(DOCKER_BMV2_BLDR_IMG) +# Builds sai-P4rt clients to run inside pymodel process +# include file defines DOCKER_PYMODEL_BLDR_IMG_NAME and DOCKER_PYMODEL_BLDR_IMG_CTAG +include dockerfiles/DOCKER_PYMODEL_BLDR_IMG.env + +# Runs pymodel process - consider slimmer if don't need sai/p4rt clients inside +DOCKER_PYMODEL_RUN_IMG ?=$(DOCKER_PYMODEL_BLDR_IMG) + # Compiles sai headers, libsai, saithrift server & client/server libs # include file defines DOCKER_SAITHRIFT_BLDR_IMG_NAME and DOCKER_SAITHRIFT_BLDR_IMG_CTAG include dockerfiles/DOCKER_SAITHRIFT_BLDR_IMG.env @@ -88,8 +100,41 @@ DOCKER_RUN := docker run \ -u $(HOST_USER):$(HOST_GROUP) \ $(DOCKER_FLAGS) +# Should override --name in make targets +DOCKER_PY_RUN := docker run \ + -v $(PWD)/py_model:/py_model \ + -v $(PWD)/SAI:/SAI \ + -v $(PWD)/tests:/tests \ + --network=host \ + --rm \ + -u $(HOST_USER):$(HOST_GROUP) \ + $(DOCKER_FLAGS) + SHA1SUM := sha1sum | awk '{print substr($$1,0,11);}' + +ifeq ($(TARGET), pymodel) +MODEL_DIR = \ + -v $(PWD)/py_model:/py_model \ + +SAI_HEADERS_DEPS := docker-saithrift-bldr-image-exists +DOCKER_RUN_CMD := $(DOCKER_PY_RUN) + +PIPELINE_MOUNTS = \ + -v $(PWD)/$(PY_OUTDIR)/dash_pipeline.json:/etc/dash/dash_pipeline.json \ + -v $(PWD)/$(PY_OUTDIR)/dash_pipeline_p4rt.txt:/etc/dash/dash_pipeline_p4rt.txt +else +MODEL_DIR = \ + -v $(PWD)/bmv2:/bmv2 \ + +SAI_HEADERS_DEPS := p4 docker-saithrift-bldr-image-exists +DOCKER_RUN_CMD := $(DOCKER_RUN) + +PIPELINE_MOUNTS = \ + -v $(PWD)/$(P4_OUTDIR)/dash_pipeline.json:/etc/dash/dash_pipeline.json \ + -v $(PWD)/$(P4_OUTDIR)/dash_pipeline_p4rt.txt:/etc/dash/dash_pipeline_p4rt.txt +endif + SAI/SAI: sai-submodule sai-submodule: @@ -99,6 +144,71 @@ sai-submodule: cd SAI/SAI/test && git submodule update --init ptf +####################################### +# Pymodel DOCKER BUILD/PUBLISH TARGETS +####################################### + +DOCKER_PYMODEL_BLDR_IMG_TAG := $(shell cat dockerfiles/Dockerfile.pymodel-bldr | $(SHA1SUM) | cut -d' ' -f1) +DOCKER_PYMODEL_BLDR_IMG = $(DOCKER_PYMODEL_BLDR_IMG_NAME):$(DOCKER_PYMODEL_BLDR_IMG_TAG) + +# Docker build target (local only, no pulls) +docker-pymodel-bldr: + @echo "Building local PyModel image: $(DOCKER_PYMODEL_BLDR_IMG)" + docker build --no-cache \ + -f dockerfiles/Dockerfile.pymodel-bldr \ + -t $(DOCKER_PYMODEL_BLDR_IMG) \ + --build-arg user=$(DASH_USER) \ + --build-arg group=$(DASH_GROUP) \ + --build-arg 
uid=$(DASH_UID) \ + --build-arg guid=$(DASH_GUID) \ + --build-arg hostname=$(DASH_HOST) \ + --build-arg available_processors=$(shell nproc) \ + . + [ -n $(DOCKER_PYMODEL_BLDR_IMG_CTAG) ] && \ + docker tag $(DOCKER_PYMODEL_BLDR_IMG) \ + $(DOCKER_PYMODEL_BLDR_IMG_NAME):$(DOCKER_PYMODEL_BLDR_IMG_CTAG) + +docker-publish-pymodel-bldr: + @echo "Publish $(DOCKER_PYMODEL_BLDR_IMG) - requires credentials, can only do from DASH repo, not a fork" + docker push $(DOCKER_PYMODEL_BLDR_IMG) + [ -n $(DOCKER_PYMODEL_BLDR_IMG_CTAG) ] && \ + docker push $(DOCKER_PYMODEL_BLDR_IMG_NAME):$(DOCKER_PYMODEL_BLDR_IMG_CTAG) + +docker-pull-pymodel-bldr: + docker pull $(DOCKER_PYMODEL_BLDR_IMG) + +# Only used for CI publishing to save artifact between jobs: +docker-save-pymodel-bldr: + docker save --output pymodel-bldr-image.tar $(DOCKER_PYMODEL_BLDR_IMG) +docker-load-pymodel-bldr: + docker load --input pymodel-bldr-image.tar + + +####################### +# Python DASH Model +####################### + +PY_OUTDIR=py_model/dash_pipeline.py_model +PY_ARTIFACTS=$(PY_OUTDIR)/dash_pipeline_p4rt.json $(PY_OUTDIR)/dash_pipeline_p4rt.txt + +py-artifacts: + @echo "Generating artifacts for pymodel..." + python3 -m py_model.scripts.artifacts_gen + +py-artifacts-clean: + rm -rf $(PY_OUTDIR) + +.PHONY:run-pymodel +run-pymodel: network + $(DOCKER_PY_RUN) \ + --name dash-pymodel-$(USER) \ + $(DOCKER_PYMODEL_RUN_IMG) \ + python3 -m py_model.main_dash $(IFACE0) $(IFACE1) $(DPAPP_LINK) + +kill-pymodel: + -docker kill dash-pymodel-$(USER) + + ###################################### # P4 Source code compile TARGETS ###################################### @@ -135,7 +245,6 @@ $(P4_OUTDIR)/dash_pipeline_p4rt.txt: $(P4_SRC) --toJSON $(P4_OUTDIR)/dash_pipeline_ir.json # DPDK - experimental/WIP - P4_DPDK_OUTDIR=dpdk-pna/dash_pipeline.dpdk p4c-dpdk-pna: @@ -185,7 +294,7 @@ DOCKER_RUN_SAITHRIFT_BLDR =\ .PHONY:sai sai: sai-headers sai-meta libsai -sai-headers: p4 docker-saithrift-bldr-image-exists | SAI/SAI +sai-headers: $(SAI_HEADERS_DEPS) | SAI/SAI @echo "Generate SAI library headers and implementation..." # Revert any local changes before generating the new specs. @@ -194,12 +303,11 @@ sai-headers: p4 docker-saithrift-bldr-image-exists | SAI/SAI mkdir -p SAI/lib - $(DOCKER_RUN) \ - $(DOCKER_FLAGS) \ + $(DOCKER_RUN_CMD) \ --name build_sai-$(USER) \ -u $(HOST_USER):$(HOST_GROUP) \ -w /SAI $(DOCKER_SAITHRIFT_BLDR_IMG) \ - make + make TARGET=$(TARGET) sai-meta: @echo "Generate SAI metadata..." 
@@ -263,13 +371,12 @@ run-saithrift-bldr-bash: /bin/bash ###################################### -# bmv2 date-plane app TARGETS +# date-plane app TARGETS ###################################### DOCKER_RUN_DPAPP = docker run\ $(DOCKER_FLAGS) \ - -v $(PWD)/bmv2:/bmv2 \ - -v $(PWD)/$(P4_OUTDIR)/dash_pipeline.json:/etc/dash/dash_pipeline.json \ - -v $(PWD)/$(P4_OUTDIR)/dash_pipeline_p4rt.txt:/etc/dash/dash_pipeline_p4rt.txt \ + $(MODEL_DIR) \ + $(PIPELINE_MOUNTS) \ -v $(PWD)/SAI:/SAI \ -v $(PWD)/tests:/tests \ -v $(PWD)/../:/dash \ @@ -347,8 +454,7 @@ DOCKER_RUN_SAITHRIFT_SRVR =\ $(DOCKER_FLAGS) \ --net=host \ --name dash-saithrift-server-$(USER) \ - -v $(PWD)/$(P4_OUTDIR)/dash_pipeline.json:/etc/dash/dash_pipeline.json \ - -v $(PWD)/$(P4_OUTDIR)/dash_pipeline_p4rt.txt:/etc/dash/dash_pipeline_p4rt.txt \ + $(PIPELINE_MOUNTS) \ -v $(PWD)/SAI:/SAI \ -v $(PWD)/SAI/SAI/meta:/meta \ -w /SAI/rpc/usr/sbin \ @@ -881,3 +987,7 @@ run-saichallenger-tutorials: deploy-ixiac $(DOCKER_SAI_CHALLENGER_CLIENT_IMG) \ ./run-tests.sh --setup=$(SAI_CHALLENGER_SETUP_FILE) tutorial + + + + diff --git a/dash-pipeline/README-dash-pymodel-workflows.md b/dash-pipeline/README-dash-pymodel-workflows.md new file mode 100644 index 000000000..a29e928fc --- /dev/null +++ b/dash-pipeline/README-dash-pymodel-workflows.md @@ -0,0 +1,277 @@ +**>> I Don't have time to RTFM!*** [Jump to Concise Developer Workflows](#concise-developer-workflows) + +*(Read the Fancy Manual) + +See also: +* [README.md](README.md) Top-level README for dash-pipeline +* [README-dash-workflows.md](README-dash-workflows.md) for bmv2-based workflows +* [README-pymodel.md](README-pymodel.md) for Python model developer guide +* [README-dash-ci](README-dash-ci.md) for CI pipelines +* [README-dash-docker](README-dash-docker.md) for Docker overview and workflows +* [README-saithrift](README-saithrift.md) for saithrift client/server and test workflows +* [README-ptftests](README-ptftests.md) for saithrift PTF test-case development and usage +* [README-pytests](README-pytests.md) for saithrift pytest development and usage + +**Table of Contents** +- [Concise Developer Workflows](#concise-developer-workflows) + - [Use Case I - Developing Python Model Code](#use-case-i---developing-python-model-code) + - [Use-Case II - Developing End-to-End Tests with saithrift PTF](#use-case-ii---developing-end-to-end-tests-with-saithrift-ptf) + - [Use-Case III - Incremental Test-Case Development](#use-case-iii---incremental-test-case-development) +- [Make Target Summary](#make-target-summary) + - [Build Artifacts](#build-artifacts) + - [Launch Daemons/Containers](#launch-daemonscontainers) + - [Run Tests](#run-tests) +- [Detailed Python Model Build Workflow](#detailed-python-model-build-workflow) + - [Docker Image(s)](#docker-images) + - [Build Workflow Diagram](#build-workflow-diagram) + - [Generate Python Model Artifacts](#generate-python-model-artifacts) + - [Stop Containers](#stop-containers) + - [Build libsai.so adaptor library](#build-libsaiso-adaptor-library) + - [Build saithrift-server](#build-saithrift-server) + - [Create veth pairs for py_model](#create-veth-pairs-for-py_model) + - [Run Python Model](#run-python-model) + - [Run saithrift-server](#run-saithrift-server) + - [Build saithrift-client docker image](#build-saithrift-client-docker-image) + - [Run saithrift-client PTF tests](#run-saithrift-client-ptf-tests) + - [Run saithrift-client Pytests](#run-saithrift-client-pytests) + + +# Concise Developer Workflows +This section gives you a quick idea of how to work on 
various tasks efficiently with the Python model. The Python model provides an alternative to the bmv2-based switch for faster development cycles and easier debugging. + +## Use Case I - Developing Python Model Code +Developing Python model code requires generating artifacts via `make py-artifacts`. This is very quick since Python code doesn't need compilation. You can run the code via ` make run-pymodel HAVE_DPAPP=y +`. This setup doesn't support any switch configuration, so the testability is minimal. +![dev-workflow-pymodel](images/dev-workflow-pymodel.svg) + + +## Use-Case II - Developing End-to-End Tests with saithrift PTF +End-to-end tests require building artifacts and saithrift-client docker image. + +A concise set of commands to run, in three separate terminals: +``` +Console #1: +----------- + Clean (Optional): + make py-artifacts-clean + make sai-clean + make saithrift-server-clean + + Build the Pymodel: + make py-artifacts + make docker-saithrift-bldr + make docker-pymodel-bldr + make sai TARGET=pymodel + make docker-dash-dpapp + make dpapp + make check-sai-spec + make saithrift-server + make docker-saithrift-client + + Run the Pymodel: + make run-pymodel HAVE_DPAPP=y + +Console #2: +----------- + make run-dpapp TARGET=pymodel + +Console #3: +----------- + make run-saithrift-server TARGET=pymodel + +Console #4: +----------- + make run-saithrift-ptftests + +``` + +![dev-workflow-pymodel-saithrift](images/dev-workflow-pymodel-saithrift.svg) + +## Use-Case III - Incremental Test-Case Development +This builds upon the previous use-case. + +Once you have stable Python model code, `libsai` and a saithrift client/server framework, you can start the Python model and sai-thrift server, then develop test-cases interactively. The figure above illustrates this process in the lower-right corner. You can edit and save saithrift PTF tests in your host PC's workspace; save the files; then run the tests. + +# Make Target Summary +The tables below summarize the most important `make` targets for the Python model for easy reference. You can click on a link to jump to further explanations. Not all make targets are shown. See the [Makefile](Makefile) to learn more. + +Dockerfile build targets are separately described in [README-dash-docker](README-dash-docker.md) since they are mainly for infrastructure and generally not part of day-to-day code and test-case development. The one exception is the [docker-saithrift-client](#build-saithrift-client-docker-image) target. + + +## Build Artifacts +| Target(s) | Description | +| ---------------------- | --------------------------------------------------| +| [py-artifacts](#generate-python-model-artifacts)
<br>[py-artifacts-clean](#generate-python-model-artifacts) | Generates P4Info `.json` and `.txt` files from the Python model code<br>Deletes the py-model artifacts |
| [sai TARGET=pymodel](#build-libsaiso-adaptor-library)<br>[sai-clean](#build-libsaiso-adaptor-library) | Auto-generates SAI headers and adaptor code, and compiles them into the `libsai.so` library<br>Cleans up artifacts and restores the SAI submodule |
| [saithrift-server](#build-saithrift-server) | Auto-generates the saithrift client-server framework and libraries |
| [docker-saithrift-client](#build-saithrift-client-docker-image) | Builds a docker image containing tools, libraries and saithrift test-cases for PTF |

## Launch Daemons/Containers
| Target(s) | Description |
| ---------------------- | --------------------------------------------------|
| [run-pymodel HAVE_DPAPP=y](#run-python-model)<br>[kill-pymodel](#run-python-model) | Run the Python model packet sniffer<br>Stop the Python model process |
| [run-saithrift-server TARGET=pymodel](#run-saithrift-server)<br>[kill-saithrift-server](#run-saithrift-server) | Run a saithrift server which translates SAI over thrift into P4Runtime<br>
Stop the saithrift server container| + +## Run Tests +| Target(s) | Description | +| ---------------------- | --------------------------------------------------| +| [run-saithrift-ptftests](#run-saithrift-client-ptf-tests) | Run PTF tests under [test/test-cases/functional](../test/test-cases/functional) using tests built into [docker-saithrift-client](#build-saithrift-client-docker-image) image +| [run-saithrift-pytests](#run-saithrift-client-pytests) | Run pytests under [tests/saithrift/pytest](tests/saithrift/pytest) using tests built into [docker-saithrift-client](#build-saithrift-client-docker-image) image + +# Detailed Python Model Build Workflow + +This explains the various build steps for the Python model in more details. The CI pipeline does most of these steps as well. All filenames and directories mentioned in the sections below are relative to the `dash-pipeline` directory (containing this README) unless otherwise specified. + +The workflows described here are primarily driven by a [Makefile](Makefile) and are suitable for a variety of use-cases: +* Manual execution by developers - edit, build, test; commit and push to GitHub +* Automated script-based execution in a development or production environment, e.g. regression testing +* Cloud-based CI (Continuous Integration) build and test, every time code is pushed to GitHub or a Pull Request is submitted to the upstream repository. + +See the [Diagram](#build-workflow-diagram) below. You can read the [dockerfiles](dockerfiles) and all `Makefiles` in various directories to get a deeper understanding of the build process. You generally use the targets from the main [Makefile](Makefile) and not any subordinate ones. + +## Docker Image(s) + +Several docker images are used to compile artifacts or run processes. These Dockerfiles should not change often and are stored/retrieved from an external docker registry. See [README-dash.docker](README-dash.docker.md) for details. When a Dockerfile does change, it needs to be published in the registry. Dockerfile changes also trigger rebuilds of the docker images in the CI pipeline. + +See the diagram below. You can read the [Dockerfile](Dockerfile) and all `Makefiles` to get a deeper understanding of the build process. + +## Build Workflow Diagram + +![dash-pymodel-thrift-workflow](images/dash-pymodel-thrift-workflow.svg) + +## Generate Python Model Artifacts + +``` +make py-artifacts-clean # optional +make py-artifacts +``` +The primary outputs of interest are: + * `py_model/dash_pipeline.py_model/dash_pipeline_p4rt.json` - the P4Info metadata which describes all the P4 entities (P4 tables, counters, etc.). This metadata is used downstream as follows: + * P4Runtime controller used to manage the pipeline. The SAI API adaptor converts SAI library "c" code calls to P4Runtime socket calls. 
+ * P4-to-SAI header code generation (see next step below) + * `py_model/dash_pipeline.py_model/dash_pipeline_p4rt.txt` - text-based P4Info format + * `py_model/dash_pipeline.py_model/dash_pipeline_ir.json` - intermediate representation JSON +This make target will generate artifacts from the Python model: +* Generate P4Info JSON and text files from Python model code +* These artifacts can be used to auto-generate DASH SAI API header files +* Compile `libsai` for dash including SAI-to-P4Runtime adaptor +* Auto-generate the saithrift server and client framework (server daemon + client libraries) based on the DASH SAI headers +* Build a saithrift-client Docker image containing all needed tools and test suites + +## Stop Containers +This will kill one or all containers: +``` +make kill +make kill-dpapp +make kill-pymodel +``` + + + +## Build libsai.so adaptor library +This library is the crucial item to allow integration with a Network Operating System (NOS) like SONiC. It wraps an implementation specific "SDK" with standard Switch Abstraction Interface (SAI) APIs. In this case, an adaptor translates SAI API table/attribute CRUD operations into equivalent P4Runtime RPC calls, which is the native RPC API for the Python model's gRPC server. + +``` +make sai-clean # Clean up artifacts and Git Submodule +make sai TARGET=pymodel # Combines steps above +``` + +These targets generate SAI headers from the P4Info which was described above. It uses [Jinja2](https://jinja.palletsprojects.com/en/3.1.x/) which renders [SAI/templates](SAI/templates) into C++ source code for the SAI headers corresponding to the DASH API as defined in the Python model code. It then compiles this code into a shared library `libsai.so` which will later be used to link to a test server (Thrift) or `syncd` daemon for production. + +This consists of two main steps +* Generate the SAI headers and implementation code via [SAI/sai_api_gen.py](SAI/sai_api_gen.py). This uses templates stored in [SAI/templates](SAI/templates). + + Headers are emitted into the imported `SAI` submodule (under `SAI/SAI`) under its `inc`, `meta` and `experimental` directories. + + Implementation code for each SAI accessor are emitted into the `SAI/lib` directory. +* Compile the implementation source code into `libsai.so`, providing the definitive DASH data plane API. Note this `libsai` makes calls to the Python model's embedded P4Runtime Server and must be linked with numerous libraries. + +## Build saithrift-server +This builds a saithrift-server daemon, which is linked to the `libsai` library and also includes the SAI-to-P4Runtime adaptor. It also builds Python thrift libraries and saithrift libraries. +``` +make saithrift-server +``` + +## Create veth pairs for py_model +This needs to be run just once. It will create veth pairs, set their MTU, disable IPV6, etc. + +``` +make network +``` + +You can delete the veth pairs when you're done testing via this command: +``` +make network-clean +``` + +## Build pymodel docker image +``` +docker-pymodel-bldr +``` + +This will build a docker image which is based python 3.12 and all packages needed to run pymodel and to talk to the saithrift-server daemon, including: +* saithrift client libraries (Python) +* PTF framework from [OCP SAI repo](https://github.com/opencomputeproject/SAI.git), including all test cases +* The [PTF repo](https://github.com/p4lang/ptf) imported from p4lang +* Scapy etc. + +It also contains all the artifacts under `tests/` which includes PTF test-cases. 
Thus, it comprises a self-contained test resource with tools, libraries and test scripts. + +## Run Python Model +This will run the Python model packet sniffer in the foreground. The main process is `main_dash.py` which includes an embedded P4Runtime gRPC server (listening on port 9559) and uses scapy to sniff packets on configured interfaces. This will spew out verbose content when control APIs are called or packets are processed. Use additional terminals to run other test scripts. + +``` +make run-pymodel HAVE_DPAPP=y +make kill-pymodel # kill pymodel container +``` + +## Run saithrift-server +>**Note:** the Python model must be running, see [Run Python Model](#run-python-model) + +When this server is launched, it will establish a P4Runtime session (behind the scenes) to the running Python model. The thrift server listens on port `9092` for Thrift messages carrying SAI rpc commands. These commands are dispatched to the SAI library handlers. These handlers translate them into corresponding P4Runtime RPC commands and are sent to the Python model daemon onto a socket at standard P4Runtime port `9559`. + +``` +make run-saithrift-server TARGET=pymodel +``` + +When the server starts, the first SAI command it receives will load the `libsai.so` shared library and establish a P4Runtime connection. This results in a console message similar to below. Note this message doesn't necessarily appear when the daemon starts. This also loads the Python model with the P4Info (JSON file), see [Initialize Python Model](#initialize-python-model). + +``` +Server listening on 0.0.0.0:9559 +``` + +To stop it: +``` +make kill-saithrift-server +``` + +## Build saithrift-client docker image +``` +make docker-saithrift-client +``` + +This will build a docker image which has all libraries needed to talk to the saithrift-server daemon, including: +* saithrift client libraries (Python) +* PTF framework from [OCP SAI repo](https://github.com/opencomputeproject/SAI.git), including all test cases +* The [PTF repo](https://github.com/p4lang/ptf) imported from p4lang +* Scapy etc. + +It also contains all the artifacts under `tests/` which includes PTF test-cases. Thus, it comprises a self-contained test resource with tools, libraries and test scripts. + +## Run saithrift-client PTF tests +To run all PTF tests which use the saithrift interface, execute the following. You must have the Python model and saithrift-server running. + +``` +make run-saithrift-ptftests +``` + +This will launch a saithrift-client docker container and execute tests under `test/test-cases/functional`. + +## Run saithrift-client Pytests +To run all Pytests which use the saithrift interface, execute the following. You must run PTF tests before running pytests. + +``` +make run-saithrift-pytests +``` + +This will launch the saithrift-client docker container and execute tests under `tests/saithrift/pytest`. 
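## Optional: query the P4Runtime endpoint directly

The saithrift server is the normal way to program the Python model, but when debugging it can help to confirm that the model's embedded P4Runtime server is reachable at all. The snippet below is only a sketch, not a supported make target: it assumes the `grpcio` and `p4runtime` Python packages are installed on the host, that the model was started with `make run-pymodel HAVE_DPAPP=y`, and that `device_id=0` is acceptable to the server.

```
# Hypothetical sanity check against the Python model's P4Runtime server on :9559.
import grpc
from p4.v1 import p4runtime_pb2, p4runtime_pb2_grpc

channel = grpc.insecure_channel("localhost:9559")
stub = p4runtime_pb2_grpc.P4RuntimeStub(channel)

try:
    # Succeeds only after a controller (e.g. the saithrift server) has pushed the P4Info.
    resp = stub.GetForwardingPipelineConfig(
        p4runtime_pb2.GetForwardingPipelineConfigRequest(device_id=0))
    print("Pipeline config is set; tables in P4Info:", len(resp.config.p4info.tables))
except grpc.RpcError as err:
    print("P4Runtime RPC failed:", err.code(), err.details())
```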
\ No newline at end of file diff --git a/dash-pipeline/SAI/Makefile b/dash-pipeline/SAI/Makefile index 6acc9ba48..9e9a1b3b7 100644 --- a/dash-pipeline/SAI/Makefile +++ b/dash-pipeline/SAI/Makefile @@ -1,15 +1,32 @@ +TARGET ?= bmv2 all: copysrc - ./sai_api_gen.py \ - /bmv2/dash_pipeline.bmv2/dash_pipeline_p4rt.json \ - --ir /bmv2/dash_pipeline.bmv2/dash_pipeline_ir.json \ - --ignore-tables=underlay_mac,eni_meter,slb_decap \ - --sai-spec-dir=specs \ - dash + @echo "Generating SAI for target: $(TARGET)" + + @{ \ + if [ "$(TARGET)" = "bmv2" ]; then \ + ARTIFACTS_DIR="../bmv2/dash_pipeline.bmv2"; \ + elif [ "$(TARGET)" = "pymodel" ]; then \ + ARTIFACTS_DIR="../py_model/dash_pipeline.py_model"; \ + else \ + echo "Error: Unknown TARGET='$(TARGET)'. Use 'bmv2' or 'pymodel'."; \ + exit 1; \ + fi; \ + echo "Using artifacts from: $$ARTIFACTS_DIR"; \ + if [ ! -f "$$ARTIFACTS_DIR/dash_pipeline_p4rt.json" ]; then \ + echo "Error: $$ARTIFACTS_DIR/dash_pipeline_p4rt.json not found."; \ + exit 1; \ + fi; \ + ./sai_api_gen.py \ + $$ARTIFACTS_DIR/dash_pipeline_p4rt.json \ + --ir $$ARTIFACTS_DIR/dash_pipeline_ir.json \ + --ignore-tables=underlay_mac,eni_meter,slb_decap \ + --sai-spec-dir=specs \ + dash; \ + } copysrc: install -CDv src/Makefile src/*h src/*cpp lib/ - .PHONY: clean clean: rm -f lib/* diff --git a/dash-pipeline/SAI/specs/sai_spec.yaml b/dash-pipeline/SAI/specs/sai_spec.yaml index 62c6cd932..6f600190c 100644 --- a/dash-pipeline/SAI/specs/sai_spec.yaml +++ b/dash-pipeline/SAI/specs/sai_spec.yaml @@ -505,6 +505,118 @@ enums: name: FNIC description: '' value: '1' +- !!python/object:utils.sai_spec.sai_enum.SaiEnum + name: sai_dash_eni_mac_type_t + description: '' + members: + - !!python/object:utils.sai_spec.sai_enum_member.SaiEnumMember + name: SRC_MAC + description: '' + value: '0' + - !!python/object:utils.sai_spec.sai_enum_member.SaiEnumMember + name: DST_MAC + description: '' + value: '1' +- !!python/object:utils.sai_spec.sai_enum.SaiEnum + name: sai_dash_packet_source_t + description: '' + members: + - !!python/object:utils.sai_spec.sai_enum_member.SaiEnumMember + name: EXTERNAL + description: '' + value: '0' + - !!python/object:utils.sai_spec.sai_enum_member.SaiEnumMember + name: PIPELINE + description: '' + value: '1' + - !!python/object:utils.sai_spec.sai_enum_member.SaiEnumMember + name: DPAPP + description: '' + value: '2' + - !!python/object:utils.sai_spec.sai_enum_member.SaiEnumMember + name: PEER + description: '' + value: '3' +- !!python/object:utils.sai_spec.sai_enum.SaiEnum + name: sai_dash_packet_subtype_t + description: '' + members: + - !!python/object:utils.sai_spec.sai_enum_member.SaiEnumMember + name: NONE + description: '' + value: '0' + - !!python/object:utils.sai_spec.sai_enum_member.SaiEnumMember + name: FLOW_CREATE + description: '' + value: '1' + - !!python/object:utils.sai_spec.sai_enum_member.SaiEnumMember + name: FLOW_UPDATE + description: '' + value: '2' + - !!python/object:utils.sai_spec.sai_enum_member.SaiEnumMember + name: FLOW_DELETE + description: '' + value: '3' +- !!python/object:utils.sai_spec.sai_enum.SaiEnum + name: sai_dash_packet_type_t + description: '' + members: + - !!python/object:utils.sai_spec.sai_enum_member.SaiEnumMember + name: REGULAR + description: '' + value: '0' + - !!python/object:utils.sai_spec.sai_enum_member.SaiEnumMember + name: FLOW_SYNC_REQ + description: '' + value: '1' + - !!python/object:utils.sai_spec.sai_enum_member.SaiEnumMember + name: FLOW_SYNC_ACK + description: '' + value: '2' + - 
!!python/object:utils.sai_spec.sai_enum_member.SaiEnumMember + name: DP_PROBE_REQ + description: '' + value: '3' + - !!python/object:utils.sai_spec.sai_enum_member.SaiEnumMember + name: DP_PROBE_ACK + description: '' + value: '4' +- !!python/object:utils.sai_spec.sai_enum.SaiEnum + name: sai_dash_pipeline_stage_t + description: '' + members: + - !!python/object:utils.sai_spec.sai_enum_member.SaiEnumMember + name: INVALID + description: '' + value: '0' + - !!python/object:utils.sai_spec.sai_enum_member.SaiEnumMember + name: INBOUND_STAGE_STARTING + description: '' + value: '1' + - !!python/object:utils.sai_spec.sai_enum_member.SaiEnumMember + name: INBOUND_ROUTING + description: '' + value: '2' + - !!python/object:utils.sai_spec.sai_enum_member.SaiEnumMember + name: OUTBOUND_STAGE_STARTING + description: '' + value: '3' + - !!python/object:utils.sai_spec.sai_enum_member.SaiEnumMember + name: OUTBOUND_ROUTING + description: '' + value: '4' + - !!python/object:utils.sai_spec.sai_enum_member.SaiEnumMember + name: OUTBOUND_MAPPING + description: '' + value: '5' + - !!python/object:utils.sai_spec.sai_enum_member.SaiEnumMember + name: OUTBOUND_PRE_ROUTING_ACTION_APPLY + description: '' + value: '6' + - !!python/object:utils.sai_spec.sai_enum_member.SaiEnumMember + name: ROUTING_ACTION_APPLY + description: '' + value: '7' port_extenstion: !!python/object:utils.sai_spec.sai_api_extension.SaiApiExtension attributes: [] stats: diff --git a/dash-pipeline/dockerfiles/DOCKER_BMV2_BLDR_IMG.env b/dash-pipeline/dockerfiles/DOCKER_BMV2_BLDR_IMG.env index 6a99ecbc3..3a8215e45 100644 --- a/dash-pipeline/dockerfiles/DOCKER_BMV2_BLDR_IMG.env +++ b/dash-pipeline/dockerfiles/DOCKER_BMV2_BLDR_IMG.env @@ -4,3 +4,5 @@ export DASH_ACR_REGISTRY=sonicdash.azurecr.io export DOCKER_BMV2_BLDR_IMG_NAME?=${DASH_ACR_REGISTRY}/dash-bmv2-bldr export DOCKER_BMV2_BLDR_IMG_CTAG?=220819 + + diff --git a/dash-pipeline/dockerfiles/DOCKER_PYMODEL_BLDR_IMG.env b/dash-pipeline/dockerfiles/DOCKER_PYMODEL_BLDR_IMG.env new file mode 100644 index 000000000..e66cb81a4 --- /dev/null +++ b/dash-pipeline/dockerfiles/DOCKER_PYMODEL_BLDR_IMG.env @@ -0,0 +1,4 @@ +# dockerfiles/DOCKER_PYMODEL_IMG.env +export DASH_ACR_REGISTRY=sonicdash.azurecr.io +export DOCKER_PYMODEL_BLDR_IMG_NAME?=${DASH_ACR_REGISTRY}/dash-pymodel-bldr +export DOCKER_PYMODEL_BLDR_IMG_CTAG?=220830 diff --git a/dash-pipeline/dockerfiles/Dockerfile.pymodel-bldr b/dash-pipeline/dockerfiles/Dockerfile.pymodel-bldr new file mode 100644 index 000000000..6f446465a --- /dev/null +++ b/dash-pipeline/dockerfiles/Dockerfile.pymodel-bldr @@ -0,0 +1,53 @@ +# Start from official slim Python 3.12 image +FROM python:3.12-slim@sha256:099ee0a92a07ff652ef62145d7b6c70d316a14109d6e88a3a835de7ebd74c637 AS base + +LABEL maintainer="SONIC-DASH Community" +LABEL description="DASH Pymodel Builder" + +# Environment setup +ENV PYTHONUNBUFFERED=1 \ + PYTHONDONTWRITEBYTECODE=1 \ + TZ=America/Los_Angeles \ + DEBIAN_FRONTEND=noninteractive + +# Install only required system packages +RUN apt-get update -qq && \ + apt-get install -y --no-install-recommends \ + python3-dev \ + libpcap-dev \ + ca-certificates && \ + rm -rf /var/lib/apt/lists/* + +# Install Python dependencies +RUN pip install --no-cache-dir --upgrade pip && \ + pip install --no-cache-dir \ + scapy \ + p4runtime \ + grpcio \ + bitarray \ + openpyxl \ + cerberus \ + python-docx \ + google-cloud-storage \ + protobuf==3.20.* \ + googleapis-common-protos \ + packaging + +# Create non-root user +ARG user=dashuser +ARG group=dashusers +ARG uid=4321 +ARG 
guid=4321 +RUN addgroup --gid ${guid} ${group} && \ + adduser --disabled-password --gecos "" --uid ${uid} --gid ${guid} ${user} + +USER ${user} +WORKDIR / +# Copy project +COPY --chown=${user}:${group} py_model /py_model + +# Set Python path for the project +ENV PYTHONPATH=/py_model + +# Default command +CMD ["python"] diff --git a/dash-pipeline/images/dash-pymodel-thrift-workflow.svg b/dash-pipeline/images/dash-pymodel-thrift-workflow.svg new file mode 100644 index 000000000..b8eb3aaec --- /dev/null +++ b/dash-pipeline/images/dash-pymodel-thrift-workflow.svg @@ -0,0 +1,4 @@ + + + +
[SVG text labels, dash-pymodel-thrift-workflow.svg: Python model build/run workflow — make py-artifacts, make sai TARGET=pymodel, make saithrift-server, make docker-saithrift-client, make run-pymodel, make run-saithrift-server, make run-saithrift_XXXtests; DASH SAI header files, SAI implementation C++ code, sai_api_gen, libsai, saithrift server, SAI-P4RT adaptor/P4RT client, opencompute/SAI submodule, ixia-c SW traffic generator, build-time and run-time containers, registry pull/publish, legend.]
\ No newline at end of file diff --git a/dash-pipeline/images/dev-workflow-pymodel-saithrift.svg b/dash-pipeline/images/dev-workflow-pymodel-saithrift.svg new file mode 100644 index 000000000..7edf29791 --- /dev/null +++ b/dash-pipeline/images/dev-workflow-pymodel-saithrift.svg @@ -0,0 +1,4 @@ + + + +
[SVG text labels, dev-workflow-pymodel-saithrift.svg: Python model, SAI-P4RT adaptor/P4RT client, libsai, saithrift-server, p4runtime; make py-artifacts (make py-artifacts-clean), make sai TARGET=pymodel, make saithrift-server, make run-pymodel (make kill-pymodel), make run-saithrift-server, make run-xxx-tests, make run-saithrift-client-bash; PTF/Pytest test scripts built into container, test-case coding, packets over veths, generated dash_pipeline_p4rt.json/.txt artifacts.]
\ No newline at end of file diff --git a/dash-pipeline/images/dev-workflow-pymodel.svg b/dash-pipeline/images/dev-workflow-pymodel.svg new file mode 100644 index 000000000..e274e7a8a --- /dev/null +++ b/dash-pipeline/images/dev-workflow-pymodel.svg @@ -0,0 +1,4 @@ + + + +
[SVG text labels, dev-workflow-pymodel.svg: make py-artifacts (make py-artifacts-clean), generated dash_pipeline_p4rt.json/.txt artifacts, make run-pymodel (make kill-pymodel), Python Model, packets over veths, saithrift client container with PTF/Pytest test scripts, Python thrift client lib, SAI PTF framework and Scapy, make run-saithrift-client-bash.]
\ No newline at end of file diff --git a/dash-pipeline/py_model/README.md b/dash-pipeline/py_model/README.md new file mode 100644 index 000000000..3ef0cd408 --- /dev/null +++ b/dash-pipeline/py_model/README.md @@ -0,0 +1,62 @@ +# Python model (py_model) for DASH pipeline Developer Guide + +This folder contains the Python "py_model" implementation of the DASH pipeline used for local testing, artifact generation, and a lightweight P4Runtime-compatible control plane. It mirrors the P4 pipeline logic in Python for inspection, unit tests and for producing P4 mirrored artifacts. + +Contents +- Core model and entrypoints + - [`py_model/dash_py_v1model.py`](py_model/dash_py_v1model.py) — contains the Python model entrypoint. + - [`py_model/main_dash.py`](py_model/main_dash.py) — CLI/runner used by `make pymodel` and `run.sh`. +- Control plane (gRPC / P4Runtime helper) + - [`py_model/control_plane/grpc_server.py`](py_model/control_plane/grpc_server.py) — lightweight P4Runtime server skeleton including [`grpc_server.P4RuntimeServicer`](py_model/control_plane/grpc_server.py), helpers such as [`grpc_server.populate_tables_actions_ids`](py_model/control_plane/grpc_server.py) and [`grpc_server.pretty_print_proto`](py_model/control_plane/grpc_server.py). + - [`py_model/control_plane/control_plane.py`](py_model/control_plane/control_plane.py) — control plane helpers and mappings used by the server. +- Data plane implementation (pipeline stages & routing) + - Pipeline orchestration: [`py_model/data_plane/dash_pipeline.py`](py_model/data_plane/dash_pipeline.py). + - Inbound/outbound flows: [`py_model/data_plane/dash_inbound.py`](py_model/data_plane/dash_inbound.py) (class `inbound`) and [`py_model/data_plane/dash_outbound.py`](py_model/data_plane/dash_outbound.py) (class `outbound`). + - Routing & mapping stages: e.g. [`py_model/data_plane/stages/outbound_routing.py`](py_model/data_plane/stages/outbound_routing.py) (class `outbound_routing_stage`, method `apply`) and [`py_model/data_plane/stages/outbound_mapping.py`](py_model/data_plane/stages/outbound_mapping.py) (class `outbound_mapping_stage`, table `ca_to_pa`). + - Per-packet routing actions grouped in [`py_model/data_plane/stages/routing_action_apply.py`](py_model/data_plane/stages/routing_action_apply.py). + - Headers, metadata and routing types: [`py_model/data_plane/dash_headers.py`](py_model/data_plane/dash_headers.py), [`py_model/data_plane/dash_metadata.py`](py_model/data_plane/dash_metadata.py) and [`py_model/data_plane/dash_routing_types.py`](py_model/data_plane/dash_routing_types.py). 
+- Libraries (runtime support) + - Table and table helpers: [`py_model/libs/__table.py`](py_model/libs/__table.py) + - Common runtime vars and metadata: [`py_model/libs/__vars.py`](py_model/libs/__vars.py) + - ID / name mapping utilities: [`py_model/libs/__id_map.py`](py_model/libs/__id_map.py) + - JSON/textproto helpers used by artifact generation: [`py_model/libs/__jsonize.py`](py_model/libs/__jsonize.py) + - Counters and small utilities: [`py_model/libs/__counters.py`](py_model/libs/__counters.py), [`py_model/libs/__utils.py`](py_model/libs/__utils.py) + - Object class imports that connect modules: [`py_model/libs/__obj_classes.py`](py_model/libs/__obj_classes.py) +- Scripts and artifact generation + - Artifact generator: [`py_model/scripts/artifacts_gen.py`](py_model/scripts/artifacts_gen.py) — optimized generator that produces runtime artifacts (`dash_pipeline_p4rt.json`, `dash_pipeline_p4rt.txt`, and `dash_pipeline_ir.json`) by reflecting over the in-memory Python model. + - Call graph and codegen helpers: [`py_model/scripts/call_graph.py`](py_model/scripts/call_graph.py), [`py_model/scripts/gen_table_chain.py`](py_model/scripts/gen_table_chain.py), etc. +- Generated artifacts + - Pre-generated outputs are under [`py_model/dash_pipeline.py_model/`](py_model/dash_pipeline.py_model) (e.g. `dash_pipeline_p4rt.json`, `dash_pipeline_p4rt.txt`) used by saithrift-server and dpapp. + +How it fits together (brief) +- The `dash_py_v1model.dash_py_model` function is the high-level packet processing entry that builds headers/metadata and calls the pipeline (`dash_pipeline` modules). +- The pipeline is organized into stages (seen in `py_model/data_plane/stages/*`) which model table lookups and actions. A stage exposes an `apply()` method (for example [`outbound_routing_stage.apply`](py_model/data_plane/stages/outbound_routing.py)) that performs the table lookup logic, counters and potential packet drop or forwarding decisions. +- Routing transformations and NAT/encap actions are implemented in `py_model/data_plane/routing_actions/*`. +- The control-plane helper (`grpc_server.py`) can load P4Info-like JSON and populate maps of table/action/counter IDs (`grpc_server.populate_tables_actions_ids`) so the test server can simulate control operations. + +Quick start +- Generate py-model artifacts: + - Run the artifact generator: `python3 -m py_model.scripts.artifacts_gen` (same as `make py-artifacts`). See [`py_model/scripts/artifacts_gen.py`](py_model/scripts/artifacts_gen.py). +- Run the python model interactively: + - Use the runner: `make pymodel` (see [`py_model/main_dash.py`](py_model/main_dash.py)). +- Launch the P4Runtime test server/sniffer (if used): + - Start the server: call [`grpc_server.serve`](py_model/control_plane/grpc_server.py) or run the server module. + +Development pointers +- Code generation and reflections: study [`py_model/scripts/artifacts_gen.py`](py_model/scripts/artifacts_gen.py) to understand how the Python model is introspected to generate P4RT artifacts. +- Tables and runtime model objects live in [`py_model/libs/__table.py`](py_model/libs/__table.py) and are referenced by stage modules under `py_model/data_plane/stages/`. +- To trace a packet path, follow `dash_py_v1model.dash_py_model` -> `dash_pipeline` -> stage `apply()` methods (e.g. [`outbound_routing_stage.apply`](py_model/data_plane/stages/outbound_routing.py), [`outbound_mapping_stage.ca_to_pa`](py_model/data_plane/stages/outbound_mapping.py)). 
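To trace a single packet without any controller or saithrift machinery, the model entry point can also be called directly from a Python shell. The snippet below is a minimal, hypothetical sketch (not a supported workflow): it assumes the `dash-pipeline` directory is the working directory (or that `py_model` is otherwise importable, as inside the pymodel container) and that no table entries have been installed, so the pipeline will normally log a drop decision.

```
# Hypothetical interactive trace of one packet through the Python model.
from scapy.all import Ether, IP, UDP
from py_model.dash_py_v1model import dash_py_model

# Any well-formed packet works; the per-stage py_log output shows the traversal.
pkt = Ether(src="02:00:00:00:00:01", dst="02:00:00:00:00:02") / \
      IP(src="10.0.0.1", dst="10.0.0.2") / UDP(sport=1234, dport=80)

out = dash_py_model(bytes(pkt))   # returns the (possibly rewritten) packet bytes
print(len(out), "bytes out:", Ether(out).summary())
```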
+ +Useful files (quick links) +- Model entry: [`py_model/dash_py_v1model.py`](py_model/dash_py_v1model.py) — [`dash_py_v1model.dash_py_model`](py_model/dash_py_v1model.py) +- Runner: [`py_model/main_dash.py`](py_model/main_dash.py) +- Artifact generator: [`py_model/scripts/artifacts_gen.py`](py_model/scripts/artifacts_gen.py) +- Pipeline orchestration: [`py_model/data_plane/dash_pipeline.py`](py_model/data_plane/dash_pipeline.py) +- Inbound/outbound: [`py_model/data_plane/dash_inbound.py`](py_model/data_plane/dash_inbound.py) (class `inbound`), [`py_model/data_plane/dash_outbound.py`](py_model/data_plane/dash_outbound.py) (class `outbound`) +- Example stage: [`py_model/data_plane/stages/outbound_routing.py`](py_model/data_plane/stages/outbound_routing.py) (class `outbound_routing_stage`) +- Table runtime: [`py_model/libs/__table.py`](py_model/libs/__table.py) +- Control plane server: [`py_model/control_plane/grpc_server.py`](py_model/control_plane/grpc_server.py) + +Where to look next +- If you want to extend tables or behavioral logic, add/modify tables in `py_model/data_plane/stages/*` and adjust artifact generation logic in `py_model/scripts/artifacts_gen.py`. +- If you need to adapt the control-plane mapping, modify [`py_model/control_plane/grpc_server.py`](py_model/control_plane/grpc_server.py) and [`py_model/libs/__id_map.py`](py_model/libs/__id_map.py). diff --git a/dash-pipeline/py_model/__init__.py b/dash-pipeline/py_model/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/dash-pipeline/py_model/control_plane/__init__.py b/dash-pipeline/py_model/control_plane/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/dash-pipeline/py_model/control_plane/control_plane.py b/dash-pipeline/py_model/control_plane/control_plane.py new file mode 100644 index 000000000..970e3b7e3 --- /dev/null +++ b/dash-pipeline/py_model/control_plane/control_plane.py @@ -0,0 +1,283 @@ +import base64 +import hashlib +import builtins +from py_model.libs.__utils import * +from py_model.libs.__table import * +from py_model.libs.__id_map import * +from py_model.libs.__obj_classes import * +from py_model.data_plane.dash_pipeline import * + +class InsertRequest: + class Value: + class Ternary: + def __init__(self): + self.value = "" + self.mask = "" + + class LPM: + def __init__(self): + self.value = "" + self.prefix_len = 0 + + class Range: + def __init__(self): + self.low = "" + self.high = "" + + def __init__(self): + self.exact = "" + self.ternary = InsertRequest.Value.Ternary() + self.prefix = InsertRequest.Value.LPM() + self.range = InsertRequest.Value.Range() + self.ternary_list = [] + self.range_list = [] + + def __init__(self): + self.table = 0 + self.values = [] + self.action = 0 + self.params = [] + self.priority = 0 + +def get_hex_value(value: str): + decoded = base64.b64decode(value) + return decoded.hex() + +def get_table_name(table_id: int): + return table_ids.get(table_id, "unknown") + +def get_action_name(action_id: int) -> str: + return action_ids.get(action_id, "unknown") + +def _resolve_name(name: str, ctx=None, fallback_dict=None): + if ctx is None: + ctx = globals() + obj = ctx + for part in name.split(".")[1:] if len(name.split(".")) > 2 else name.split("."): + try: + obj = obj[part] if isinstance(obj, dict) else getattr(obj, part) + except (KeyError, AttributeError): + if fallback_dict and part in fallback_dict: + obj = fallback_dict[part] + else: + py_log("info", f"ERROR: cannot resolve '{part}'") + return None + return obj + +def 
resolve_action_name(name: str, ctx=None): + return _resolve_name(name, ctx) + +def resolve_table_name(name: str, ctx=None): + if name in table_objs: + return table_objs[name] + return _resolve_name(name, ctx, globals()) + +def safe_int(value): + if isinstance(value, int): + return value + if isinstance(value, str): + value = value.strip() + if value.startswith(("0x", "0X")): + return int(value, 16) + try: + return int(value, 16) + except ValueError: + try: + return int(value) + except Exception: + raise ValueError(f"Invalid numeric value: {value}") + raise ValueError(f"Unsupported type for conversion: {type(value)}") + +def normalize_table_entry(entry: dict) -> dict: + normalized = dict(entry) + + if "match" in normalized: + normalized["match"].sort(key=lambda m: m.get("fieldId", 0)) + + action_data = normalized.get("action", {}).get("action") + if action_data and "params" in action_data: + action_data["params"].sort(key=lambda p: p.get("paramId", 0)) + normalized["action"]["action"] = action_data + + return normalized + +def populate_table_entry(insertRequest: InsertRequest, key_format: list): + entry = Entry() + entry.values = [] + + for idx, val in builtins.enumerate(insertRequest.values): + if idx >= len(key_format): + py_log("info", f"Skipping index {idx}, no matching key format.") + continue + + match_type = key_format[idx] + + try: + if match_type is EXACT: + entry.values.append(val.exact) + + elif match_type is TERNARY: + ternary = Entry.Ternary() + ternary.value = safe_int(val.ternary.value) + ternary.mask = safe_int(val.ternary.mask) + entry.values.append(ternary) + + elif match_type is LIST: + entry.values.append([ + Entry.Ternary(value=safe_int(t.value), mask=safe_int(t.mask)) + for t in val.ternary_list + ]) + + elif match_type is RANGE: + rng = Entry.Range() + rng.low = safe_int(val.range.low) + rng.high = safe_int(val.range.high) + entry.values.append(rng) + + elif match_type is RANGE_LIST: + entry.values.append([ + Entry.Range(low=safe_int(r.low), high=safe_int(r.high)) + for r in val.range_list + ]) + + elif match_type is LPM: + lpm = Entry.LPM() + lpm.value = val.prefix.value + lpm.prefix_len = val.prefix.prefix_len + entry.values.append(lpm) + + except Exception as e: + py_log("error", f"{match_type} conversion error: {e}") + continue + + # Handle action resolution + action_id = insertRequest.action + if action_id is not None: + action_name = get_action_name(action_id) + py_log(f"Action: {action_name}") + action_obj = resolve_action_name(action_name) + if not action_obj: + py_log("info", f"Could not resolve action name: {action_name}") + return None + entry.action = action_obj + entry.params = insertRequest.params + entry.priority = insertRequest.priority + + return entry + +def table_insert_api(insertRequest: InsertRequest, obj_type, hash): + table_name = get_table_name(insertRequest.table) + if table_name == "unknown": + return RETURN_FAILURE + + table = resolve_table_name(table_name) + if not table or not table.key: + return RETURN_FAILURE + + entry = populate_table_entry(insertRequest, list(table.key.values())) + if not entry: + return RETURN_FAILURE + + if obj_type == 'INSERT': + if hash in table.entries: + py_log("info", "Matching entry exists, use MODIFY if you wish to change action") + return RETURN_FAILURE + table.insert(hash, entry) + # py_log("info", f"Entry {table.entry_cnt - 1} added to table '{table_name}'") + elif obj_type == 'MODIFY': + ret = table.update(hash, entry) + else: + py_log("info", f"Unknown operation type: {obj_type}") + return RETURN_FAILURE 
+ + return RETURN_SUCCESS + +def parse_insert_request(json_obj, obj_type): + insertRequest = InsertRequest() + + table_entry = normalize_table_entry( + json_obj.get("entity", {}).get("tableEntry", {}) + ) + match = table_entry.get("match", {}) + hash_val = hashlib.sha256(str(match).encode()).hexdigest() + + insertRequest.table = table_entry.get("tableId", []) + table_name = get_table_name(insertRequest.table) + table = resolve_table_name(table_name) + + if obj_type == 'DELETE': + if table.delete(hash_val) == RETURN_SUCCESS: + py_log("info", f"Removed entry {table.entry_cnt + 1} from table '{table_name}'") + return None, hash_val + elif obj_type == 'INSERT': + py_log("info", f"Entry {table.entry_cnt} being added to table '{table_name}'") + py_log("info", f"Dumpling entry {table.entry_cnt}") + elif obj_type == 'MODIFY': + py_log("info", f"Modifying entry in table '{table_name}'") + py_log("info", f"Dumpling entry") + + keys = list(table.key.keys()) + insertRequest.values = [] + + py_log(None, "Match key:") + for match_field in table_entry.get("match", []): + field_idx = match_field["fieldId"] - 1 + field_key = keys[field_idx] + val = InsertRequest.Value() + + def _get_val(field): + return get_hex_value(field.get("value", "0")) + + if "exact" in match_field: + val.exact = _get_val(match_field["exact"]) + py_log(None, f"* {field_key}: Exact : {val.exact}") + + if "ternary" in match_field: + t = val.ternary + t.value = _get_val(match_field["ternary"]) + t.mask = get_hex_value(match_field["ternary"]["mask"]) + val.ternary_list.append(t) + py_log(None, f"* {field_key}: TERNARY : {t.value} && {t.mask}") + + if "optional" in match_field: + opt_val = _get_val(match_field["optional"]) + if field_key in {'meta.dst_ip_addr', 'meta.src_ip_addr', 'meta.ip_protocol'}: + t = val.ternary + t.value = opt_val + mask_bits = ((1 << ((int(opt_val, 16).bit_length() + 7) // 8 * 8)) - 1) + t.mask = mask_bits + val.ternary_list.append(t) + py_log(None, f"* {field_key}: TERNARY-LIST : {t.value} && {hex(t.mask)}") + elif field_key in {'meta.src_l4_port', 'meta.dst_l4_port'}: + r = val.range + r.low = r.high = opt_val + val.range_list.append(r) + py_log(None, f"* {field_key}: RANGE-LIST: {r.low} -> {r.high}") + + if "lpm" in match_field: + val.prefix.value = _get_val(match_field["lpm"]) + val.prefix.prefix_len = match_field["lpm"]["prefixLen"] + py_log(None, f"* {field_key}: LPM : {val.prefix.value} : {hex(val.prefix.prefix_len)}") + + if "range" in match_field: + val.range.low = get_hex_value(match_field["range"]["low"]) + val.range.high = get_hex_value(match_field["range"]["high"]) + py_log(None, f"* {field_key}: Range: {val.range.low} -> {val.range.high}") + + insertRequest.values.append(val) + + insertRequest.priority = table_entry.get("priority", 0) + py_log(None, f"Priority: {insertRequest.priority}") + + action_data = table_entry.get("action", {}).get("action", {}) + insertRequest.action = action_data.get("actionId", None) + + if insertRequest.action is not None: + insertRequest.params = [ + int(get_hex_value(p["value"]), 16) if p.get("value") else 0 + for p in action_data.get("params", []) + ] + + py_log(None, f"Action entry: {table_name} - {insertRequest.params}") + + return insertRequest, hash_val diff --git a/dash-pipeline/py_model/control_plane/grpc_server.py b/dash-pipeline/py_model/control_plane/grpc_server.py new file mode 100644 index 000000000..ee618d80e --- /dev/null +++ b/dash-pipeline/py_model/control_plane/grpc_server.py @@ -0,0 +1,218 @@ +import json +import time +import grpc +import 
google.protobuf.json_format as json_format +from concurrent import futures +from p4.v1 import p4runtime_pb2 +from p4.v1 import p4runtime_pb2_grpc +from py_model.libs.__id_map import * +from py_model.control_plane.control_plane import * + +slices = 3 +table_entries = {} + +def pretty_print_proto(proto_msg, title="Protobuf Message"): + json_str = json_format.MessageToJson(proto_msg, indent=2, sort_keys=True) + py_log(None, f"\n==== {title} ====") + py_log(None, json_str) + py_log(None, "=" * 60 + "\n") + return json_str + +def populate_tables_actions_ids(json_data: str): + data = json.loads(json_data) + p4info = data.get("config", {}).get("p4info", {}) + + def shorten_name(full_name: str) -> str: + parts = full_name.split(".") + if len(parts) >= slices: + return ".".join(parts[-slices:]) + return full_name + + def extract_items(items): + mapping = {} + for item in items: + preamble = item.get("preamble", {}) + obj_id = preamble.get("id") + name = preamble.get("name", "") + if obj_id is not None and name: + mapping[obj_id] = shorten_name(name) + return mapping + + table_ids.update(extract_items(p4info.get("tables", []))) + action_ids.update(extract_items(p4info.get("actions", []))) + counter_ids.update(extract_items(p4info.get("counters", []))) + direct_counter_ids.update(extract_items(p4info.get("directCounters", []))) + +class P4RuntimeServicer(p4runtime_pb2_grpc.P4RuntimeServicer): + def __init__(self): + self.p4_pipeline_config = None + self.master_election_id = None + + # Handles pipeline configuration setup + def SetForwardingPipelineConfig(self, request, context): + # pretty_print_proto(request, "SetForwardingPipelineConfig Request") + + if request.action not in ( + p4runtime_pb2.SetForwardingPipelineConfigRequest.VERIFY, + p4runtime_pb2.SetForwardingPipelineConfigRequest.VERIFY_AND_COMMIT, + p4runtime_pb2.SetForwardingPipelineConfigRequest.COMMIT + ): + py_log("error", f"[P4Runtime] Unsupported action: {request.action}") + context.abort( + grpc.StatusCode.INVALID_ARGUMENT, + f"Unsupported action: {request.action}" + ) + + json_str = json_format.MessageToJson(request, indent=2, sort_keys=True) + populate_tables_actions_ids(json_str) + + self.p4_pipeline_config = request.config + return p4runtime_pb2.SetForwardingPipelineConfigResponse() + + def GetForwardingPipelineConfig(self, request, context): + # pretty_print_proto(request, "GetForwardingPipelineConfig Request") + resp = p4runtime_pb2.GetForwardingPipelineConfigResponse() + + if not self.p4_pipeline_config: + py_log("error", f"[P4Runtime] Pipeline config not set") + context.abort(grpc.StatusCode.NOT_FOUND, "Pipeline config not set") + + resp.config.CopyFrom(self.p4_pipeline_config) + return resp + + def Write(self, request, context): + # pretty_print_proto(request, "Write Request") + + for idx, update in enumerate(request.updates): + try: + # Convert Protobuf message to JSON + update_dict = json.loads(json_format.MessageToJson(update)) + obj_type = update_dict.get("type", {}) + + table_entry = update.entity.table_entry + table_id = table_entry.table_id + + if table_id not in table_entries: + table_entries[table_id] = [] + + ins_req, hash = parse_insert_request(update_dict, obj_type) + if obj_type == "DELETE": + # Remove matching entry by comparing match fields + removed = False + for i, existing_entry in enumerate(table_entries[table_id]): + if existing_entry.match == table_entry.match: + del table_entries[table_id][i] + removed = True + break + if not removed: + py_log("info", f"[P4Runtime] Delete target not found in table 
{table_id}") + else: + ret = table_insert_api(ins_req, obj_type, hash) + if ret == RETURN_FAILURE: + py_log("error", f"[P4Runtime] Entry already exists, skipping update [{idx}]") + context.abort( + grpc.StatusCode.ALREADY_EXISTS, + f"Error processing update [{idx}]" + ) + + if obj_type == "INSERT": + table_entries[table_id].append(table_entry) + + elif obj_type == "MODIFY": + # Find matching entry by comparing match fields + replaced = False + for i, existing_entry in enumerate(table_entries[table_id]): + if existing_entry.match == table_entry.match: + table_entries[table_id][i] = table_entry + replaced = True + # py_log("info", f"[P4Runtime] Modified entry in table {table_id}") + break + if not replaced: + py_log("info", f"[P4Runtime] Modify target not found, inserting instead") + table_entries[table_id].append(table_entry) + except Exception as e: + py_log("error", "[P4Runtime] Error processing update [{idx}]: {e}") + context.abort( + grpc.StatusCode.INVALID_ARGUMENT, + f"Error processing update [{idx}]: {e}" + ) + + return p4runtime_pb2.WriteResponse() + + def Read(self, request, context): + # pretty_print_proto(request, "Read Request") + if not self.p4_pipeline_config: + py_log("error", f"[P4Runtime] Pipeline config not set") + context.abort(grpc.StatusCode.FAILED_PRECONDITION, "Pipeline config not set") + + for entity in request.entities: + if entity.WhichOneof("entity") == "table_entry": + table_entry = entity.table_entry + table_id = table_entry.table_id + + found = False + if table_id in table_entries and table_entries[table_id]: + for stored_entry in table_entries[table_id]: + # Convert stored_entry.match into list of dicts + stored_entry_json = [json_format.MessageToDict(m) for m in stored_entry.match] + stored_entry_json = sorted(stored_entry_json, key=lambda m: m.get("fieldId", 0)) + + # Convert table_entry.match into list of dicts + table_entry_json = [json_format.MessageToDict(m) for m in table_entry.match] + table_entry_json = sorted(table_entry_json, key=lambda m: m.get("fieldId", 0)) + + # Now compare normalized dicts + if stored_entry_json == table_entry_json: + found = True + yield p4runtime_pb2.ReadResponse( + entities=[ + p4runtime_pb2.Entity( + table_entry=stored_entry + ) + ] + ) + break + if not found: + py_log("error", "[P4Runtime] Cannot find match entry") + + # Handles bi-directional communication (StreamChannel) + def StreamChannel(self, request_iterator, context): + for request in request_iterator: + # pretty_print_proto(request, "StreamChannel Message") + + if request.HasField("arbitration"): + election_id = request.arbitration.election_id + + if (not self.master_election_id) or ( + (election_id.high, election_id.low) > (self.master_election_id.high, self.master_election_id.low) + ): + self.master_election_id = election_id + + response = p4runtime_pb2.StreamMessageResponse() + response.arbitration.election_id.low = request.arbitration.election_id.low + response.arbitration.election_id.high = 0 + response.arbitration.status.code = 0 # OK + yield response + + elif request.HasField("packet"): + packet_out = p4runtime_pb2.StreamMessageResponse() + packet_out.packet.payload = request.packet.payload + yield packet_out + + return iter([]) + + +# Start the gRPC server and sniffer +def serve(): + server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + p4runtime_pb2_grpc.add_P4RuntimeServicer_to_server(P4RuntimeServicer(), server) + server.add_insecure_port("[::]:9559") + server.start() + py_log(None, "Server listening on 0.0.0.0:9559\n") + + try: + while 
True: + time.sleep(86400) # Keep the server alive + except KeyboardInterrupt: + py_log(None, "Shutting down gRPC server.\n") + server.stop(0) diff --git a/dash-pipeline/py_model/dash_py_v1model.py b/dash-pipeline/py_model/dash_py_v1model.py new file mode 100755 index 000000000..7fda3d990 --- /dev/null +++ b/dash-pipeline/py_model/dash_py_v1model.py @@ -0,0 +1,96 @@ +import struct +import socket + +from py_model.libs.__utils import * +from py_model.libs.__obj_classes import * + +from py_model.data_plane.dash_parser import * +from py_model.data_plane.dash_headers import * +from py_model.data_plane.dash_metadata import * +from py_model.data_plane.dash_pipeline import * + +def dash_verify_checksum(): + pass + +def DASH_COMPUTE_CHECKSUM_DEF(underlay_id): + def compute_checksum(): + ipv4 = getattr(hdr, f"{underlay_id}_ipv4", None) + if ipv4 is None: + return None + + # First 16 bits: version (4b) + IHL (4b) + DiffServ/DSCP (8b) + ver_ihl = (ipv4.version << 12) | (ipv4.ihl << 8) | ipv4.diffserv + + # Flags (3b) + Fragment offset (13b) + flags_frag = (ipv4.flags << 13) | ipv4.frag_offset + + # Convert IP addresses to 32-bit integers + src = struct.unpack("!I", socket.inet_aton(str(ipv4.src_addr)))[0] + dst = struct.unpack("!I", socket.inet_aton(str(ipv4.dst_addr)))[0] + + # Pack the header fields (checksum = 0 while computing) + header = struct.pack( + "!HHHHBBHII", + ver_ihl, # Version + IHL + DiffServ + ipv4.total_len, # Total length + ipv4.identification, # Identification + flags_frag, # Flags + Fragment offset + ipv4.ttl, # TTL + ipv4.protocol, # Protocol + 0, # Placeholder checksum + src, # Source address + dst # Destination address + ) + + # Compute Internet checksum (RFC 791) + total = 0 + for i in range(0, len(header), 2): + word = (header[i] << 8) + header[i+1] + total += word + total = (total & 0xFFFF) + (total >> 16) + + checksum = ~total & 0xFFFF + + # Update the field directly + ipv4.hdr_checksum = checksum + + compute_checksum.__name__ = f"compute_checksum_{underlay_id}" + return compute_checksum + +compute_checksum_u0 = DASH_COMPUTE_CHECKSUM_DEF("u0") +compute_checksum_u1 = DASH_COMPUTE_CHECKSUM_DEF("u1") + + +def dash_compute_checksum(): + if hdr.u1_ipv4: + compute_checksum_u1() + compute_checksum_u0() + + +def dash_py_model(pkt_bytes): + hdr.__init__() + meta.__init__() + pkt_in.__init__() + pkt_out.__init__() + standard_metadata.__init__() + + pkt_in.set_data(pkt_bytes) + + state = dash_parser(pkt_in) + if state == State.reject: + py_log("info", "Parser rejected the packet") + + dash_verify_checksum() + + dash_ingress.apply() + + py_log("info", f"Egress port is: {standard_metadata.egress_spec}") + if is_dropped(standard_metadata): + py_log("info", "Pipeline dropped the packet\n") + else: + dash_compute_checksum() + + dash_deparser(pkt_out) + + final_pkt = pkt_out.data + pkt_in.get_unparsed_slice() + return final_pkt.tobytes() diff --git a/dash-pipeline/py_model/data_plane/__init__.py b/dash-pipeline/py_model/data_plane/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/dash-pipeline/py_model/data_plane/dash_acl.py b/dash-pipeline/py_model/data_plane/dash_acl.py new file mode 100755 index 000000000..215763cd0 --- /dev/null +++ b/dash-pipeline/py_model/data_plane/dash_acl.py @@ -0,0 +1,134 @@ +from py_model.libs.__utils import * +from py_model.libs.__table import * +from py_model.libs.__counters import * + + +# Store inbound/outbound ACL stage tables +ACL_STAGES = { + "inbound": [], + "outbound": [] +} + +def make_keys(stage_num: int): + key = { + 
f"meta.stage{stage_num}_dash_acl_group_id": + (EXACT, {"name": "dash_acl_group_id", "type": "sai_object_id_t", + "isresourcetype": "true", "objects": "SAI_OBJECT_TYPE_DASH_ACL_GROUP"}), + "meta.dst_ip_addr": (LIST, {"name": "dip", "type": "sai_ip_prefix_list_t", "match_type": "list"}), + "meta.src_ip_addr": (LIST, {"name": "sip", "type": "sai_ip_prefix_list_t", "match_type": "list"}), + "meta.ip_protocol": (LIST, {"name": "protocol", "type": "sai_u8_list_t", "match_type": "list"}), + "meta.src_l4_port": (RANGE_LIST, {"name": "src_port", "type": "sai_u16_range_list_t", "match_type": "range_list"}), + "meta.dst_l4_port": (RANGE_LIST, {"name": "dst_port", "type": "sai_u16_range_list_t", "match_type": "range_list"}) + } + return key + +def make_saitable(stage_num: int): + return SaiTable(name="dash_acl_rule", api="dash_acl", + stage=f"acl.stage{stage_num}", + order=1, isobject="true",) + +class acl: + @staticmethod + def permit(): + pass + + @staticmethod + def permit_and_continue(): + pass + + @staticmethod + def deny(): + meta.dropped = True + + @staticmethod + def deny_and_continue(): + meta.dropped = True + + # Define stage counters globally for ACL stages + DEFINE_TABLE_COUNTER("stage1_counter", CounterType.PACKETS_AND_BYTES) + DEFINE_TABLE_COUNTER("stage2_counter", CounterType.PACKETS_AND_BYTES) + DEFINE_TABLE_COUNTER("stage3_counter", CounterType.PACKETS_AND_BYTES) + + # Create table placeholders for ACL stages + stage1 = Table(key={}, actions=[], tname="inbound.acl.stage1") + stage2 = Table(key={}, actions=[], tname="inbound.acl.stage2") + stage3 = Table(key={}, actions=[], tname="inbound.acl.stage3") + + @classmethod + def apply(cls, direction="inbound"): + for stage_num, acl_table, _ in ACL_STAGES[direction]: + group_id = getattr(meta, f"stage{stage_num}_dash_acl_group_id", None) + if not group_id: + continue + + py_log("info", f"Applying table '{direction}.acl.stage{stage_num}'") + result = acl_table.apply() + action = result.get("action_run") + if action in (cls.deny, cls.permit): + return + + +def get_acl_namespace(cls_name: str, acl_cls): + direction = "inbound" if cls_name == "inbound" else "outbound" + + class _Namespace: + dir = direction + + def __init__(self): + def wrap(func, name): + def wrapped(*args, **kwargs): + return func(*args, **kwargs) + wrapped.__name__ = f"{direction}.acl.{name}" + wrapped.__qualname__ = f"{direction}.acl.{name}" + return staticmethod(wrapped) + + self.acl = type('acl_ns', (), { + 'permit': wrap(acl_cls.permit, "permit"), + 'permit_and_continue': wrap(acl_cls.permit_and_continue, "permit_and_continue"), + 'deny': wrap(acl_cls.deny, "deny"), + 'deny_and_continue': wrap(acl_cls.deny_and_continue, "deny_and_continue"), + })() + + def __str__(self): + return self.dir + + return _Namespace() + +def build_acl_table(stage_num: int, ns): + tab_name = f"{ns.dir}.acl.stage{stage_num}" + + actions = [ + ns.acl.permit, + ns.acl.permit_and_continue, + ns.acl.deny, + ns.acl.deny_and_continue, + ] + + table = Table( + key=make_keys(stage_num), + actions=[(func, {}) for func in actions], + default_action=ns.acl.deny, + tname=tab_name, + sai_table=make_saitable(stage_num), + ) + + table.default_action = ns.acl.deny + + ATTACH_TABLE_COUNTER(f"stage{stage_num}_counter", tab_name) + + for func, _ in table.actions: + action_objs[func.__name__] = (func, {}) + + group_id = getattr(meta, f"stage{stage_num}_dash_acl_group_id") + stage = (stage_num, table, group_id) + + return stage + +def setup_acl(direction: str): + ns = get_acl_namespace(direction, acl) + 
ACL_STAGES[direction] = [build_acl_table(stage, ns) for stage in range(1, 4)] + + +# Build both inbound and outbound ACL stages +setup_acl("inbound") +setup_acl("outbound") diff --git a/dash-pipeline/py_model/data_plane/dash_conntrack.py b/dash-pipeline/py_model/data_plane/dash_conntrack.py new file mode 100644 index 000000000..0822320d8 --- /dev/null +++ b/dash-pipeline/py_model/data_plane/dash_conntrack.py @@ -0,0 +1,131 @@ +from py_model.libs.__utils import * +from py_model.libs.__table import * + +if PNA_CONNTRACK: + EXPIRE_TIME_PROFILE_NOW = 0 # Expire immediately + EXPIRE_TIME_PROFILE_LONG = 2 # Expire after a long period + + # Helpers to neutralize direction for IP address and port + def directionNeutralAddr(direction: dash_direction_t, + outbound_address: Annotated[int, IPv4Address_size], + inbound_address: Annotated[int, IPv4Address_size]): + if direction == dash_direction_t.OUTBOUND: + return outbound_address + else: + return inbound_address + + def directionNeutralPort(direction: dash_direction_t, + outbound_port: Annotated[int, 16], + inbound_port: Annotated[int, 16]): + if direction == dash_direction_t.OUTBOUND: + return outbound_port + else: + return inbound_port + + + class ConntrackIn: + @staticmethod + def conntrackIn_allow(original_overlay_sip: Annotated[int, IPv4Address_size], + original_overlay_dip: Annotated[int, IPv4Address_size]): + # Invalidate entry based on TCP flags + # If FIN is 1 (0b000001), or if RST is 1 (0b000100): + if (hdr.customer_tcp is not None and hdr.customer_tcp.flags & 0b000101) != 0: # FIN/RST + # set_entry_expire_time(EXPIRE_TIME_PROFILE_NOW) # New PNA extern + # # set entry to be purged + pass + + # restart_expire_timer() # reset expiration timer for entry + meta.conntrack_data.allow_in = True + meta.overlay_data.is_ipv6 = 0 + meta.overlay_data.sip = original_overlay_sip + meta.overlay_data.dip = original_overlay_dip + + @staticmethod + def conntrackIn_miss(): + # TODO: Should this be ((hdr.tcp.flags & 0x2) != 0) instead? + if hdr.customer_tcp is not None and hdr.customer_tcp.flags == 0x2: # SYN + if meta.direction == dash_direction_t.OUTBOUND: + if (meta.routing_actions & dash_routing_actions_t.NAT46) != 0: + # # New PNA extern: add new entry to table + # add_entry( + # "conntrackIn_allow", + # [IPv4Address_size(meta.src_ip_addr), IPv4Address_size(meta.dst_ip_addr)], + # EXPIRE_TIME_PROFILE_LONG + # ) + pass + # TODO: Add failure handling + + + conntrackIn = Table( + key = { + "ipv4_addr1" : EXACT, + "ipv4_addr2" : EXACT, + "hdr.customer_ipv4.protocol" : EXACT, + "tcp_port1" : EXACT, + "tcp_port2" : EXACT, + "meta.eni_id" : EXACT + }, + actions = [ + conntrackIn_allow, + conntrackIn_miss + ], + const_default_action = conntrackIn_miss, + tname=f"{__qualname__}.conntrackIn", + ) + + @classmethod + def apply(cls): + py_log("info", "Applying table Table: 'conntrackIn'") + cls.conntrackIn.apply() + + class ConntrackOut: + @staticmethod + def conntrackOut_allow(): + # Invalidate entry based on TCP flags + # If FIN is 1 (0b000001), or if RST is 1 (0b000100): + if (hdr.customer_tcp is not None and hdr.customer_tcp.flags & 0b000101) != 0: # FIN/RST + # set_entry_expire_time(EXPIRE_TIME_PROFILE_NOW) # New PNA extern + pass + # # set entry to be purged + + # restart_expire_timer() # reset expiration timer for entry + meta.conntrack_data.allow_out = True + + # Handle miss (SYN packet cases) + @staticmethod + def conntrackOut_miss(): + # TODO: Should this be ((hdr.tcp.flags & 0x2) != 0) instead? 
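+ # Note (added clarification): flags == 0x2 matches only a pure SYN, i.e. no other TCP flag bits set; the (flags & 0x2) != 0 form suggested in the TODO above would also match SYN packets carrying additional flags such as ECE/CWR.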
+ if hdr.customer_tcp is not None and hdr.customer_tcp.flags == 0x2: # SYN + if meta.direction == dash_direction_t.INBOUND: + # # New PNA extern: add new entry to table + # add_entry("conntrackOut_allow", {}, EXPIRE_TIME_PROFILE_LONG) + # # TODO: Add failure handling + pass + + + conntrackOut = Table( + key = { + "ipv4_addr1" : (EXACT, {}), + "ipv4_addr2" : (EXACT, {}), + "hdr.customer_ipv4.protocol": (EXACT, {}), + "tcp_port1" : (EXACT, {}), + "tcp_port2" : (EXACT, {}), + "meta.eni_id" : (EXACT, {}) + }, + actions = [ + conntrackOut_allow, + conntrackOut_miss + ], + const_default_action = conntrackOut_miss, + tname=f"{__qualname__}.conntrackOut", + ) + + @classmethod + def apply(cls): + py_log("info", "Applying table Table: 'conntrackOut'") + cls.conntrackOut.apply() + + +if STATEFUL_P4: + # related to state_graph + pass \ No newline at end of file diff --git a/dash-pipeline/py_model/data_plane/dash_counters.py b/dash-pipeline/py_model/data_plane/dash_counters.py new file mode 100644 index 000000000..d7f402545 --- /dev/null +++ b/dash-pipeline/py_model/data_plane/dash_counters.py @@ -0,0 +1,50 @@ +from py_model.libs.__utils import * +from py_model.libs.__counters import * + +# Port level +DEFINE_PACKET_COUNTER("vip_miss_drop", 1, attr_type="stats") +DEFINE_PACKET_COUNTER("eni_miss_drop", 1, attr_type="stats") +DEFINE_COUNTER("port_lb_fast_path_icmp_in", 1, attr_type="stats") +DEFINE_COUNTER("port_lb_fast_path_eni_miss_drop", 1, attr_type="stats") + +# ENI level +DEFINE_ENI_COUNTER("eni_rx", "rx") +DEFINE_ENI_COUNTER("eni_tx", "tx") +DEFINE_ENI_COUNTER("eni_outbound_rx", "outbound_rx") +DEFINE_ENI_COUNTER("eni_outbound_tx", "outbound_tx") +DEFINE_ENI_COUNTER("eni_inbound_rx", "inbound_rx") +DEFINE_ENI_COUNTER("eni_inbound_tx", "inbound_tx") +DEFINE_ENI_COUNTER("eni_lb_fast_path_icmp_in", "lb_fast_path_icmp_in") + +# ENI-level flow operation counters (hits) +DEFINE_ENI_HIT_COUNTER("flow_created") +DEFINE_ENI_HIT_COUNTER("flow_create_failed") +DEFINE_ENI_HIT_COUNTER("flow_updated") +DEFINE_ENI_HIT_COUNTER("flow_update_failed") +DEFINE_ENI_HIT_COUNTER("flow_updated_by_resimulation") +DEFINE_ENI_HIT_COUNTER("flow_update_by_resimulation_failed") +DEFINE_ENI_HIT_COUNTER("flow_deleted") +DEFINE_ENI_HIT_COUNTER("flow_delete_failed") +DEFINE_ENI_HIT_COUNTER("flow_aged") + +# ENI-level data plane flow sync packet counters +DEFINE_ENI_COUNTER("inline_sync_packet_rx") +DEFINE_ENI_COUNTER("inline_sync_packet_tx") +DEFINE_ENI_COUNTER("timed_sync_packet_rx") +DEFINE_ENI_COUNTER("timed_sync_packet_tx") + +# ENI-level data plane flow sync request counters: +DEFINE_ENI_FLOW_SYNC_COUNTERS("flow_create") +DEFINE_ENI_FLOW_SYNC_COUNTERS("flow_update") +DEFINE_ENI_FLOW_SYNC_COUNTERS("flow_delete") + +# ENI-level drop counters +DEFINE_ENI_PACKET_COUNTER("outbound_routing_entry_miss_drop") +DEFINE_ENI_PACKET_COUNTER("outbound_ca_pa_entry_miss_drop") +DEFINE_ENI_PACKET_COUNTER("inbound_routing_entry_miss_drop") +DEFINE_ENI_PACKET_COUNTER("outbound_routing_group_miss_drop") +DEFINE_ENI_PACKET_COUNTER("outbound_routing_group_disabled_drop") +DEFINE_ENI_PACKET_COUNTER("outbound_port_map_miss_drop") +DEFINE_ENI_PACKET_COUNTER("outbound_port_map_port_range_entry_miss_drop") +DEFINE_ENI_PACKET_COUNTER("eni_trusted_vni_entry_miss_drop") + diff --git a/dash-pipeline/py_model/data_plane/dash_headers.py b/dash-pipeline/py_model/data_plane/dash_headers.py new file mode 100755 index 000000000..388d364f9 --- /dev/null +++ b/dash-pipeline/py_model/data_plane/dash_headers.py @@ -0,0 +1,385 @@ +from typing import * +from inspect 
import * +from enum import IntEnum, IntFlag + +EthernetAddress_size = 48 +IPv4Address_size = 32 +IPv6Address_size = 128 +IPv4ORv6Address_size = 128 + +UDP_PORT_VXLAN = 0x12B5 +UDP_PROTO = 0x11 +TCP_PROTO = 0x06 +NVGRE_PROTO = 0x2f +IPV4_ETHTYPE = 0x0800 +IPV6_ETHTYPE = 0x86DD +DASH_ETHTYPE = 0x876d + +class dash_direction_t(IntEnum): + INVALID = 0 + OUTBOUND = 1 + INBOUND = 2 + __bitwidth__ = 16 + +dash_meter_class_t = 32 + +class dash_packet_source_t(IntEnum): + EXTERNAL = 0 + PIPELINE = 1 + DPAPP = 2 + PEER = 3 + __bitwidth__ = 8 + +class dash_packet_type_t(IntEnum): + REGULAR = 0 + FLOW_SYNC_REQ = 1 + FLOW_SYNC_ACK = 2 + DP_PROBE_REQ = 3 + DP_PROBE_ACK = 4 + __bitwidth__ = 4 + +class dash_packet_subtype_t(IntEnum): + NONE = 0 + FLOW_CREATE = 1 + FLOW_UPDATE = 2 + FLOW_DELETE = 3 + __bitwidth__ = 4 + + +class dash_encapsulation_t(IntEnum): + INVALID = 0 + VXLAN = 1 + NVGRE = 2 + __bitwidth__ = 16 + +# Flow sync state +class dash_flow_sync_state_t(IntEnum): + FLOW_MISS = 0 + FLOW_CREATED = 1 + FLOW_SYNCED = 2 + FLOW_PENDING_DELETE = 3 + FLOW_PENDING_RESIMULATION = 4 + __bitwidth__ = 8 + +class dash_flow_action_t(IntFlag): + NONE = 0 + ENCAP_U0 = (1 << 0) + ENCAP_U1 = (1 << 1) + SET_SMAC = (1 << 2) + SET_DMAC = (1 << 3) + SNAT = (1 << 4) + DNAT = (1 << 5) + NAT46 = (1 << 6) + NAT64 = (1 << 7) + SNAT_PORT = (1 << 8) + DNAT_PORT = (1 << 9) + __bitwidth__ = 32 + +class dash_flow_enabled_key_t(IntFlag): + ENI_MAC = (1 << 0) + VNI = (1 << 1) + PROTOCOL = (1 << 2) + SRC_IP = (1 << 3) + DST_IP = (1 << 4) + SRC_PORT = (1 << 5) + DST_PORT = (1 << 6) + __bitwidth__= 16 + +class dash_flow_entry_bulk_get_session_mode_t(IntEnum): + SAI_DASH_FLOW_ENTRY_BULK_GET_SESSION_MODE_GRPC = 0 + SAI_DASH_FLOW_ENTRY_BULK_GET_SESSION_MODE_VENDOR = 1 + SAI_DASH_FLOW_ENTRY_BULK_GET_SESSION_MODE_EVENT = 2 + SAI_DASH_FLOW_ENTRY_BULK_GET_SESSION_MODE_EVENT_WITHOUT_FLOW_STATE = 3 + __bitwidth__ = 16 + +class dash_flow_entry_bulk_get_session_filter_key_t(IntEnum): + INVAILD = 0 #FIXME:: Farhan + FLOW_TABLE_ID = 1 + ENI_MAC = 2 + IP_PROTOCOL = 3 + SRC_IP_ADDR = 4 + DST_IP_ADDR = 5 + SRC_L4_PORT = 6 + DST_L4_PORT = 7 + KEY_VERSION = 8 + __bitwidth__ = 16 + +class dash_flow_entry_bulk_get_session_op_key_t(IntEnum): + FILTER_OP_INVALID = 0 + FILTER_OP_EQUAL_TO = 1 + FILTER_OP_GREATER_THAN = 2 + FILTER_OP_GREATER_THAN_OR_EQUAL_TO = 3 + FILTER_OP_LESS_THAN = 4 + FILTER_OP_LESS_THAN_OR_EQUAL_TO = 5 + __bitwidth__ = 8 + +class flow_table_data_t: + id : Annotated[int, 16] + max_flow_count : Annotated[int, 32] + flow_enabled_key : Annotated[int, 16] + flow_ttl_in_milliseconds : Annotated[int, 32] + + def __init__(self): + self.id = 0 + self.max_flow_count = 0 + self.flow_enabled_key = 0 + self.flow_ttl_in_milliseconds = 0 + + +class flow_key_t: + eni_mac : Annotated[int, EthernetAddress_size] + vnet_id : Annotated[int, 16] + src_ip : Annotated[int, IPv4ORv6Address_size] + dst_ip : Annotated[int, IPv4ORv6Address_size] + src_port : Annotated[int, 16] + dst_port : Annotated[int, 16] + ip_proto : Annotated[int, 8] + reserved : Annotated[int, 7] + is_ip_v6 : Annotated[int, 1] + + def __init__(self): + self.eni_mac = 0 + self.vnet_id = 0 + self.src_ip = 0 + self.dst_ip = 0 + self.src_port = 0 + self.dst_port = 0 + self.ip_proto = 0 + self.reserved = 0 + self.is_ip_v6 = 0 + +FLOW_KEY_HDR_SIZE = 46 + +class flow_data_t: + reserved : Annotated[int, 7] + is_unidirectional : Annotated[int, 1] + direction : dash_direction_t + version : Annotated[int, 32] + actions : dash_flow_action_t + meter_class : Annotated[int, 32] + idle_timeout_in_ms : 
Annotated[int, 32] + + def __init__(self): + self.reserved = 0 + self.is_unidirectional = 0 + self.direction = dash_direction_t.INVALID + self.version = 0 + self.actions = dash_flow_action_t.NONE + self.meter_class = 0 + self.idle_timeout_in_ms = 0 + +FLOW_DATA_HDR_SIZE = 19 + +class dash_packet_meta_t: + packet_source : dash_packet_source_t + packet_type : dash_packet_type_t + packet_subtype : dash_packet_subtype_t + length : Annotated[int, 16] + + def __init__(self): + self.packet_source = dash_packet_source_t.EXTERNAL + self.packet_type = dash_packet_type_t.REGULAR + self.packet_subtype = dash_packet_subtype_t.NONE + self.length = 0 + +PACKET_META_HDR_SIZE = 4 + + +class encap_data_t: + vni : Annotated[int, 24] + reserved : Annotated[int, 8] + underlay_sip : Annotated[int, IPv4Address_size] + underlay_dip : Annotated[int, IPv4Address_size] + underlay_smac : Annotated[int, EthernetAddress_size] + underlay_dmac : Annotated[int, EthernetAddress_size] + dash_encapsulation : dash_encapsulation_t + + def __init__(self): + self.vni = 0 + self.reserved = 0 + self.underlay_sip = 0 + self.underlay_dip = 0 + self.underlay_smac = 0 + self.underlay_dmac = 0 + self.dash_encapsulation = dash_encapsulation_t.INVALID + +ENCAP_DATA_HDR_SIZE = 26 + +class overlay_rewrite_data_t: + # smac : Annotated[int, EthernetAddress_size] + dmac : Annotated[int, EthernetAddress_size] + sip : Annotated[int, IPv4ORv6Address_size] + dip : Annotated[int, IPv4ORv6Address_size] + sip_mask : Annotated[int, IPv6Address_size] + dip_mask : Annotated[int, IPv6Address_size] + sport : Annotated[int, 16] + dport : Annotated[int, 16] + reserved : Annotated[int, 7] + is_ipv6 : Annotated[int, 1] + + def __init__(self): + # self.smac = 0 + self.dmac = 0 + self.sip = 0 + self.dip = 0 + self.sip_mask = 0 + self.dip_mask = 0 + self.sport = 0 + self.dport = 0 + self.reserved = 0 + self.is_ipv6 = 0 + +OVERLAY_REWRITE_DATA_HDR_SIZE = 75 + +class ethernet_t: + dst_addr : Annotated[int, EthernetAddress_size] + src_addr : Annotated[int, EthernetAddress_size] + ether_type : Annotated[int, 16] + + def __init__(self): + self.dst_addr = 0 + self.src_addr = 0 + self.ether_type = 0 + +ETHER_HDR_SIZE = 14 + +class ipv4_t: + version : Annotated[int, 4] + ihl : Annotated[int, 4] + diffserv : Annotated[int, 8] + total_len : Annotated[int, 16] + identification : Annotated[int, 16] + flags : Annotated[int, 3] + frag_offset : Annotated[int, 13] + ttl : Annotated[int, 8] + protocol : Annotated[int, 8] + hdr_checksum : Annotated[int, 16] + src_addr : Annotated[int, IPv4Address_size] + dst_addr : Annotated[int, IPv4Address_size] + + def __init__(self): + self.version = 0 + self.ihl = 0 + self.diffserv = 0 + self.total_len = 0 + self.identification = 0 + self.flags = 0 + self.frag_offset = 0 + self.ttl = 0 + self.protocol = 0 + self.hdr_checksum = 0 + self.src_addr = 0 + self.dst_addr = 0 + +IPV4_HDR_SIZE = 20 + +class ipv4options_t: + options : Annotated[int, 320] + +class udp_t: + src_port : Annotated[int, 16] + dst_port : Annotated[int, 16] + length : Annotated[int, 16] + checksum : Annotated[int, 16] + +UDP_HDR_SIZE = 8 + +class vxlan_t: + flags : Annotated[int, 8] + reserved : Annotated[int, 24] + vni : Annotated[int, 24] + reserved_2 : Annotated[int, 8] + +VXLAN_HDR_SIZE = 8 + +class nvgre_t: + flags : Annotated[int, 4] + reserved : Annotated[int, 9] + version : Annotated[int, 3] + protocol_type : Annotated[int, 16] + vsid : Annotated[int, 24] + flow_id : Annotated[int, 8] + +NVGRE_HDR_SIZE = 8 + +class tcp_t: + src_port : Annotated[int, 16] + dst_port : 
Annotated[int, 16] + seq_no : Annotated[int, 32] + ack_no : Annotated[int, 32] + data_offset : Annotated[int, 4] + res : Annotated[int, 3] + ecn : Annotated[int, 3] + flags : Annotated[int, 6] + window : Annotated[int, 16] + checksum : Annotated[int, 16] + urgent_ptr : Annotated[int, 16] + +TCP_HDR_SIZE = 20 + +class ipv6_t: + version : Annotated[int, 4] + traffic_class : Annotated[int, 8] + flow_label : Annotated[int, 20] + payload_length : Annotated[int, 16] + next_header : Annotated[int, 8] + hop_limit : Annotated[int, 8] + src_addr : Annotated[int, IPv6Address_size] + dst_addr : Annotated[int, IPv6Address_size] + + def __init__(self): + self.version = 0 + self.traffic_class = 0 + self.flow_label = 0 + self.payload_length = 0 + self.next_header = 0 + self.hop_limit = 0 + self.src_addr = 0 + self.dst_addr = 0 + +IPV6_HDR_SIZE = 40 + +class headers_t: + # packet metadata headers + dp_ethernet : ethernet_t + packet_meta : dash_packet_meta_t + flow_key : flow_key_t + flow_data : flow_data_t + flow_overlay_data : overlay_rewrite_data_t + flow_u0_encap_data : encap_data_t + flow_u1_encap_data : encap_data_t + + # Underlay 1 headers + u1_ethernet : ethernet_t + u1_ipv4 : ipv4_t + u1_ipv4options : ipv4options_t + u1_ipv6 : ipv6_t + u1_udp : udp_t + u1_tcp : tcp_t + u1_vxlan : vxlan_t + u1_nvgre : nvgre_t + + # Underlay 0 headers + u0_ethernet : ethernet_t + u0_ipv4 : ipv4_t + u0_ipv4options : ipv4options_t + u0_ipv6 : ipv6_t + u0_udp : udp_t + u0_tcp : tcp_t + u0_vxlan : vxlan_t + u0_nvgre : nvgre_t + + # Customer headers + customer_ethernet : ethernet_t + customer_ipv4 : ipv4_t + customer_ipv6 : ipv6_t + customer_udp : udp_t + customer_tcp : tcp_t + + def __init__(self): + self.reset() + + def reset(self): + annotations = get_annotations(type(self)) + for k in annotations: + setattr(self, k, None) diff --git a/dash-pipeline/py_model/data_plane/dash_inbound.py b/dash-pipeline/py_model/data_plane/dash_inbound.py new file mode 100644 index 000000000..7be48fdb4 --- /dev/null +++ b/dash-pipeline/py_model/data_plane/dash_inbound.py @@ -0,0 +1,40 @@ +from py_model.libs.__utils import * +from py_model.libs.__table import * + +from py_model.data_plane.dash_tunnel import do_tunnel_encap +from py_model.data_plane.dash_acl import * +from py_model.data_plane.dash_conntrack import * +from py_model.data_plane.stages.inbound_routing import * +from py_model.data_plane.stages.outbound_mapping import * + +class inbound: + @classmethod + def apply(cls): + if STATEFUL_P4: + ConntrackIn.apply() + if PNA_CONNTRACK: + ConntrackIn.apply() + if meta.overlay_data.sip != 0: + do_action_nat64.apply() + + # ACL + if not meta.conntrack_data.allow_in: + acl.apply(cls.__name__) + + if STATEFUL_P4: + ConntrackOut.apply() + elif PNA_CONNTRACK: + ConntrackOut.apply() + + inbound_routing_stage.apply() + + meta.routing_actions = dash_routing_actions_t.ENCAP_U0 + + do_tunnel_encap( + meta.u0_encap_data.underlay_dmac, + meta.u0_encap_data.underlay_smac, + meta.u0_encap_data.underlay_dip, + meta.u0_encap_data.underlay_sip, + dash_encapsulation_t.VXLAN, + meta.u0_encap_data.vni + ) diff --git a/dash-pipeline/py_model/data_plane/dash_metadata.py b/dash-pipeline/py_model/data_plane/dash_metadata.py new file mode 100755 index 000000000..3682b8841 --- /dev/null +++ b/dash-pipeline/py_model/data_plane/dash_metadata.py @@ -0,0 +1,317 @@ +from typing import * +from py_model.data_plane.dash_headers import * + +MAX_ENI = 64 +MAX_HA_SET = 1 + +dash_routing_actions_t = dash_flow_action_t + +class dash_pipeline_stage_t(IntEnum): + INVALID = 0 + + 
# Inbound + INBOUND_STAGE_STARTING = 1 #100 + INBOUND_ROUTING = 2 #100 + + # Outbound + OUTBOUND_STAGE_STARTING = 3 #200 + OUTBOUND_ROUTING = 4 #200 + OUTBOUND_MAPPING = 5 #201 + OUTBOUND_PRE_ROUTING_ACTION_APPLY = 6 #280 + + # Common + ROUTING_ACTION_APPLY = 7 #300 + + __bitwidth__ = 16 + +class dash_eni_mac_override_type_t(IntEnum): + NONE = 0 + SRC_MAC = 1 + DST_MAC = 2 + __bitwidth__ = 8 + +class dash_eni_mac_type_t(IntEnum): + SRC_MAC = 0 + DST_MAC = 1 + __bitwidth__ = 8 + +class dash_eni_mode_t(IntEnum): + VM = 0 + FNIC = 1 + __bitwidth__ = 8 + +class dash_tunnel_dscp_mode_t(IntEnum): + PRESERVE_MODEL = 0 + PIPE_MODEL = 1 + __bitwidth__ = 16 + +class dash_ha_role_t(IntEnum): + DEAD = 0 + ACTIVE = 1 + STANDBY = 2 + STANDALONE = 3 + SWITCHING_TO_ACTIVE = 4 + __bitwidth__ = 8 + +class dash_ha_state_t(IntEnum): + DEAD = 0 + CONNECTING = 1 + CONNECTED = 2 + INITIALIZING_TO_ACTIVE = 3 + INITIALIZING_TO_STANDBY = 4 + PENDING_STANDALONE_ACTIVATION = 5 + PENDING_ACTIVE_ACTIVATION = 6 + PENDING_STANDBY_ACTIVATION = 7 + STANDALONE = 8 + ACTIVE = 9 + STANDBY = 10 + DESTROYING = 11 + SWITCHING_TO_STANDALONE = 12 + __bitwidth__ = 8 + + +class outbound_routing_group_data_t: + outbound_routing_group_id : Annotated[int, 16] + disabled : Annotated[int, 1] + +class conntrack_data_t: + def __init__(self, allow_in=False, allow_out=False): + self.allow_in = allow_in + self.allow_out = allow_out + +class eni_data_t: + cps : Annotated[int, 32] + pps : Annotated[int, 32] + flows : Annotated[int, 32] + admin_state : Annotated[int, 1] + pl_sip : Annotated[int, IPv6Address_size] + pl_sip_mask : Annotated[int, IPv6Address_size] + pl_underlay_sip : Annotated[int, IPv4Address_size] + dscp : Annotated[int, 6] + dscp_mode : dash_tunnel_dscp_mode_t + outbound_routing_group_data : outbound_routing_group_data_t + vip : Annotated[int, IPv4Address_size] + eni_mode : dash_eni_mode_t + + def __init__(self): + self.cps = 0 + self.pps = 0 + self.flows = 0 + self.admin_state = 0 + self.pl_sip = 0 + self.pl_sip_mask = 0 + self.pl_underlay_sip = 0 + self.dscp = 0 + self.dscp_mode = dash_tunnel_dscp_mode_t.PRESERVE_MODEL + self.outbound_routing_group_data = outbound_routing_group_data_t() + self.vip = 0 + self.eni_mode = dash_eni_mode_t.VM + +class port_map_context_t: + map_id : Annotated[int, 16] + service_rewrite_sip : Annotated[int, IPv6Address_size] + service_rewrite_sip_mask : Annotated[int, IPv6Address_size] + service_rewrite_dip : Annotated[int, IPv6Address_size] + service_rewrite_dip_mask : Annotated[int, IPv6Address_size] + + def __init__(self): + self.map_id = 0 + self.service_rewrite_sip = 0 + self.service_rewrite_sip_mask = 0 + self.service_rewrite_dip = 0 + self.service_rewrite_dip_mask = 0 + +class meter_context_t: + meter_class_or : Annotated[int, 32] + meter_class_and : Annotated[int, 32] + meter_policy_id : Annotated[int, 16] + meter_policy_lookup_ip: Annotated[int, IPv4ORv6Address_size] + + def __init__(self): + self.meter_class_or = 0 + self.meter_class_and = 0 + self.meter_policy_id = 0 + self.meter_policy_lookup_ip = 0 + +class ha_data_t: + ha_scope_id : Annotated[int, 16] + ha_set_id : Annotated[int, 16] + ha_role : dash_ha_role_t + local_ip_is_v6 : Annotated[int, 1] + local_ip : Annotated[int, IPv4ORv6Address_size] + peer_ip_is_v6 : Annotated[int, 1] + peer_ip : Annotated[int, IPv4ORv6Address_size] + dp_channel_dst_port : Annotated[int, 16] + dp_channel_src_port_min : Annotated[int, 16] + dp_channel_src_port_max : Annotated[int, 16] + + def __init__(self): + self.ha_scope_id = 0 + self.ha_set_id = 0 + 
self.ha_role = dash_ha_role_t.DEAD + self.local_ip_is_v6 = 0 + self.local_ip = 0 + self.peer_ip_is_v6 = 0 + self.peer_ip = 0 + self.dp_channel_dst_port = 0 + self.dp_channel_src_port_min = 0 + self.dp_channel_src_port_max = 0 + +# if target is TARGET_DPDK_PNA +class meta_flow_data_t: + reserved : Annotated[int, 7] + is_unidirectional : Annotated[int, 1] + direction : dash_direction_t + version : Annotated[int, 32] + actions : dash_flow_action_t + meter_class : Annotated[int, dash_meter_class_t] + idle_timeout_in_ms : Annotated[int, 32] + + def __init__(self): + self.reserved = 0 + self.is_unidirectional = 0 + self.direction = dash_direction_t.INVALID + self.version = 0 + self.actions = dash_flow_action_t.NONE + self.meter_class = 0 + self.idle_timeout_in_ms = 0 + +class meta_encap_data_t: + vni : Annotated[int, 24] + reserved : Annotated[int, 8] + underlay_sip : Annotated[int, IPv4Address_size] + underlay_dip : Annotated[int, IPv4Address_size] + underlay_smac : Annotated[int, EthernetAddress_size] + underlay_dmac : Annotated[int, EthernetAddress_size] + dash_encapsulation : dash_encapsulation_t + + def __init__(self): + self.vni = 0 + self.reserved = 0 + self.underlay_sip = 0 + self.underlay_dip = 0 + self.underlay_smac = 0 + self.underlay_dmac = 0 + self.dash_encapsulation = dash_encapsulation_t.INVALID + +class meta_overlay_rewrite_data_t: + dmac : Annotated[int, EthernetAddress_size] + sip : Annotated[int, IPv4ORv6Address_size] + dip : Annotated[int, IPv4ORv6Address_size] + sip_mask : Annotated[int, IPv6Address_size] + dip_mask : Annotated[int, IPv6Address_size] + sport : Annotated[int, 16] + dport : Annotated[int, 16] + reserved : Annotated[int, 7] + is_ipv6 : Annotated[int, 1] + + def __init__(self): + self.dmac = 0 + self.sip = 0 + self.dip = 0 + self.sip_mask = 0 + self.dip_mask = 0 + self.sport = 0 + self.dport = 0 + self.reserved = 0 + self.is_ipv6 = 0 + +class metadata_t: + dash_acl_group_id : Annotated[int, 16] + acl_outcome_allow : Annotated[int, 1] + acl_outcome_terminate : Annotated[int, 1] + meter_policy_en : Annotated[int, 1] + mapping_meter_class_override : Annotated[int, 1] + # meter_policy_id : Annotated[int, 16] + policy_meter_class : Annotated[int, 16] + route_meter_class : Annotated[int, 16] + mapping_meter_class : Annotated[int, 16] + meter_bucket_index : Annotated[int, 32] + + packet_source : dash_packet_source_t + packet_type : dash_packet_type_t + direction : dash_direction_t + eni_mac_type : dash_eni_mac_type_t + eni_mac_override_type : dash_eni_mac_override_type_t + rx_encap : encap_data_t + eni_addr : Annotated[int, EthernetAddress_size] + vnet_id : Annotated[int, 16] + dst_vnet_id : Annotated[int, 16] + eni_id : Annotated[int, 16] + eni_data : eni_data_t + inbound_vm_id : Annotated[int, 16] + appliance_id : Annotated[int, 8] + is_overlay_ip_v6 : Annotated[int, 1] + is_lkup_dst_ip_v6 : Annotated[int, 1] + ip_protocol : Annotated[int, 8] + dst_ip_addr : Annotated[int, IPv4ORv6Address_size] + src_ip_addr : Annotated[int, IPv4ORv6Address_size] + lkup_dst_ip_addr : Annotated[int, IPv4ORv6Address_size] + src_l4_port : Annotated[int, 16] + dst_l4_port : Annotated[int, 16] + stage1_dash_acl_group_id : Annotated[int, 16] + stage2_dash_acl_group_id : Annotated[int, 16] + stage3_dash_acl_group_id : Annotated[int, 16] + stage4_dash_acl_group_id : Annotated[int, 16] + stage5_dash_acl_group_id : Annotated[int, 16] + tunnel_pointer : Annotated[int, 16] + is_fast_path_icmp_flow_redirection_packet : Annotated[int, 1] + fast_path_icmp_flow_redirection_disabled : Annotated[int, 
1] + port_map_ctx : port_map_context_t + meter_context : meter_context_t + ha : ha_data_t + conntrack_data : conntrack_data_t + flow_data : flow_data_t + flow_sync_state : dash_flow_sync_state_t + flow_table : flow_table_data_t + bulk_get_session_id : Annotated[int, 16] + bulk_get_session_filter_id : Annotated[int, 16] + flow_enabled : Annotated[int, 1] + to_dpapp : Annotated[int, 1] + target_stage : dash_pipeline_stage_t + routing_actions : Annotated[int, 32] + dropped : Annotated[int, 1] + u0_encap_data : encap_data_t + u1_encap_data : encap_data_t + overlay_data : overlay_rewrite_data_t + enable_reverse_tunnel_learning : Annotated[int, 1] + reverse_tunnel_sip : Annotated[int, IPv4Address_size] + dash_tunnel_id : Annotated[int, 16] + dash_tunnel_max_member_size : Annotated[int, 32] + dash_tunnel_member_index : Annotated[int, 16] + dash_tunnel_member_id : Annotated[int, 16] + dash_tunnel_next_hop_id : Annotated[int, 16] + meter_class : Annotated[int, 32] + local_region_id : Annotated[int, 8] + cpu_mac : Annotated[int, EthernetAddress_size] + + def __init__(self): + self.reset() + + self.rx_encap = encap_data_t() + self.eni_data = eni_data_t() + self.port_map_ctx = port_map_context_t() + self.meter_context = meter_context_t() + self.ha = ha_data_t() + self.conntrack_data = conntrack_data_t() + self.flow_data = flow_data_t() + self.flow_table = flow_table_data_t() + self.u0_encap_data = encap_data_t() + self.u1_encap_data = encap_data_t() + self.overlay_data = overlay_rewrite_data_t() + + self.packet_source = dash_packet_source_t.EXTERNAL + self.packet_type = dash_packet_type_t.REGULAR + self.direction = dash_direction_t.INVALID + self.eni_mac_type = dash_eni_mac_type_t.SRC_MAC + self.eni_mac_override_type = dash_eni_mac_override_type_t.NONE + self.flow_sync_state = dash_flow_sync_state_t.FLOW_MISS + self.target_stage = dash_pipeline_stage_t.INVALID + + def reset(self): + annotations = get_type_hints(type(self)) + for k, t in annotations.items(): + if t in (int, bool): + setattr(self, k, 0) + else: + setattr(self, k, None) diff --git a/dash-pipeline/py_model/data_plane/dash_nvgre.py b/dash-pipeline/py_model/data_plane/dash_nvgre.py new file mode 100755 index 000000000..6c8763d10 --- /dev/null +++ b/dash-pipeline/py_model/data_plane/dash_nvgre.py @@ -0,0 +1,53 @@ +from py_model.libs.__utils import * + +def nvgre_encap(underlay_dmac : Annotated[int, 48], + underlay_smac : Annotated[int, 48], + underlay_dip : Annotated[int, 32], + underlay_sip : Annotated[int, 32], + overlay_dmac : Annotated[int, 48], + vsid : Annotated[int, 24]): + hdr.inner_ethernet = hdr.ethernet + hdr.inner_ethernet.dst_addr = overlay_dmac + hdr.ethernet = None + + hdr.inner_ipv4 = hdr.ipv4 + hdr.ipv4 = None + hdr.inner_ipv6 = hdr.ipv6 + hdr.ipv6 = None + hdr.inner_tcp = hdr.tcp + hdr.tcp = None + hdr.inner_udp = hdr.udp + hdr.udp = None + + hdr.ethernet = ethernet_t() + hdr.ethernet.dst_addr = underlay_dmac + hdr.ethernet.src_addr = underlay_smac + hdr.ethernet.ether_type = IPV4_ETHTYPE + + hdr.ipv4 = ipv4_t() + hdr.ipv4.version = 4 + hdr.ipv4.ihl = 5 + hdr.ipv4.diffserv = 0 + + hdr.ipv4.total_len = ETHER_HDR_SIZE + IPV4_HDR_SIZE + NVGRE_HDR_SIZE + if hdr.inner_ipv4: + hdr.ipv4.total_len += hdr.inner_ipv4.total_len + if hdr.inner_ipv6: + hdr.ipv4.total_len += hdr.inner_ipv6.payload_length + IPV6_HDR_SIZE + + hdr.ipv4.identification = 1 + hdr.ipv4.flags = 0 + hdr.ipv4.frag_offset = 0 + hdr.ipv4.ttl = 64 + hdr.ipv4.protocol = NVGRE_PROTO + hdr.ipv4.dst_addr = underlay_dip + hdr.ipv4.src_addr = underlay_sip + 
hdr.ipv4.hdr_checksum = 0 + + hdr.nvgre = nvgre_t() + hdr.nvgre.flags = 4 + hdr.nvgre.reserved = 0 + hdr.nvgre.version = 0 + hdr.nvgre.protocol_type = 0x6558 + hdr.nvgre.vsid = vsid + hdr.nvgre.flow_id = 0 diff --git a/dash-pipeline/py_model/data_plane/dash_outbound.py b/dash-pipeline/py_model/data_plane/dash_outbound.py new file mode 100644 index 000000000..c45e823d6 --- /dev/null +++ b/dash-pipeline/py_model/data_plane/dash_outbound.py @@ -0,0 +1,33 @@ +from py_model.libs.__utils import * +from py_model.libs.__table import * + +from py_model.data_plane.dash_acl import * +from py_model.data_plane.dash_conntrack import * +from py_model.data_plane.stages.outbound_mapping import * +from py_model.data_plane.stages.outbound_routing import * +from py_model.data_plane.stages.outbound_pre_routing_action_apply import * + +class outbound: + @classmethod + def apply(cls): + if STATEFUL_P4: + ConntrackOut.apply() + if PNA_CONNTRACK: + ConntrackOut.apply() + + # ACL + if not meta.conntrack_data.allow_out: + acl.apply(cls.__name__) + + if STATEFUL_P4: + ConntrackIn.apply() + if PNA_CONNTRACK: + ConntrackIn.apply() + + meta.lkup_dst_ip_addr = meta.dst_ip_addr + meta.is_lkup_dst_ip_v6 = meta.is_overlay_ip_v6 + + outbound_routing_stage.apply() + outbound_mapping_stage.apply() + outbound_pre_routing_action_apply_stage.apply() + diff --git a/dash-pipeline/py_model/data_plane/dash_parser.py b/dash-pipeline/py_model/data_plane/dash_parser.py new file mode 100755 index 000000000..d12963c6b --- /dev/null +++ b/dash-pipeline/py_model/data_plane/dash_parser.py @@ -0,0 +1,222 @@ +from enum import Enum, auto +from py_model.libs.__utils import * +from py_model.libs.__packet_in import * +from py_model.libs.__packet_out import * + +class State(Enum): + accept = auto() + reject = auto() + start = auto() + parse_dash_hdr = auto() + parse_u0_ipv4 = auto() + parse_u0_ipv6 = auto() + parse_u0_udp = auto() + parse_u0_tcp = auto() + parse_u0_vxlan = auto() + parse_customer_ethernet = auto() + parse_customer_ipv4 = auto() + parse_customer_ipv6 = auto() + parse_customer_tcp = auto() + parse_customer_udp = auto() + +def dash_parser(packet: packet_in): + py_log("info", "Parser start") + # By default, packet is REGULAR from EXTERNAL + hdr.packet_meta = dash_packet_meta_t() + hdr.packet_meta.packet_source = dash_packet_source_t.EXTERNAL + hdr.packet_meta.packet_type = dash_packet_type_t.REGULAR + hdr.packet_meta.packet_subtype = dash_packet_subtype_t.NONE + hdr.packet_meta.length = PACKET_META_HDR_SIZE + + state = State.start + while True: + state = _dash_parser(packet, hdr, state) + if state==State.accept or state==State.reject: + break + return state + +def _dash_parser(packet: packet_in, hdr: headers_t, state: State): + match state: + case State.start: + py_log("info", "Extracting header 'u0_ethernet'") + hdr.u0_ethernet = packet.extract(ethernet_t) + if hdr.u0_ethernet == None: + return State.reject + if hdr.u0_ethernet.ether_type == IPV4_ETHTYPE: + return State.parse_u0_ipv4 + elif hdr.u0_ethernet.ether_type == IPV6_ETHTYPE: + return State.parse_u0_ipv6 + elif hdr.u0_ethernet.ether_type == DASH_ETHTYPE: + return State.parse_dash_hdr + else: + py_log("info", "Raw Ethernet: ", hdr.u0_ethernet) + py_log("info", "EtherType: ", hdr.u0_ethernet.ether_type) + return State.accept + + case State.parse_u0_ipv4: + py_log("info", "Extracting header 'u0_ipv4'") + hdr.u0_ipv4 = packet.extract(ipv4_t) + if hdr.u0_ipv4 == None: + return State.reject + if not (hdr.u0_ipv4.version == 4): + return State.reject + if not (hdr.u0_ipv4.ihl == 5): + 
return State.reject + if hdr.u0_ipv4.protocol == UDP_PROTO: + return State.parse_u0_udp + elif hdr.u0_ipv4.protocol == TCP_PROTO: + return State.parse_u0_tcp + else: + return State.accept + + case State.parse_u0_ipv6: + py_log("info", "Extracting header 'u0_ipv6'") + hdr.u0_ipv6 = packet.extract(ipv6_t) + if hdr.u0_ipv6 == None: + return State.reject + if hdr.u0_ipv6.next_header == UDP_PROTO: + return State.parse_u0_udp + elif hdr.u0_ipv6.next_header == TCP_PROTO: + return State.parse_u0_tcp + else: + return State.accept + + case State.parse_u0_udp: + py_log("info", "Extracting header 'u0_udp'") + hdr.u0_udp = packet.extract(udp_t) + if hdr.u0_udp == None: + return State.reject + if hdr.u0_udp.dst_port == UDP_PORT_VXLAN: + return State.parse_u0_vxlan + else: + return State.accept + + case State.parse_u0_tcp: + py_log("info", "Extracting header 'u0_tcp'") + hdr.u0_tcp = packet.extract(tcp_t) + if hdr.u0_tcp == None: + return State.reject + return State.accept + + case State.parse_u0_vxlan: + py_log("info", "Extracting header 'u0_vxlan'") + hdr.u0_vxlan = packet.extract(vxlan_t) + if hdr.u0_vxlan == None: + return State.reject + return State.parse_customer_ethernet + + case State.parse_dash_hdr: + py_log("info", "Extracting header 'packet_meta'") + hdr.packet_meta = packet.extract(dash_packet_meta_t) + + if (hdr.packet_meta.packet_subtype == dash_packet_subtype_t.FLOW_CREATE + or hdr.packet_meta.packet_subtype == dash_packet_subtype_t.FLOW_UPDATE + or hdr.packet_meta.packet_subtype == dash_packet_subtype_t.FLOW_DELETE): + # Flow create/update/delete, extract flow_key + hdr.flow_key = packet.extract(flow_key_t) + + if hdr.packet_meta.packet_subtype == dash_packet_subtype_t.FLOW_DELETE: + # Flow delete, extract flow_data + hdr.flow_data = packet.extract(flow_data_t) + + if hdr.flow_data.actions != 0: + hdr.flow_overlay_data = packet.extract(overlay_rewrite_data_t) + + if hdr.flow_data.actions & dash_routing_actions_t.ENCAP_U0 != 0: + hdr.flow_u0_encap_data = packet.extract(encap_data_t) + + if hdr.flow_data.actions & dash_routing_actions_t.ENCAP_U1 != 0: + hdr.flow_u1_encap_data = packet.extract(encap_data_t) + + return State.parse_customer_ethernet + + case State.parse_customer_ethernet: + py_log("info", "Extracting header 'customer_ethernet'") + hdr.customer_ethernet = packet.extract(ethernet_t) + if hdr.customer_ethernet == None: + return State.reject + if hdr.customer_ethernet.ether_type == IPV4_ETHTYPE: + return State.parse_customer_ipv4 + elif hdr.customer_ethernet.ether_type == IPV6_ETHTYPE: + return State.parse_customer_ipv6 + else: + return State.accept + + case State.parse_customer_ipv4: + py_log("info", "Extracting header 'customer_ipv4'") + hdr.customer_ipv4 = packet.extract(ipv4_t) + if hdr.customer_ipv4 == None: + return State.reject + if not (hdr.customer_ipv4.version == 4): + return State.reject + if not (hdr.customer_ipv4.ihl == 5): + return State.reject + if hdr.customer_ipv4.protocol == UDP_PROTO: + return State.parse_customer_udp + elif hdr.customer_ipv4.protocol == TCP_PROTO: + return State.parse_customer_tcp + else: + return State.accept + + case State.parse_customer_ipv6: + py_log("info", "Extracting header 'customer_ipv6'") + hdr.customer_ipv6 = packet.extract(ipv6_t) + if hdr.customer_ipv6 == None: + return State.reject + if hdr.customer_ipv6.next_header == UDP_PROTO: + return State.parse_customer_udp + elif hdr.customer_ipv6.next_header == TCP_PROTO: + return State.parse_customer_tcp + else: + return State.accept + + case State.parse_customer_tcp: + py_log("info", 
"Extracting header 'customer_tcp'") + hdr.customer_tcp = packet.extract(tcp_t) + if hdr.customer_tcp == None: + return State.reject + return State.accept + + case State.parse_customer_udp: + py_log("info", "Extracting header 'customer_udp'") + hdr.customer_udp = packet.extract(udp_t) + if hdr.customer_udp == None: + return State.reject + return State.accept + +def dash_deparser(packet: packet_out): + py_log("info", "Deparser start") + + packet.emit(hdr.dp_ethernet) + packet.emit(hdr.packet_meta) + packet.emit(hdr.flow_key) + packet.emit(hdr.flow_data) + packet.emit(hdr.flow_overlay_data) + packet.emit(hdr.flow_u0_encap_data) + packet.emit(hdr.flow_u1_encap_data) + + packet.emit(hdr.u1_ethernet) + packet.emit(hdr.u1_ipv4) + packet.emit(hdr.u1_ipv4options) + packet.emit(hdr.u1_ipv6) + packet.emit(hdr.u1_udp) + packet.emit(hdr.u1_tcp) + packet.emit(hdr.u1_vxlan) + packet.emit(hdr.u1_nvgre) + + packet.emit(hdr.u0_ethernet) + packet.emit(hdr.u0_ipv4) + packet.emit(hdr.u0_ipv4options) + packet.emit(hdr.u0_ipv6) + packet.emit(hdr.u0_udp) + packet.emit(hdr.u0_tcp) + packet.emit(hdr.u0_vxlan) + packet.emit(hdr.u0_nvgre) + packet.emit(hdr.customer_ethernet) + packet.emit(hdr.customer_ipv4) + packet.emit(hdr.customer_ipv6) + packet.emit(hdr.customer_tcp) + packet.emit(hdr.customer_udp) + + py_log("info", "Deparser end") + diff --git a/dash-pipeline/py_model/data_plane/dash_pipeline.py b/dash-pipeline/py_model/data_plane/dash_pipeline.py new file mode 100644 index 000000000..ea779ec72 --- /dev/null +++ b/dash-pipeline/py_model/data_plane/dash_pipeline.py @@ -0,0 +1,306 @@ +from py_model.libs.__utils import * +from py_model.libs.__table import * + +from py_model.data_plane.dash_acl import * +from py_model.data_plane.dash_inbound import * +from py_model.data_plane.dash_pipeline import * +from py_model.data_plane.dash_underlay import * +from py_model.data_plane.dash_outbound import * +from py_model.data_plane.dash_conntrack import * + +from py_model.data_plane.stages.ha import * +from py_model.data_plane.stages.eni_lookup import * +from py_model.data_plane.stages.trusted_vni import * +from py_model.data_plane.stages.pre_pipeline import * +from py_model.data_plane.stages.tunnel_stage import * +from py_model.data_plane.stages.inbound_routing import * +from py_model.data_plane.stages.metering_update import * +from py_model.data_plane.stages.direction_lookup import * +from py_model.data_plane.stages.conntrack_lookup import * +from py_model.data_plane.stages.outbound_routing import * +from py_model.data_plane.stages.outbound_mapping import * +from py_model.data_plane.stages.conntrack_lookup import * +from py_model.data_plane.stages.routing_action_apply import * +from py_model.data_plane.stages.outbound_pre_routing_action_apply import * + +from py_model.data_plane.dash_tunnel import * +from py_model.data_plane.dash_counters import * +from py_model.data_plane.dash_underlay import * + +class dash_eni_stage: + @staticmethod + def set_eni_attrs(cps: Annotated[int, 32], + pps: Annotated[int, 32], + flows: Annotated[int, 32], + admin_state: Annotated[int, 1], + ha_scope_id: Annotated[int, 16, {"type" : "sai_object_id_t"}], + vm_underlay_dip: Annotated[int, IPv4Address_size, {"type" : "sai_ip_address_t"}], + vm_vni: Annotated[int, 24, {"type" : "sai_uint32_t"}], + vnet_id: Annotated[int, 16, {"type" : "sai_object_id_t"}], + pl_sip: Annotated[int, IPv6Address_size], + pl_sip_mask: Annotated[int, IPv6Address_size], + pl_underlay_sip: Annotated[int, IPv4Address_size, {"type" : "sai_ip_address_t"}], + v4_meter_policy_id: 
Annotated[int, 16, {"type" : "sai_object_id_t"}], + v6_meter_policy_id: Annotated[int, 16, {"type" : "sai_object_id_t"}], + dash_tunnel_dscp_mode: Annotated[dash_tunnel_dscp_mode_t, {"type" : "sai_dash_tunnel_dscp_mode_t"}], + dscp: Annotated[int, 6, {"type" : "sai_uint8_t", + "validonly" : "SAI_ENI_ATTR_DASH_TUNNEL_DSCP_MODE == SAI_DASH_TUNNEL_DSCP_MODE_PIPE_MODEL"}], + + inbound_v4_stage1_dash_acl_group_id: Annotated[int, 16, {"type" : "sai_object_id_t"}], + inbound_v4_stage2_dash_acl_group_id: Annotated[int, 16, {"type" : "sai_object_id_t"}], + inbound_v4_stage3_dash_acl_group_id: Annotated[int, 16, {"type" : "sai_object_id_t"}], + inbound_v4_stage4_dash_acl_group_id: Annotated[int, 16, {"type" : "sai_object_id_t"}], + inbound_v4_stage5_dash_acl_group_id: Annotated[int, 16, {"type" : "sai_object_id_t"}], + + inbound_v6_stage1_dash_acl_group_id: Annotated[int, 16, {"type" : "sai_object_id_t"}], + inbound_v6_stage2_dash_acl_group_id: Annotated[int, 16, {"type" : "sai_object_id_t"}], + inbound_v6_stage3_dash_acl_group_id: Annotated[int, 16, {"type" : "sai_object_id_t"}], + inbound_v6_stage4_dash_acl_group_id: Annotated[int, 16, {"type" : "sai_object_id_t"}], + inbound_v6_stage5_dash_acl_group_id: Annotated[int, 16, {"type" : "sai_object_id_t"}], + + outbound_v4_stage1_dash_acl_group_id: Annotated[int, 16, {"type" : "sai_object_id_t"}], + outbound_v4_stage2_dash_acl_group_id: Annotated[int, 16, {"type" : "sai_object_id_t"}], + outbound_v4_stage3_dash_acl_group_id: Annotated[int, 16, {"type" : "sai_object_id_t"}], + outbound_v4_stage4_dash_acl_group_id: Annotated[int, 16, {"type" : "sai_object_id_t"}], + outbound_v4_stage5_dash_acl_group_id: Annotated[int, 16, {"type" : "sai_object_id_t"}], + + outbound_v6_stage1_dash_acl_group_id: Annotated[int, 16, {"type" : "sai_object_id_t"}], + outbound_v6_stage2_dash_acl_group_id: Annotated[int, 16, {"type" : "sai_object_id_t"}], + outbound_v6_stage3_dash_acl_group_id: Annotated[int, 16, {"type" : "sai_object_id_t"}], + outbound_v6_stage4_dash_acl_group_id: Annotated[int, 16, {"type" : "sai_object_id_t"}], + outbound_v6_stage5_dash_acl_group_id: Annotated[int, 16, {"type" : "sai_object_id_t"}], + + disable_fast_path_icmp_flow_redirection: Annotated[int, 1], + full_flow_resimulation_requested: Annotated[int, 1], + max_resimulated_flow_per_second: Annotated[int, 64], + outbound_routing_group_id: Annotated[int, 16, {"type" : "sai_object_id_t"}], + enable_reverse_tunnel_learning: Annotated[int, 1], + reverse_tunnel_sip: Annotated[int, IPv4Address_size, {"type" : "sai_ip_address_t"}], + is_ha_flow_owner: Annotated[int, 1], + flow_table_id: Annotated[int, 16, {"type" : "sai_object_id_t"}], + dash_eni_mode: Annotated[dash_eni_mode_t, {"type" : "sai_dash_eni_mode_t", "create_only" : "true"}]): + + meta.eni_data.cps = cps + meta.eni_data.pps = pps + meta.eni_data.flows = flows + meta.eni_data.admin_state = admin_state + meta.eni_data.pl_sip = pl_sip + meta.eni_data.pl_sip_mask = pl_sip_mask + meta.eni_data.pl_underlay_sip = pl_underlay_sip + meta.eni_data.eni_mode = dash_eni_mode + meta.u0_encap_data.underlay_dip = vm_underlay_dip + meta.eni_data.outbound_routing_group_data.outbound_routing_group_id = outbound_routing_group_id + if dash_tunnel_dscp_mode == dash_tunnel_dscp_mode_t.PIPE_MODEL: + meta.eni_data.dscp = dscp + # vm_vni is the encap VNI used for tunnel between inbound DPU -> VM + # and not a VNET identifier + meta.u0_encap_data.vni = vm_vni + meta.vnet_id = vnet_id + + meta.enable_reverse_tunnel_learning = enable_reverse_tunnel_learning + 
meta.reverse_tunnel_sip = reverse_tunnel_sip + + if meta.is_overlay_ip_v6 == 1: + if meta.direction == dash_direction_t.OUTBOUND: + # outbound v6 ACL groups + meta.stage1_dash_acl_group_id = outbound_v6_stage1_dash_acl_group_id + meta.stage2_dash_acl_group_id = outbound_v6_stage2_dash_acl_group_id + meta.stage3_dash_acl_group_id = outbound_v6_stage3_dash_acl_group_id + meta.stage4_dash_acl_group_id = outbound_v6_stage4_dash_acl_group_id + meta.stage5_dash_acl_group_id = outbound_v6_stage5_dash_acl_group_id + meta.meter_context.meter_policy_lookup_ip = meta.dst_ip_addr + else: + # inbound v6 ACL groups + meta.stage1_dash_acl_group_id = inbound_v6_stage1_dash_acl_group_id + meta.stage2_dash_acl_group_id = inbound_v6_stage2_dash_acl_group_id + meta.stage3_dash_acl_group_id = inbound_v6_stage3_dash_acl_group_id + meta.stage4_dash_acl_group_id = inbound_v6_stage4_dash_acl_group_id + meta.stage5_dash_acl_group_id = inbound_v6_stage5_dash_acl_group_id + meta.meter_context.meter_policy_lookup_ip = meta.src_ip_addr + + meta.meter_context.meter_policy_id = v6_meter_policy_id + else: + if meta.direction == dash_direction_t.OUTBOUND: + # outbound v4 ACL groups + meta.stage1_dash_acl_group_id = outbound_v4_stage1_dash_acl_group_id + meta.stage2_dash_acl_group_id = outbound_v4_stage2_dash_acl_group_id + meta.stage3_dash_acl_group_id = outbound_v4_stage3_dash_acl_group_id + meta.stage4_dash_acl_group_id = outbound_v4_stage4_dash_acl_group_id + meta.stage5_dash_acl_group_id = outbound_v4_stage5_dash_acl_group_id + meta.meter_context.meter_policy_lookup_ip = meta.dst_ip_addr + else: + # inbound v4 ACL groups + meta.stage1_dash_acl_group_id = inbound_v4_stage1_dash_acl_group_id + meta.stage2_dash_acl_group_id = inbound_v4_stage2_dash_acl_group_id + meta.stage3_dash_acl_group_id = inbound_v4_stage3_dash_acl_group_id + meta.stage4_dash_acl_group_id = inbound_v4_stage4_dash_acl_group_id + meta.stage5_dash_acl_group_id = inbound_v4_stage5_dash_acl_group_id + meta.meter_context.meter_policy_lookup_ip = meta.src_ip_addr + + meta.meter_context.meter_policy_id = v4_meter_policy_id + + meta.ha.ha_scope_id = ha_scope_id + meta.fast_path_icmp_flow_redirection_disabled = disable_fast_path_icmp_flow_redirection + + meta.flow_table.id = flow_table_id + + eni = Table( + key={ + "meta.eni_id": (EXACT, {"type" : "sai_object_id_t"}) + }, + actions=[ + set_eni_attrs, + (deny, {"annotations": "@defaultonly"}) + ], + const_default_action=deny, + tname=f"{__qualname__}.eni", + sai_table=SaiTable(name="eni", api="dash_eni", order=1, isobject="true",), +) + + @classmethod + def apply(cls): + py_log("info", "Applying table 'eni' ") + if not cls.eni.apply()["hit"]: + UPDATE_COUNTER("eni_miss_drop", 0) + +class dash_lookup_stage: + @classmethod + def apply(cls): + pre_pipeline_stage.apply() + direction_lookup_stage.apply() + eni_lookup_stage.apply() + dash_eni_stage.apply() + + # Admin state check + if meta.eni_data.admin_state == 0: + deny() + + # Counters + UPDATE_ENI_COUNTER("eni_rx") + + if meta.direction == dash_direction_t.OUTBOUND: + UPDATE_ENI_COUNTER("eni_outbound_rx") + elif meta.direction == dash_direction_t.INBOUND: + UPDATE_ENI_COUNTER("eni_inbound_rx") + + if meta.is_fast_path_icmp_flow_redirection_packet: + UPDATE_ENI_COUNTER("eni_lb_fast_path_icmp_in") + + # Tunnel decap + do_tunnel_decap() + +class dash_match_stage: + @staticmethod + def set_acl_group_attrs(ip_addr_family: Annotated[int, 32, {"type" : "sai_ip_addr_family_t", + "isresourcetype" : "true"}]): + if ip_addr_family == 0: # SAI_IP_ADDR_FAMILY_IPV4 + if 
meta.is_overlay_ip_v6 == 1: + meta.dropped = True + else: + if meta.is_overlay_ip_v6 == 0: + meta.dropped = True + + acl_group = Table( + key={ + "meta.stage1_dash_acl_group_id": (EXACT, {"name" : "dash_acl_group_id"}) + }, + actions=[ + set_acl_group_attrs + ], + tname=f"{__qualname__}.acl_group", + sai_table=SaiTable(name="dash_acl_group", api="dash_acl", isobject="true",), + ) + + @classmethod + def apply(cls): + if meta.dropped: + return + + py_log("info", "Applying table 'acl_group' ") + cls.acl_group.apply() + + if meta.direction == dash_direction_t.OUTBOUND: + meta.target_stage = dash_pipeline_stage_t.OUTBOUND_ROUTING + outbound.apply() + elif meta.direction == dash_direction_t.INBOUND: + meta.target_stage = dash_pipeline_stage_t.INBOUND_ROUTING + inbound.apply() + +class dash_ingress: + @staticmethod + def drop_action(): + if TARGET == TARGET_PYTHON_V1MODEL: + mark_to_drop(standard_metadata) + + if TARGET == TARGET_DPDK_PNA: + pass + + @classmethod + def apply(cls): + if TARGET != TARGET_DPDK_PNA: + meta.rx_encap = encap_data_t() + meta.flow_data = flow_data_t() + meta.u0_encap_data = encap_data_t() + meta.u1_encap_data = encap_data_t() + meta.overlay_data = overlay_rewrite_data_t() + + # If packet is from DPAPP, not do common lookup + if hdr.packet_meta.packet_source != dash_packet_source_t.DPAPP: + dash_lookup_stage.apply() + else: + meta.flow_enabled = True + + if meta.flow_enabled: + conntrack_lookup_stage.apply() + + ha_stage.apply() + + + if (not meta.flow_enabled or + (meta.flow_sync_state == dash_flow_sync_state_t.FLOW_MISS and + hdr.packet_meta.packet_source == dash_packet_source_t.EXTERNAL)): + + # TODO: revisit it after inbound route HLD done + trusted_vni_stage.apply() + dash_match_stage.apply() + + if meta.dropped: + cls.drop_action() + return + + if meta.flow_enabled: + conntrack_flow_handle.apply() + + if meta.to_dpapp: + if TARGET == TARGET_PYTHON_V1MODEL: + standard_metadata.egress_spec = 2 # FIXME hard-code vpp port + elif TARGET == TARGET_DPDK_PNA: + cls.drop_action() + return + else: + hdr.packet_meta = None + + routing_action_apply.apply() + + # Underlay routing: using meta.dst_ip_addr as lookup key + if meta.routing_actions & dash_routing_actions_t.ENCAP_U1 != 0: + meta.dst_ip_addr = hdr.u1_ipv4.dst_addr + elif meta.routing_actions & dash_routing_actions_t.ENCAP_U0 != 0: + meta.dst_ip_addr = hdr.u0_ipv4.dst_addr + + underlay.apply() + + + if meta.eni_data.dscp_mode == dash_tunnel_dscp_mode_t.PIPE_MODEL: + hdr.u0_ipv4.diffserv = meta.eni_data.dscp + + metering_update_stage.apply() + + if meta.dropped: + cls.drop_action() + else: + UPDATE_ENI_COUNTER("eni_tx") + if meta.direction == dash_direction_t.OUTBOUND: + UPDATE_ENI_COUNTER("eni_outbound_tx") + elif meta.direction == dash_direction_t.INBOUND: + UPDATE_ENI_COUNTER("eni_inbound_tx") diff --git a/dash-pipeline/py_model/data_plane/dash_routing_types.py b/dash-pipeline/py_model/data_plane/dash_routing_types.py new file mode 100644 index 000000000..8975771d0 --- /dev/null +++ b/dash-pipeline/py_model/data_plane/dash_routing_types.py @@ -0,0 +1,158 @@ +from py_model.libs.__utils import * +from py_model.data_plane.dash_tunnel import * +from py_model.data_plane.routing_actions.routing_actions import * + +def set_meter_attrs(meter_class_or: Annotated[int, 32], + meter_class_and: Annotated[int, 32]): + + meta.meter_context.meter_class_or = meta.meter_context.meter_class_or | meter_class_or + meta.meter_context.meter_class_and = meta.meter_context.meter_class_and & meter_class_and + +# Routing Type - drop: +def 
drop():
+    meta.target_stage = dash_pipeline_stage_t.OUTBOUND_PRE_ROUTING_ACTION_APPLY
+    meta.dropped = True
+
+# Routing Type - vnet:
+# - Continue to look up in VNET mapping stage with the destination VNET ID.
+# - No routing action will be populated in this routing type.
+def route_vnet(dst_vnet_id : Annotated[int, 16, {"type" : "sai_object_id_t"}],
+               dash_tunnel_id : Annotated[int, 16, {"type" : "sai_object_id_t"}],
+               meter_class_or : Annotated[int, 32],
+               meter_class_and : Annotated[int, 32, {"default_value" : "4294967295"}],
+               routing_actions_disabled_in_flow_resimulation : dash_flow_action_t):
+
+    meta.dash_tunnel_id = dash_tunnel_id
+    meta.target_stage = dash_pipeline_stage_t.OUTBOUND_MAPPING
+    meta.dst_vnet_id = dst_vnet_id
+    set_meter_attrs(meter_class_or, meter_class_and)
+
+# Routing Type - vnet_direct:
+# - Forward with an overridden destination overlay IP.
+# - No routing action will be populated in this routing type.
+def route_vnet_direct(dst_vnet_id : Annotated[int, 16],
+                      dash_tunnel_id : Annotated[int, 16, {"type" : "sai_object_id_t"}],
+                      overlay_ip_is_v6 : Annotated[int, 1],
+                      overlay_ip : Annotated[int, IPv6Address_size, {"type" : "sai_ip_address_t"}],
+                      meter_class_or : Annotated[int, 32],
+                      meter_class_and : Annotated[int, 32, {"default_value" : "4294967295"}],
+                      routing_actions_disabled_in_flow_resimulation : dash_flow_action_t):
+
+    meta.dash_tunnel_id = dash_tunnel_id
+
+    meta.target_stage = dash_pipeline_stage_t.OUTBOUND_MAPPING
+    meta.dst_vnet_id = dst_vnet_id
+    meta.lkup_dst_ip_addr = overlay_ip
+    meta.is_lkup_dst_ip_v6 = overlay_ip_is_v6
+    set_meter_attrs(meter_class_or, meter_class_and)
+
+# Routing Type - direct:
+# - Send to the underlay router without any encap.
+# - No routing action will be populated in this routing type.
+def route_direct(dash_tunnel_id : Annotated[int, 16, {"type" : "sai_object_id_t"}],
+                 meter_class_or : Annotated[int, 32],
+                 meter_class_and : Annotated[int, 32, {"default_value" : "4294967295"}],
+                 routing_actions_disabled_in_flow_resimulation : dash_flow_action_t):
+
+    meta.target_stage = dash_pipeline_stage_t.OUTBOUND_PRE_ROUTING_ACTION_APPLY
+    set_meter_attrs(meter_class_or, meter_class_and)
+    meta.dash_tunnel_id = dash_tunnel_id
+
+# Routing Type - service_tunnel:
+# - Encap the packet with the given overlay and underlay addresses.
+# - Perform 4-to-6 translation on the overlay addresses.
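+# Illustrative example of the 4-to-6 translation performed by the nat46
+# action pushed below (values are hypothetical, not from any SDN policy):
+# bits where the mask is 1 come from the configured overlay address, bits
+# where it is 0 are kept from the original IPv4 address, i.e.
+#     translated = (ipv4_addr & ~mask) | (overlay_addr & mask)
+# With overlay_dip = 2001:db8:: and a /96 prefix mask of
+# ffff:ffff:ffff:ffff:ffff:ffff::, an original destination of 10.0.0.1 is
+# rewritten to 2001:db8::a00:1, the IPv4 address occupying the low 32 bits
+# of the resulting IPv6 address.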
+def route_service_tunnel(overlay_dip_is_v6 : Annotated[int, 1], + overlay_dip : Annotated[int, IPv4ORv6Address_size], + overlay_dip_mask_is_v6 : Annotated[int, 1], + overlay_dip_mask : Annotated[int, IPv4ORv6Address_size], + overlay_sip_is_v6 : Annotated[int, 1], + overlay_sip : Annotated[int, IPv4ORv6Address_size], + overlay_sip_mask_is_v6 : Annotated[int, 1], + overlay_sip_mask : Annotated[int, IPv4ORv6Address_size], + underlay_dip_is_v6 : Annotated[int, 1], + underlay_dip : Annotated[int, IPv4ORv6Address_size], + underlay_sip_is_v6 : Annotated[int, 1], + underlay_sip : Annotated[int, IPv4ORv6Address_size], + dash_encapsulation : Annotated[dash_encapsulation_t, + {"type" : "sai_dash_encapsulation_t", + "default_value" : "SAI_DASH_ENCAPSULATION_VXLAN"}], + tunnel_key : Annotated[int, 24], + dash_tunnel_id : Annotated[int, 16, {"type" : "sai_object_id_t"}], + meter_class_or : Annotated[int, 32], + meter_class_and : Annotated[int, 32, {"default_value" : "4294967295"}], + routing_actions_disabled_in_flow_resimulation : dash_flow_action_t): + + meta.dash_tunnel_id = dash_tunnel_id + + # Assume the overlay addresses provided are always IPv6 and the original are IPv4 + # Required(overlay_dip_is_v6 == 1 and overlay_sip_is_v6 == 1) + # Required(overlay_dip_mask_is_v6 == 1 and overlay_sip_mask_is_v6 == 1) + # Required(underlay_dip_is_v6 != 1 and underlay_sip_is_v6 != 1) + + meta.target_stage = dash_pipeline_stage_t.OUTBOUND_PRE_ROUTING_ACTION_APPLY + + push_action_nat46(overlay_sip, overlay_sip_mask, + overlay_dip, overlay_dip_mask) + + # Python support arithmetic on 128-bit operands + push_action_set_dmac(hdr.u0_ethernet.dst_addr) + + push_action_encap_u0(encap = dash_encapsulation, + vni = tunnel_key, + underlay_sip = underlay_sip if underlay_sip != 0 else hdr.u0_ipv4.src_addr, + underlay_dip = underlay_dip if underlay_dip != 0 else hdr.u0_ipv4.dst_addr) + + set_meter_attrs(meter_class_or, meter_class_and) + +def set_tunnel_mapping(underlay_dip : Annotated[int, IPv4Address_size , {"type" : "sai_ip_address_t"}], + overlay_dmac : Annotated[int, EthernetAddress_size], + use_dst_vnet_vni : Annotated[int, 1], + meter_class_or : Annotated[int, 32], + dash_tunnel_id : Annotated[int, 16, {"type" : "sai_object_id_t"}], + flow_resimulation_requested : Annotated[int, 1], + routing_actions_disabled_in_flow_resimulation : dash_flow_action_t): + meta.target_stage = dash_pipeline_stage_t.OUTBOUND_PRE_ROUTING_ACTION_APPLY + meta.dash_tunnel_id = dash_tunnel_id + + if use_dst_vnet_vni == 1: + meta.vnet_id = meta.dst_vnet_id + + push_action_set_dmac(overlay_dmac) + + push_action_encap_u0(underlay_dip=underlay_dip) + + set_meter_attrs(meter_class_or, 0xffffffff) + +def set_private_link_mapping(underlay_dip : Annotated[int, IPv4Address_size, {"type" : "sai_ip_address_t"}], + overlay_sip : Annotated[int, IPv6Address_size], + overlay_sip_mask : Annotated[int, IPv6Address_size], + overlay_dip : Annotated[int, IPv6Address_size], + overlay_dip_mask : Annotated[int, IPv6Address_size], + dash_encapsulation : Annotated[dash_encapsulation_t, {"type" : "sai_dash_encapsulation_t"}], + tunnel_key : Annotated[int, 24], + meter_class_or : Annotated[int, 32], + dash_tunnel_id : Annotated[int, 16, {"type" : "sai_object_id_t"}], + flow_resimulation_requested : Annotated[int, 1], + routing_actions_disabled_in_flow_resimulation : dash_flow_action_t, + outbound_port_map_id : Annotated[int, 16, {"type" : "sai_object_id_t"}]): + + meta.target_stage = dash_pipeline_stage_t.OUTBOUND_PRE_ROUTING_ACTION_APPLY + meta.dash_tunnel_id = 
dash_tunnel_id + + push_action_set_dmac(hdr.u0_ethernet.dst_addr) + + ##TODO Pass required arguments + push_action_encap_u0(dash_encapsulation, + vni = tunnel_key, + # PL has its own underlay SIP, so override + underlay_sip = meta.eni_data.pl_underlay_sip, + underlay_dip = underlay_dip) + + # Python support arithmetic on 128-bit operands + push_action_nat46(overlay_dip, overlay_dip_mask, + (((meta.src_ip_addr & ~overlay_sip_mask) | overlay_sip) & ~meta.eni_data.pl_sip_mask) | meta.eni_data.pl_sip, + 0xffffffffffffffffffffffff) + + meta.port_map_ctx.map_id = outbound_port_map_id + + set_meter_attrs(meter_class_or, 0xffffffff) \ No newline at end of file diff --git a/dash-pipeline/py_model/data_plane/dash_service_tunnel.py b/dash-pipeline/py_model/data_plane/dash_service_tunnel.py new file mode 100755 index 000000000..ef40c7e75 --- /dev/null +++ b/dash-pipeline/py_model/data_plane/dash_service_tunnel.py @@ -0,0 +1,41 @@ +from py_model.libs.__utils import * + +# Encodes V4 in V6 +def service_tunnel_encode(st_dst : Annotated[int, IPv6Address_size], + st_dst_mask : Annotated[int, IPv6Address_size], + st_src : Annotated[int, IPv6Address_size], + st_src_mask : Annotated[int, IPv6Address_size]): + hdr.u0_ipv6 = ipv6_t() + hdr.u0_ipv6.version = 6 + hdr.u0_ipv6.traffic_class = 0 + hdr.u0_ipv6.flow_label = 0 + hdr.u0_ipv6.payload_length = hdr.u0_ipv4.total_len - IPV4_HDR_SIZE + hdr.u0_ipv6.next_header = hdr.u0_ipv4.protocol + hdr.u0_ipv6.hop_limit = hdr.u0_ipv4.ttl + + # Python support arithmetic on 128-bit operands + hdr.u0_ipv6.dst_addr = (hdr.u0_ipv4.dst_addr & ~st_dst_mask) | (st_dst & st_dst_mask) + hdr.u0_ipv6.src_addr = (hdr.u0_ipv4.src_addr & ~st_src_mask) | (st_src & st_src_mask) + + hdr.u0_ipv4 = None + hdr.ethernet.ether_type = IPV6_ETHTYPE + +# Decodes V4 from V6 +def service_tunnel_decode(src : Annotated[int, IPv4Address_size], + dst : Annotated[int, IPv4Address_size]): + hdr.u0_ipv4 = ipv4_t() + hdr.u0_ipv4.version = 4 + hdr.u0_ipv4.ihl = 5 + hdr.u0_ipv4.diffserv = 0 + hdr.u0_ipv4.total_len = hdr.u0_ipv6.payload_length + IPV4_HDR_SIZE + hdr.u0_ipv4.identification = 1 + hdr.u0_ipv4.flags = 0 + hdr.u0_ipv4.frag_offset = 0 + hdr.u0_ipv4.protocol = hdr.u0_ipv6.next_header + hdr.u0_ipv4.ttl = hdr.u0_ipv6.hop_limit + hdr.u0_ipv4.hdr_checksum = 0 + hdr.u0_ipv4.dst_addr = dst + hdr.u0_ipv4.src_addr = src + + hdr.u0_ipv6 = None + hdr.ethernet.ether_type = IPV4_ETHTYPE diff --git a/dash-pipeline/py_model/data_plane/dash_tunnel.py b/dash-pipeline/py_model/data_plane/dash_tunnel.py new file mode 100644 index 000000000..591758332 --- /dev/null +++ b/dash-pipeline/py_model/data_plane/dash_tunnel.py @@ -0,0 +1,151 @@ +from py_model.libs.__utils import * + +# Return hdr.name, creating it if None. 
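+# Usage sketch: eth = ensure_hdr(hdr, "u0_ethernet", ethernet_t) returns the
+# existing hdr.u0_ethernet if one was already parsed; otherwise it creates an
+# empty ethernet_t, attaches it to hdr and returns it, so callers can fill in
+# the header fields unconditionally.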
+def ensure_hdr(hdr, name, cls): + if getattr(hdr, name, None) is None: + setattr(hdr, name, cls()) + return getattr(hdr, name) + +if TARGET_PYTHON_V1MODEL: + def PUSH_VXLAN_TUNNEL_DEF(underlay_id, overlay_id): + def push_vxlan_tunnel(underlay_dmac: Annotated[int, EthernetAddress_size], + underlay_smac: Annotated[int, EthernetAddress_size], + underlay_dip: Annotated[int, IPv4Address_size], + underlay_sip: Annotated[int, IPv4Address_size], + tunnel_key: Annotated[int, 24]): + + eth = ensure_hdr(hdr, f"{underlay_id}_ethernet", ethernet_t) + eth.dst_addr = underlay_dmac + eth.src_addr = underlay_smac + eth.ether_type = IPV4_ETHTYPE + + overlay_ipv4 = getattr(hdr, f"{overlay_id}_ipv4") # may be None + overlay_ipv6 = getattr(hdr, f"{overlay_id}_ipv6") # may be None + + ipv4 = ensure_hdr(hdr, f"{underlay_id}_ipv4", ipv4_t) + ipv4_len = overlay_ipv4.total_len if overlay_ipv4 is not None else 0 + ipv6_len = overlay_ipv6.payload_length if overlay_ipv6 is not None else 0 + ipv6_hdr = IPV6_HDR_SIZE if overlay_ipv6 is not None else 0 + + ipv4.total_len = ipv4_len + ipv6_len + ipv6_hdr + ETHER_HDR_SIZE + IPV4_HDR_SIZE + UDP_HDR_SIZE + VXLAN_HDR_SIZE + ipv4.version = 4 + ipv4.ihl = 5 + ipv4.diffserv = 0 + ipv4.identification = 1 + ipv4.flags = 0 + ipv4.frag_offset = 0 + ipv4.ttl = 64 + ipv4.protocol = UDP_PROTO + ipv4.dst_addr = underlay_dip + ipv4.src_addr = underlay_sip + ipv4.hdr_checksum = 0 + + udp = ensure_hdr(hdr, f"{underlay_id}_udp", udp_t) + udp.src_port = 0 + udp.dst_port = UDP_PORT_VXLAN + udp.length = ipv4_len + ipv6_len + ipv6_hdr + UDP_HDR_SIZE + VXLAN_HDR_SIZE + ETHER_HDR_SIZE + udp.checksum = 0 + + vxlan = ensure_hdr(hdr, f"{underlay_id}_vxlan", vxlan_t) + vxlan.reserved = 0 + vxlan.reserved_2 = 0 + vxlan.flags = 0x8 + vxlan.vni = tunnel_key + + push_vxlan_tunnel.__name__ = f"push_vxlan_tunnel_{underlay_id}" + return push_vxlan_tunnel + +if TARGET_DPDK_PNA: + def PUSH_VXLAN_TUNNEL_DEF(underlay_id, overlay_id): + pass + +if TARGET_PYTHON_V1MODEL: + def PUSH_NVGRE_TUNNEL_DEF(underlay_id, overlay_id): + def push_nvgre_tunnel(underlay_dmac: Annotated[int, EthernetAddress_size], + underlay_smac: Annotated[int, EthernetAddress_size], + underlay_dip: Annotated[int, IPv4Address_size], + underlay_sip: Annotated[int, IPv4Address_size], + tunnel_key: Annotated[int, 24]): + + eth = ensure_hdr(hdr, f"{underlay_id}_ethernet", ethernet_t) + eth.dst_addr = underlay_dmac + eth.src_addr = underlay_smac + eth.ether_type = IPV4_ETHTYPE + + overlay_ipv4 = getattr(hdr, f"{overlay_id}_ipv4") # may be None + overlay_ipv6 = getattr(hdr, f"{overlay_id}_ipv6") # may be None + + ipv4_len = overlay_ipv4.total_len if overlay_ipv4 is not None else 0 + ipv6_len = overlay_ipv6.payload_length if overlay_ipv6 is not None else 0 + ipv6_hdr = IPV6_HDR_SIZE if overlay_ipv6 is not None else 0 + + ipv4 = ensure_hdr(hdr, f"{underlay_id}_ipv4", ipv4_t) + ipv4.total_len = ipv4_len + ipv6_len + ipv6_hdr + ETHER_HDR_SIZE + IPV4_HDR_SIZE + NVGRE_HDR_SIZE + ipv4.total_len += ETHER_HDR_SIZE + IPV4_HDR_SIZE + NVGRE_HDR_SIZE + + ipv4.version = 4 + ipv4.ihl = 5 + ipv4.diffserv = 0 + ipv4.identification = 1 + ipv4.flags = 0 + ipv4.frag_offset = 0 + ipv4.ttl = 64 + ipv4.protocol = NVGRE_PROTO + ipv4.dst_addr = underlay_dip + ipv4.src_addr = underlay_sip + ipv4.hdr_checksum = 0 + + nvgre = ensure_hdr(hdr, f"{underlay_id}_nvgre", nvgre_t) + nvgre.flags = 4 + nvgre.reserved = 0 + nvgre.version = 0 + nvgre.protocol_type = 0x6558 + nvgre.vsid = tunnel_key + nvgre.flow_id = 0 + + push_nvgre_tunnel.__name__ = f"push_nvgre_tunnel_{underlay_id}" + 
return push_nvgre_tunnel + +if TARGET_DPDK_PNA: + def PUSH_NVGRE_TUNNEL_DEF(underlay_id, overlay_id): + pass + +push_vxlan_tunnel_u0 = PUSH_VXLAN_TUNNEL_DEF("u0", "customer") +push_vxlan_tunnel_u1 = PUSH_VXLAN_TUNNEL_DEF("u1", "u0") +push_nvgre_tunnel_u0 = PUSH_NVGRE_TUNNEL_DEF("u0", "customer") +push_nvgre_tunnel_u1 = PUSH_NVGRE_TUNNEL_DEF("u1", "u0") + +def do_tunnel_encap( underlay_dmac : Annotated[int, EthernetAddress_size], + underlay_smac : Annotated[int, EthernetAddress_size], + underlay_dip : Annotated[int, IPv4Address_size], + underlay_sip : Annotated[int, IPv4Address_size], + dash_encapsulation : dash_encapsulation_t, + tunnel_key: Annotated[int, 24]): + if dash_encapsulation == dash_encapsulation_t.VXLAN: + if meta.tunnel_pointer == 0: + push_vxlan_tunnel_u0(underlay_dmac, underlay_smac, underlay_dip, underlay_sip, tunnel_key) + elif meta.tunnel_pointer == 1: + push_vxlan_tunnel_u1(underlay_dmac, underlay_smac, underlay_dip, underlay_sip, tunnel_key) + + elif dash_encapsulation == dash_encapsulation_t.NVGRE: + if meta.tunnel_pointer == 0: + push_nvgre_tunnel_u0(underlay_dmac, underlay_smac, underlay_dip, underlay_sip, tunnel_key) + elif meta.tunnel_pointer == 1: + push_nvgre_tunnel_u1(underlay_dmac, underlay_smac, underlay_dip, underlay_sip, tunnel_key) + + meta.tunnel_pointer += 1 + +# Tunnel decap can only pop u0 because that's what was parsed. +# If the packet has more than one tunnel on ingress, BM will +# reparse it. +# It is also assumed, that if DASH pushes more than one tunnel, +# they won't need to pop them */ +def do_tunnel_decap(): + hdr.u0_ethernet = None + hdr.u0_ipv4 = None + hdr.u0_ipv6 = None + hdr.u0_nvgre = None + hdr.u0_vxlan = None + hdr.u0_udp = None + + meta.tunnel_pointer = 0 diff --git a/dash-pipeline/py_model/data_plane/dash_underlay.py b/dash-pipeline/py_model/data_plane/dash_underlay.py new file mode 100755 index 000000000..7f6d96167 --- /dev/null +++ b/dash-pipeline/py_model/data_plane/dash_underlay.py @@ -0,0 +1,58 @@ +from py_model.libs.__utils import * +from py_model.libs.__table import * + +# The values in this context have been sourced from the 'saiswitch.h' file and +# have been manually designated to maintain alignment with enum values specified in the SAI commit . +SAI_PACKET_ACTION_DROP = 0 +SAI_PACKET_ACTION_FORWARD = 1 + +class underlay: + # Send packet on different/same port it arrived based on routing + @staticmethod + def set_nhop(next_hop_id : Annotated[int, 9]): + if TARGET == TARGET_PYTHON_V1MODEL: + standard_metadata.egress_spec = next_hop_id + if TARGET == TARGET_DPDK_PNA: + pass + + @staticmethod + def pkt_act(packet_action: Annotated[int, 9], next_hop_id: Annotated[int, 9]): + if packet_action == SAI_PACKET_ACTION_DROP: + # Drops the packet + meta.dropped = True + elif packet_action == SAI_PACKET_ACTION_FORWARD: + # Forwards the packet on different/same port it arrived based on routing + underlay.set_nhop(next_hop_id) + + @staticmethod + def def_act(): + if TARGET == TARGET_PYTHON_V1MODEL: + # if hdr.packet_meta.packet_source == dash_packet_source_t.DPAPP: + if hdr.packet_meta and (hdr.packet_meta.packet_source == dash_packet_source_t.DPAPP): + standard_metadata.egress_spec = 0; # FIXME + else: + standard_metadata.egress_spec = standard_metadata.ingress_port + + if TARGET == TARGET_DPDK_PNA: + pass + + underlay_routing = Table( + key = { + "meta.dst_ip_addr": (LPM, {"name" : "destination"}) + }, + actions = [ + # Processes a packet based on the specified packet action. 
+ # Depending on the packet action, it either drops the packet or forwards it to the specified next-hop. + pkt_act, + (def_act, {"annotations": "@defaultonly"}), + ], + # Send packet on same port it arrived (echo) by default + const_default_action=def_act, + tname=f"{__qualname__}.underlay_routing", + sai_table=SaiTable(name="route", api="route", api_type="underlay",), + ) + + @classmethod + def apply(cls): + py_log("info", "Applying table 'underlay_routing'") + cls.underlay_routing.apply() diff --git a/dash-pipeline/py_model/data_plane/dash_vxlan.py b/dash-pipeline/py_model/data_plane/dash_vxlan.py new file mode 100755 index 000000000..fd45afa02 --- /dev/null +++ b/dash-pipeline/py_model/data_plane/dash_vxlan.py @@ -0,0 +1,82 @@ +from py_model.libs.__utils import * + +def vxlan_encap(underlay_dmac : Annotated[int, EthernetAddress_size], + underlay_smac : Annotated[int, EthernetAddress_size], + underlay_dip : Annotated[int, IPv4Address_size], + underlay_sip : Annotated[int, IPv4Address_size], + overlay_dmac : Annotated[int, EthernetAddress_size], + vni : Annotated[int, 24]): + hdr.inner_ethernet = hdr.ethernet + hdr.inner_ethernet.dst_addr = overlay_dmac + hdr.ethernet = None + + hdr.inner_ipv4 = hdr.ipv4 + hdr.ipv4 = None + hdr.inner_ipv6 = hdr.ipv6 + hdr.ipv6 = None + hdr.inner_tcp = hdr.tcp + hdr.tcp = None + hdr.inner_udp = hdr.udp + hdr.udp = None + + hdr.ethernet = ethernet_t() + hdr.ethernet.dst_addr = underlay_dmac + hdr.ethernet.src_addr = underlay_smac + hdr.ethernet.ether_type = IPV4_ETHTYPE + + hdr.ipv4 = ipv4_t() + hdr.ipv4.version = 4 + hdr.ipv4.ihl = 5 + hdr.ipv4.diffserv = 0 + + hdr.ipv4.total_len = ETHER_HDR_SIZE + IPV4_HDR_SIZE + UDP_HDR_SIZE + VXLAN_HDR_SIZE + if hdr.inner_ipv4: + hdr.ipv4.total_len += hdr.inner_ipv4.total_len + if hdr.inner_ipv6: + hdr.ipv4.total_len += hdr.inner_ipv6.payload_length + IPV6_HDR_SIZE + + hdr.ipv4.identification = 1 + hdr.ipv4.flags = 0 + hdr.ipv4.frag_offset = 0 + hdr.ipv4.ttl = 64 + hdr.ipv4.protocol = UDP_PROTO + hdr.ipv4.dst_addr = underlay_dip + hdr.ipv4.src_addr = underlay_sip + hdr.ipv4.hdr_checksum = 0 + + hdr.udp = udp_t() + hdr.udp.src_port = 0 + hdr.udp.dst_port = UDP_PORT_VXLAN + hdr.udp.length = UDP_HDR_SIZE + VXLAN_HDR_SIZE + ETHER_HDR_SIZE + + if hdr.inner_ipv4: + hdr.udp.length += hdr.inner_ipv4.total_len + if hdr.inner_ipv6: + hdr.udp.length += hdr.inner_ipv6.payload_length + IPV6_HDR_SIZE + + hdr.udp.checksum = 0 + + hdr.vxlan = vxlan_t() + hdr.vxlan.reserved = 0 + hdr.vxlan.reserved_2 = 0 + hdr.vxlan.flags = 0 + hdr.vxlan.vni = vni + +def vxlan_decap(): + hdr.ethernet = hdr.inner_ethernet + hdr.inner_ethernet = None + + hdr.ipv4 = hdr.inner_ipv4 + hdr.inner_ipv4 = None + + hdr.ipv6 = hdr.inner_ipv6 + hdr.inner_ipv6 = None + + hdr.vxlan = None + hdr.udp = None + + hdr.tcp = hdr.inner_tcp + hdr.inner_tcp = None + + hdr.udp = hdr.inner_udp + hdr.inner_udp = None diff --git a/dash-pipeline/py_model/data_plane/defines.py b/dash-pipeline/py_model/data_plane/defines.py new file mode 100644 index 000000000..e456d07ed --- /dev/null +++ b/dash-pipeline/py_model/data_plane/defines.py @@ -0,0 +1,15 @@ + +TABLE_HERO_SCALE = False +TABLE_BABY_HERO_SCALE = False + +if TABLE_HERO_SCALE: + TABLE_CA_TO_PA_SIZE = (8 * 1024 * 1024) + TABLE_ROUTING_SIZE = (4 * 1024 * 1024) + +elif TABLE_BABY_HERO_SCALE: + TABLE_CA_TO_PA_SIZE = (8 * 1024 * 10) + TABLE_ROUTING_SIZE = (4 * 1024 * 10) + +else: #default/minimum size + TABLE_CA_TO_PA_SIZE = 1024 + TABLE_ROUTING_SIZE = 1024 diff --git 
a/dash-pipeline/py_model/data_plane/routing_actions/routing_action_encap_underlay.py b/dash-pipeline/py_model/data_plane/routing_actions/routing_action_encap_underlay.py new file mode 100644 index 000000000..f1fc4f13c --- /dev/null +++ b/dash-pipeline/py_model/data_plane/routing_actions/routing_action_encap_underlay.py @@ -0,0 +1,75 @@ +from py_model.libs.__utils import * +from py_model.data_plane.dash_tunnel import * + +def push_action_encap_u0(dash_encapsulation : dash_encapsulation_t = dash_encapsulation_t.VXLAN, + vni : Annotated[int, 24] = 0, + underlay_sip : Annotated[int, IPv4Address_size] = 0, + underlay_dip : Annotated[int, IPv4Address_size] = 0, + underlay_smac : Annotated[int, EthernetAddress_size] = 0, + underlay_dmac : Annotated[int, EthernetAddress_size] = 0): + + meta.routing_actions = meta.routing_actions | dash_routing_actions_t.ENCAP_U0 + + meta.u0_encap_data.dash_encapsulation = dash_encapsulation + + meta.u0_encap_data.vni = meta.u0_encap_data.vni if vni == 0 else vni + meta.u0_encap_data.underlay_smac = meta.u0_encap_data.underlay_smac if underlay_smac == 0 else underlay_smac + meta.u0_encap_data.underlay_dmac = meta.u0_encap_data.underlay_dmac if underlay_dmac == 0 else underlay_dmac + meta.u0_encap_data.underlay_sip = meta.u0_encap_data.underlay_sip if underlay_sip == 0 else underlay_sip + meta.u0_encap_data.underlay_dip = meta.u0_encap_data.underlay_dip if underlay_dip == 0 else underlay_dip + +def push_action_encap_u1(dash_encapsulation : dash_encapsulation_t = dash_encapsulation_t.VXLAN, + vni : Annotated[int, 24] = 0, + underlay_sip : Annotated[int, IPv4Address_size] = 0, + underlay_dip : Annotated[int, IPv4Address_size] = 0, + underlay_smac : Annotated[int, EthernetAddress_size] = 0, + underlay_dmac : Annotated[int, EthernetAddress_size] = 0): + + meta.routing_actions = meta.routing_actions | dash_routing_actions_t.ENCAP_U1 + + meta.u0_encap_data.dash_encapsulation = dash_encapsulation + meta.u1_encap_data.vni = meta.u1_encap_data.vni if vni == 0 else vni + + meta.u1_encap_data.underlay_smac = meta.u1_encap_data.underlay_smac if underlay_smac == 0 else underlay_smac + meta.u1_encap_data.underlay_dmac = meta.u1_encap_data.underlay_dmac if underlay_dmac == 0 else underlay_dmac + meta.u1_encap_data.underlay_sip = meta.u1_encap_data.underlay_sip if underlay_sip == 0 else underlay_sip + meta.u1_encap_data.underlay_dip = meta.u1_encap_data.underlay_dip if underlay_dip == 0 else underlay_dip + +class do_action_encap_u0: + @classmethod + def apply(cls): + if (meta.routing_actions & dash_routing_actions_t.ENCAP_U0) == 0: + return + + if meta.u0_encap_data.dash_encapsulation == dash_encapsulation_t.VXLAN: + push_vxlan_tunnel_u0(meta.u0_encap_data.underlay_dmac, + meta.u0_encap_data.underlay_smac, + meta.u0_encap_data.underlay_dip, + meta.u0_encap_data.underlay_sip, + meta.u0_encap_data.vni) + + elif meta.u0_encap_data.dash_encapsulation == dash_encapsulation_t.NVGRE: + push_nvgre_tunnel_u0(meta.u0_encap_data.underlay_dmac, + meta.u0_encap_data.underlay_smac, + meta.u0_encap_data.underlay_dip, + meta.u0_encap_data.underlay_sip, + meta.u0_encap_data.vni) + +class do_action_encap_u1: + @classmethod + def apply(cls): + if (meta.routing_actions & dash_routing_actions_t.ENCAP_U1) == 0: + return + if meta.u1_encap_data.dash_encapsulation == dash_encapsulation_t.VXLAN: + push_vxlan_tunnel_u1(meta.u1_encap_data.underlay_dmac, + meta.u1_encap_data.underlay_smac, + meta.u1_encap_data.underlay_dip, + meta.u1_encap_data.underlay_sip, + meta.u1_encap_data.vni) + + elif 
meta.u1_encap_data.dash_encapsulation == dash_encapsulation_t.NVGRE: + push_nvgre_tunnel_u1(meta.u1_encap_data.underlay_dmac, + meta.u1_encap_data.underlay_smac, + meta.u1_encap_data.underlay_dip, + meta.u1_encap_data.underlay_sip, + meta.u1_encap_data.vni) diff --git a/dash-pipeline/py_model/data_plane/routing_actions/routing_action_nat46.py b/dash-pipeline/py_model/data_plane/routing_actions/routing_action_nat46.py new file mode 100644 index 000000000..345f15ca6 --- /dev/null +++ b/dash-pipeline/py_model/data_plane/routing_actions/routing_action_nat46.py @@ -0,0 +1,36 @@ +from py_model.libs.__utils import * + +def push_action_nat46(sip: Annotated[int, IPv6Address_size], + sip_mask: Annotated[int, IPv6Address_size], + dip: Annotated[int, IPv6Address_size], + dip_mask: Annotated[int, IPv6Address_size]): + + meta.routing_actions = meta.routing_actions | dash_routing_actions_t.NAT46 + meta.overlay_data.is_ipv6 = 1 + meta.overlay_data.sip = sip + meta.overlay_data.sip_mask = sip_mask + meta.overlay_data.dip = dip + meta.overlay_data.dip_mask = dip_mask + +class do_action_nat46: + @classmethod + def apply(cls): + if (meta.routing_actions & dash_routing_actions_t.NAT46 == 0): + return + + assert(meta.overlay_data.is_ipv6 == 1); + + hdr.u0_ipv6 = ipv6_t() + hdr.u0_ipv6.version = 6 + hdr.u0_ipv6.traffic_class = 0 + hdr.u0_ipv6.flow_label = 0 + hdr.u0_ipv6.payload_length = hdr.u0_ipv4.total_len - IPV4_HDR_SIZE + hdr.u0_ipv6.next_header = hdr.u0_ipv4.protocol + hdr.u0_ipv6.hop_limit = hdr.u0_ipv4.ttl + + # Python support arithmetic on 128-bit operands + hdr.u0_ipv6.dst_addr = (hdr.u0_ipv4.dst_addr & ~meta.overlay_data.dip_mask) | (meta.overlay_data.dip & meta.overlay_data.dip_mask) + hdr.u0_ipv6.src_addr = (hdr.u0_ipv4.src_addr & ~meta.overlay_data.sip_mask) | (meta.overlay_data.sip & meta.overlay_data.sip_mask) + + hdr.u0_ipv4 = None + hdr.u0_ethernet.ether_type = IPV6_ETHTYPE \ No newline at end of file diff --git a/dash-pipeline/py_model/data_plane/routing_actions/routing_action_nat64.py b/dash-pipeline/py_model/data_plane/routing_actions/routing_action_nat64.py new file mode 100644 index 000000000..cb174aa38 --- /dev/null +++ b/dash-pipeline/py_model/data_plane/routing_actions/routing_action_nat64.py @@ -0,0 +1,32 @@ +from py_model.libs.__utils import * +def push_action_nat64(src: Annotated[int, IPv4Address_size], + dst: Annotated[int, IPv4Address_size]): + meta.routing_actions = meta.routing_actions | dash_routing_actions_t.NAT64 + meta.overlay_data.is_ipv6 = 0 + meta.overlay_data.sip = src + meta.overlay_data.dip = dst + +class do_action_nat64: + @classmethod + def apply(cls): + if (meta.routing_actions & dash_routing_actions_t.NAT64 == 0): + return + + assert(meta.overlay_data.is_ipv6 == 0) + + hdr.u0_ipv4 = ipv4_t() + hdr.u0_ipv4.version = 4 + hdr.u0_ipv4.ihl = 5 + hdr.u0_ipv4.diffserv = 0 + hdr.u0_ipv4.total_len = hdr.u0_ipv6.payload_length + IPV4_HDR_SIZE + hdr.u0_ipv4.identification = 1 + hdr.u0_ipv4.flags = 0 + hdr.u0_ipv4.frag_offset = 0 + hdr.u0_ipv4.protocol = hdr.u0_ipv6.next_header + hdr.u0_ipv4.ttl = hdr.u0_ipv6.hop_limit + hdr.u0_ipv4.hdr_checksum = 0 + hdr.u0_ipv4.dst_addr = meta.overlay_data.dip + hdr.u0_ipv4.src_addr = meta.overlay_data.sip + + hdr.u0_ipv6 = None + hdr.u0_ethernet.ether_type = IPV4_ETHTYPE \ No newline at end of file diff --git a/dash-pipeline/py_model/data_plane/routing_actions/routing_action_nat_port.py b/dash-pipeline/py_model/data_plane/routing_actions/routing_action_nat_port.py new file mode 100644 index 000000000..a8a20ad16 --- /dev/null +++ 
b/dash-pipeline/py_model/data_plane/routing_actions/routing_action_nat_port.py @@ -0,0 +1,31 @@ +from py_model.libs.__utils import * + +def push_action_snat_port(sport: Annotated[int, 16]): + meta.routing_actions = meta.routing_actions | dash_routing_actions_t.SNAT_PORT + meta.overlay_data.sport = sport + +def push_action_dnat_port(dport: Annotated[int, 16]): + meta.routing_actions = meta.routing_actions | dash_routing_actions_t.DNAT_PORT + meta.overlay_data.dport = dport + +class do_action_snat_port: + @classmethod + def apply(cls): + if (meta.routing_actions & dash_routing_actions_t.SNAT_PORT == 0): + return + + if hdr.customer_tcp: + hdr.customer_tcp.src_port = meta.overlay_data.sport + elif hdr.customer_udp: + hdr.customer_udp.src_port = meta.overlay_data.sport + +class do_action_dnat_port: + @classmethod + def apply(cls): + if (meta.routing_actions & dash_routing_actions_t.DNAT_PORT == 0): + return + + if hdr.customer_tcp: + hdr.customer_tcp.dst_port = meta.overlay_data.dport + elif hdr.customer_udp: + hdr.customer_udp.dst_port = meta.overlay_data.dport diff --git a/dash-pipeline/py_model/data_plane/routing_actions/routing_action_set_mac.py b/dash-pipeline/py_model/data_plane/routing_actions/routing_action_set_mac.py new file mode 100644 index 000000000..b0cacb3f0 --- /dev/null +++ b/dash-pipeline/py_model/data_plane/routing_actions/routing_action_set_mac.py @@ -0,0 +1,24 @@ +from py_model.libs.__utils import * +from py_model.data_plane.dash_tunnel import * + +def push_action_set_smac(overlay_smac: Annotated[int, EthernetAddress_size]): + # not used by now + pass + +def push_action_set_dmac(overlay_dmac: Annotated[int, EthernetAddress_size]): + meta.routing_actions = meta.routing_actions | dash_routing_actions_t.SET_DMAC + meta.overlay_data.dmac = overlay_dmac + +class do_action_set_smac: + @classmethod + def apply(cls): + # not used by now + pass + +class do_action_set_dmac: + @classmethod + def apply(cls): + if (meta.routing_actions & dash_routing_actions_t.SET_DMAC == 0): + return + + hdr.customer_ethernet.dst_addr = meta.overlay_data.dmac diff --git a/dash-pipeline/py_model/data_plane/routing_actions/routing_actions.py b/dash-pipeline/py_model/data_plane/routing_actions/routing_actions.py new file mode 100644 index 000000000..b0b9de63e --- /dev/null +++ b/dash-pipeline/py_model/data_plane/routing_actions/routing_actions.py @@ -0,0 +1,5 @@ +from py_model.data_plane.routing_actions.routing_action_encap_underlay import * +from py_model.data_plane.routing_actions.routing_action_set_mac import * +from py_model.data_plane.routing_actions.routing_action_nat46 import * +from py_model.data_plane.routing_actions.routing_action_nat64 import * +from py_model.data_plane.routing_actions.routing_action_nat_port import * diff --git a/dash-pipeline/py_model/data_plane/stages/conntrack_lookup.py b/dash-pipeline/py_model/data_plane/stages/conntrack_lookup.py new file mode 100644 index 000000000..11ff44671 --- /dev/null +++ b/dash-pipeline/py_model/data_plane/stages/conntrack_lookup.py @@ -0,0 +1,399 @@ +from py_model.libs.__utils import * +from py_model.libs.__table import * + +def conntrack_set_meta_from_dash_header(): + # basic metadata + meta.direction = hdr.flow_data.direction + meta.dash_tunnel_id = 0 + meta.routing_actions = hdr.flow_data.actions + meta.meter_class = hdr.flow_data.meter_class + + # encapsulation metadata + if TARGET == TARGET_DPDK_PNA: + meta.u0_encap_data.vni = hdr.flow_u0_encap_data.vni + meta.u0_encap_data.underlay_sip = hdr.flow_u0_encap_data.underlay_sip + 
meta.u0_encap_data.underlay_dip = hdr.flow_u0_encap_data.underlay_dip + meta.u0_encap_data.underlay_smac = hdr.flow_u0_encap_data.underlay_smac + meta.u0_encap_data.underlay_dmac = hdr.flow_u0_encap_data.underlay_dmac + meta.u0_encap_data.dash_encapsulation = hdr.flow_u0_encap_data.dash_encapsulation + else: + meta.u0_encap_data = hdr.flow_u0_encap_data + + # tunnel metadata + if TARGET == TARGET_DPDK_PNA: + meta.u1_encap_data.vni = hdr.flow_u1_encap_data.vni + meta.u1_encap_data.underlay_sip = hdr.flow_u1_encap_data.underlay_sip + meta.u1_encap_data.underlay_dip = hdr.flow_u1_encap_data.underlay_dip + meta.u1_encap_data.underlay_smac = hdr.flow_u1_encap_data.underlay_smac + meta.u1_encap_data.underlay_dmac = hdr.flow_u1_encap_data.underlay_dmac + meta.u1_encap_data.dash_encapsulation = hdr.flow_u1_encap_data.dash_encapsulation + else: + meta.u1_encap_data = hdr.flow_u1_encap_data + + # overlay rewrite metadata + if TARGET == TARGET_DPDK_PNA: + meta.overlay_data.dmac = hdr.flow_overlay_data.dmac + meta.overlay_data.sip = hdr.flow_overlay_data.sip + meta.overlay_data.dip = hdr.flow_overlay_data.dip + meta.overlay_data.sip_mask = hdr.flow_overlay_data.sip_mask + meta.overlay_data.dip_mask = hdr.flow_overlay_data.dip_mask + meta.overlay_data.is_ipv6 = hdr.flow_overlay_data.is_ipv6 + else: + meta.overlay_data = hdr.flow_overlay_data + +def conntrack_strip_dash_header(): + hdr.dp_ethernet = None + hdr.packet_meta = None + hdr.flow_key = None + hdr.flow_data = None + hdr.flow_overlay_data = None + hdr.flow_u0_encap_data = None + hdr.flow_u1_encap_data = None + +class conntrack_build_dash_header: + @classmethod + def apply(cls, packet_subtype : dash_packet_subtype_t): + py_log("info", "conntrack_build_dash_header") + + length = 0 + + hdr.flow_data = flow_data_t() + hdr.flow_data.is_unidirectional = 0 + hdr.flow_data.version = 0 + hdr.flow_data.direction = meta.direction + hdr.flow_data.actions = meta.routing_actions + hdr.flow_data.meter_class = meta.meter_class + hdr.flow_data.idle_timeout_in_ms = meta.flow_data.idle_timeout_in_ms + length += FLOW_DATA_HDR_SIZE + + if meta.routing_actions & dash_routing_actions_t.ENCAP_U0 != 0: + if TARGET == TARGET_DPDK_PNA: + hdr.flow_u0_encap_data = encap_data_t() + hdr.flow_u0_encap_data.vni = meta.u0_encap_data.vni + hdr.flow_u0_encap_data.underlay_sip = meta.u0_encap_data.underlay_sip + hdr.flow_u0_encap_data.underlay_dip = meta.u0_encap_data.underlay_dip + hdr.flow_u0_encap_data.underlay_smac = meta.u0_encap_data.underlay_smac + hdr.flow_u0_encap_data.underlay_dmac = meta.u0_encap_data.underlay_dmac + hdr.flow_u0_encap_data.dash_encapsulation = meta.u0_encap_data.dash_encapsulation + else: + hdr.flow_u0_encap_data = meta.u0_encap_data + length += ENCAP_DATA_HDR_SIZE + + if meta.routing_actions & dash_routing_actions_t.ENCAP_U1 != 0: + if TARGET == TARGET_DPDK_PNA: + hdr.flow_u1_encap_data = encap_data_t() + hdr.flow_u1_encap_data.vni = meta.u1_encap_data.vni + hdr.flow_u1_encap_data.underlay_sip = meta.u1_encap_data.underlay_sip + hdr.flow_u1_encap_data.underlay_dip = meta.u1_encap_data.underlay_dip + hdr.flow_u1_encap_data.underlay_smac = meta.u1_encap_data.underlay_smac + hdr.flow_u1_encap_data.underlay_dmac = meta.u1_encap_data.underlay_dmac + hdr.flow_u1_encap_data.dash_encapsulation = meta.u1_encap_data.dash_encapsulation + else: + hdr.flow_u1_encap_data = meta.u1_encap_data + length += ENCAP_DATA_HDR_SIZE + + if meta.routing_actions != 0: + if TARGET == TARGET_DPDK_PNA: + hdr.flow_overlay_data = overlay_rewrite_data_t() + hdr.flow_overlay_data.dmac 
= meta.overlay_data.dmac + hdr.flow_overlay_data.sip = meta.overlay_data.sip + hdr.flow_overlay_data.dip = meta.overlay_data.dip + hdr.flow_overlay_data.sip_mask = meta.overlay_data.sip_mask + hdr.flow_overlay_data.dip_mask = meta.overlay_data.dip_mask + hdr.flow_overlay_data.is_ipv6 = meta.overlay_data.is_ipv6 + else: + hdr.flow_overlay_data = meta.overlay_data + length += OVERLAY_REWRITE_DATA_HDR_SIZE + + length += FLOW_KEY_HDR_SIZE + + hdr.packet_meta = dash_packet_meta_t() + hdr.packet_meta.packet_source = dash_packet_source_t.PIPELINE + hdr.packet_meta.packet_type = dash_packet_type_t.REGULAR + hdr.packet_meta.packet_subtype = packet_subtype + hdr.packet_meta.length = length + PACKET_META_HDR_SIZE + + hdr.dp_ethernet = ethernet_t() + hdr.dp_ethernet.dst_addr = meta.cpu_mac + hdr.dp_ethernet.src_addr = meta.u0_encap_data.underlay_smac + hdr.dp_ethernet.ether_type = DASH_ETHTYPE + +class conntrack_flow_miss_handle(): + @classmethod + def apply(cls): + py_log("info", "conntrack_flow_miss_handle") + # SYN + if (hdr.customer_tcp and hdr.customer_tcp.flags == 0x2) or hdr.customer_udp: + conntrack_build_dash_header.apply(dash_packet_subtype_t.FLOW_CREATE) + meta.to_dpapp = True # trap to dpapp + return + # FIN/RST + elif ((hdr.customer_tcp.flags & 0b000101) != 0) and (hdr.packet_meta.packet_source == dash_packet_source_t.DPAPP): + # Flow should be just deleted by dpapp + conntrack_set_meta_from_dash_header() + return + + # should not reach here + meta.dropped = True # drop it + +class conntrack_flow_created_handle(): + @classmethod + def apply(cls): + py_log("info", "conntrack_flow_created_handle") + if hdr.customer_tcp: + if (hdr.customer_tcp.flags & 0b000101) != 0: # FIN/RST + conntrack_build_dash_header.apply(dash_packet_subtype_t.FLOW_DELETE) + meta.to_dpapp = True + return + # TODO update flow timestamp for aging + +class conntrack_flow_handle(): + @classmethod + def apply(cls): + match meta.flow_sync_state: + case dash_flow_sync_state_t.FLOW_MISS: + conntrack_flow_miss_handle.apply() + case dash_flow_sync_state_t.FLOW_CREATED: + conntrack_flow_created_handle.apply() + + # Drop dash header if not sending to dpapp + if not meta.to_dpapp: + conntrack_strip_dash_header() + + +class conntrack_lookup_stage: + # Flow table: + @staticmethod + def set_flow_table_attr(max_flow_count : Annotated[int, 32], + dash_flow_enabled_key : Annotated[dash_flow_enabled_key_t, {"type" : "sai_dash_flow_enabled_key_t"}], + flow_ttl_in_milliseconds: Annotated[int, 32]): + meta.flow_table.max_flow_count = max_flow_count + meta.flow_table.flow_enabled_key = dash_flow_enabled_key + meta.flow_table.flow_ttl_in_milliseconds = flow_ttl_in_milliseconds + + # Flow entry: + @staticmethod + def set_flow_entry_attr( + # Flow basic metadata + version : Annotated[int, 32], + dash_direction : Annotated[dash_direction_t, {"type" : "sai_dash_direction_t"}], + dash_flow_action : Annotated[dash_flow_action_t, {"type" : "sai_dash_flow_action_t"}], + meter_class : Annotated[int, 32], + is_unidirectional_flow : Annotated[int, 1], + dash_flow_sync_state : Annotated[dash_flow_sync_state_t, {"type" : "sai_dash_flow_sync_state_t"}], + + # Reverse flow key + reverse_flow_eni_mac : Annotated[int, EthernetAddress_size], + reverse_flow_vnet_id : Annotated[int, 16], + reverse_flow_ip_proto : Annotated[int, 8], + reverse_flow_src_ip : Annotated[int, IPv4ORv6Address_size], + reverse_flow_dst_ip : Annotated[int, IPv4ORv6Address_size], + reverse_flow_src_port : Annotated[int, 16], + reverse_flow_dst_port : Annotated[int, 16], + 
reverse_flow_dst_ip_is_v6 : Annotated[int, 1], + + # Flow encap related attributes + underlay0_vnet_id : Annotated[int, 24], + underlay0_sip : Annotated[int, IPv4Address_size, {"type" : "sai_ip_address_t"}], + underlay0_dip : Annotated[int, IPv4Address_size, {"type" : "sai_ip_address_t"}], + underlay0_smac : Annotated[int, EthernetAddress_size], + underlay0_dmac : Annotated[int, EthernetAddress_size], + underlay0_dash_encapsulation: Annotated[dash_encapsulation_t, {"type" : "sai_dash_encapsulation_t"}], + + underlay1_vnet_id : Annotated[int, 24], + underlay1_sip : Annotated[int, IPv4Address_size, {"type" : "sai_ip_address_t"}], + underlay1_dip : Annotated[int, IPv4Address_size, {"type" : "sai_ip_address_t"}], + underlay1_smac : Annotated[int, EthernetAddress_size], + underlay1_dmac : Annotated[int, EthernetAddress_size], + underlay1_dash_encapsulation: Annotated[dash_encapsulation_t, {"type" : "sai_dash_encapsulation_t"}], + + # Flow overlay rewrite related attributes + dst_mac : Annotated[int, EthernetAddress_size], + sip : Annotated[int, IPv4ORv6Address_size], + dip : Annotated[int, IPv4ORv6Address_size], + sip_mask : Annotated[int, IPv6Address_size], + dip_mask : Annotated[int, IPv6Address_size], + dip_is_v6 : Annotated[int, 1], + + # Extra flow metadata + vendor_metadata : Annotated[int, 16, {"type" : "sai_u8_list_t"}], + flow_data_pb : Annotated[int, 16, {"type" : "sai_u8_list_t"}] + ): + # Set Flow basic metadata + meta.flow_data.version = version + meta.flow_data.direction = dash_direction + meta.flow_data.actions = dash_flow_action + meta.flow_data.meter_class = meter_class + meta.flow_data.is_unidirectional = is_unidirectional_flow + + # Also set basic metadata + meta.flow_sync_state = dash_flow_sync_state + meta.flow_sync_state = dash_flow_sync_state + meta.direction = dash_direction + meta.routing_actions = dash_flow_action + meta.meter_class = meter_class + + # Reverse flow key is not used by now + + # Set encapsulation metadata + meta.u0_encap_data.vni = underlay0_vnet_id + meta.u0_encap_data.underlay_sip = underlay0_sip + meta.u0_encap_data.underlay_dip = underlay0_dip + meta.u0_encap_data.dash_encapsulation = underlay0_dash_encapsulation + meta.u0_encap_data.underlay_smac = underlay0_smac + meta.u0_encap_data.underlay_dmac = underlay0_dmac + + meta.u1_encap_data.vni = underlay1_vnet_id + meta.u1_encap_data.underlay_sip = underlay1_sip + meta.u1_encap_data.underlay_dip = underlay1_dip + meta.u1_encap_data.dash_encapsulation = underlay1_dash_encapsulation + meta.u1_encap_data.underlay_smac = underlay1_smac + meta.u1_encap_data.underlay_dmac = underlay1_dmac + + # Set overlay rewrite metadata + meta.overlay_data.dmac = dst_mac + meta.overlay_data.sip = sip + meta.overlay_data.dip = dip + meta.overlay_data.sip_mask = sip_mask + meta.overlay_data.dip_mask = dip_mask + meta.overlay_data.is_ipv6 = dip_is_v6 + + @staticmethod + def flow_miss(): + meta.flow_sync_state = dash_flow_sync_state_t.FLOW_MISS + + # Flow bulk get session filter: + # For API generation only and has no effect on the dataplane + @staticmethod + def set_flow_entry_bulk_get_session_filter_attr( + dash_flow_entry_bulk_get_session_filter_key : Annotated[dash_flow_entry_bulk_get_session_filter_key_t, + {"type" : "sai_dash_flow_entry_bulk_get_session_filter_key_t"}], + dash_flow_entry_bulk_get_session_op_key : Annotated[dash_flow_entry_bulk_get_session_op_key_t, + {"type" : "sai_dash_flow_entry_bulk_get_session_op_key_t"}], + int_value : Annotated[int, 64], + ip_value : Annotated[int, IPv4ORv6Address_size], + 
mac_value : Annotated[int, EthernetAddress_size] + ): + pass + + # Flow bulk get session: + # For API generation only and has no effect on the dataplane + @staticmethod + def set_flow_entry_bulk_get_session_attr( + dash_flow_entry_bulk_get_session_mode : Annotated[dash_flow_entry_bulk_get_session_mode_t, + {"type" : "sai_dash_flow_entry_bulk_get_session_mode_t"}], + # Mode and limitation + bulk_get_entry_limitation : Annotated[int, 32], + + # GRPC Session server IP and port + bulk_get_session_server_ip : Annotated[int, IPv4ORv6Address_size], + bulk_get_session_server_port : Annotated[int, 16], + + # Session filters + first_flow_entry_bulk_get_session_filter_id : Annotated[int, 16, {"type" : "sai_object_id_t"}], + second_flow_entry_bulk_get_session_filter_id : Annotated[int, 16, {"type" : "sai_object_id_t"}], + third_flow_entry_bulk_get_session_filter_id : Annotated[int, 16, {"type" : "sai_object_id_t"}], + fourth_flow_entry_bulk_get_session_filter_id : Annotated[int, 16, {"type" : "sai_object_id_t"}], + fifth_flow_entry_bulk_get_session_filter_id : Annotated[int, 16, {"type" : "sai_object_id_t"}], + ): + pass + + flow_table = Table( + key={ + "meta.flow_table.id": EXACT, + }, + actions=[ + set_flow_table_attr, + ], + tname=f"{__qualname__}.flow_table", + sai_table=SaiTable(name="flow_table", api="dash_flow", order=0, isobject="true",), + ) + + flow_entry = Table( + key={ + "hdr.flow_key.eni_mac" : EXACT, + "hdr.flow_key.vnet_id" : EXACT, + "hdr.flow_key.src_ip" : EXACT, + "hdr.flow_key.dst_ip" : EXACT, + "hdr.flow_key.src_port" : EXACT, + "hdr.flow_key.dst_port" : EXACT, + "hdr.flow_key.ip_proto" : EXACT, + "hdr.flow_key.is_ip_v6" : (EXACT, {"name" : "src_ip_is_v6"}) + }, + actions=[ + set_flow_entry_attr, + (flow_miss, {"annotations": "@defaultonly"}), + ], + const_default_action=flow_miss, + tname=f"{__qualname__}.flow_entry", + sai_table=SaiTable(name="flow", api="dash_flow", order=1, enable_bulk_get_api="true", enable_bulk_get_server="true",), + ) + + flow_entry_bulk_get_session_filter = Table( + key={ + "meta.bulk_get_session_filter_id": (EXACT, {"name" : "bulk_get_session_filter_id", "type" : "sai_object_id_t"}) + }, + actions=[ + set_flow_entry_bulk_get_session_filter_attr, + ], + tname=f"{__qualname__}.flow_entry_bulk_get_session_filter", + sai_table=SaiTable(name="flow_entry_bulk_get_session_filter", api="dash_flow", order=2, isobject="true",), + ) + + flow_entry_bulk_get_session = Table( + key={ + "meta.bulk_get_session_id": (EXACT, {"name" : "bulk_get_session_id", "type" : "sai_object_id_t"}) + }, + actions=[ + set_flow_entry_bulk_get_session_attr, + ], + tname=f"{__qualname__}.flow_entry_bulk_get_session", + sai_table=SaiTable(name="flow_entry_bulk_get_session", api="dash_flow", order=3, isobject="true",), + ) + + @staticmethod + def set_flow_key(flow_enabled_key: Annotated[int, 16]): + hdr.flow_key = flow_key_t() + hdr.flow_key.is_ip_v6 = meta.is_overlay_ip_v6 + + if flow_enabled_key & dash_flow_enabled_key_t.ENI_MAC != 0: + hdr.flow_key.eni_mac = meta.eni_addr + if flow_enabled_key & dash_flow_enabled_key_t.VNI != 0: + hdr.flow_key.vnet_id = meta.vnet_id + if flow_enabled_key & dash_flow_enabled_key_t.PROTOCOL != 0: + hdr.flow_key.ip_proto = meta.ip_protocol + if flow_enabled_key & dash_flow_enabled_key_t.SRC_IP != 0: + hdr.flow_key.src_ip = meta.src_ip_addr + if flow_enabled_key & dash_flow_enabled_key_t.DST_IP != 0: + hdr.flow_key.dst_ip = meta.dst_ip_addr + if flow_enabled_key & dash_flow_enabled_key_t.SRC_PORT != 0: + hdr.flow_key.src_port = meta.src_l4_port + if 
flow_enabled_key & dash_flow_enabled_key_t.DST_PORT != 0: + hdr.flow_key.dst_port = meta.dst_l4_port + + @classmethod + def apply(cls): + py_log("info", "conntrack_lookup_stage") + if hdr.flow_key is None: + py_log("info", "Applying table 'flow_table' ") + if cls.flow_table.apply()["hit"]: + meta.flow_data.idle_timeout_in_ms = meta.flow_table.flow_ttl_in_milliseconds + flow_enabled_key = meta.flow_table.flow_enabled_key + else: + # Enable all keys by default + flow_enabled_key = (dash_flow_enabled_key_t.ENI_MAC | + dash_flow_enabled_key_t.VNI | + dash_flow_enabled_key_t.PROTOCOL | + dash_flow_enabled_key_t.SRC_IP | + dash_flow_enabled_key_t.DST_IP | + dash_flow_enabled_key_t.SRC_PORT | + dash_flow_enabled_key_t.DST_PORT) + + cls.set_flow_key(flow_enabled_key) + + py_log("info", "Applying table 'flow_entry' ") + cls.flow_entry.apply() + py_log("info", "Applying table 'flow_entry_bulk_get_session_filter'") + cls.flow_entry_bulk_get_session_filter.apply() + py_log("info", "Applying table 'flow_entry_bulk_get_session'") + cls.flow_entry_bulk_get_session.apply() \ No newline at end of file diff --git a/dash-pipeline/py_model/data_plane/stages/direction_lookup.py b/dash-pipeline/py_model/data_plane/stages/direction_lookup.py new file mode 100644 index 000000000..9bf8a7163 --- /dev/null +++ b/dash-pipeline/py_model/data_plane/stages/direction_lookup.py @@ -0,0 +1,47 @@ +from py_model.libs.__utils import * +from py_model.libs.__table import * + +class direction_lookup_stage: + @staticmethod + def set_eni_mac_type(eni_mac_type: dash_eni_mac_type_t, + eni_mac_override_type: dash_eni_mac_override_type_t): + meta.eni_mac_type = eni_mac_type + + if eni_mac_override_type == dash_eni_mac_override_type_t.SRC_MAC: + meta.eni_mac_type = dash_eni_mac_type_t.SRC_MAC + elif eni_mac_override_type == dash_eni_mac_override_type_t.DST_MAC: + meta.eni_mac_type = dash_eni_mac_type_t.DST_MAC + + @staticmethod + def set_outbound_direction(dash_eni_mac_override_type: Annotated[dash_eni_mac_override_type_t, + {"type" : "sai_dash_eni_mac_override_type_t"}]): + meta.direction = dash_direction_t.OUTBOUND + direction_lookup_stage.set_eni_mac_type(dash_eni_mac_type_t.SRC_MAC, + dash_eni_mac_override_type) + + @staticmethod + def set_inbound_direction( dash_eni_mac_override_type: Annotated[dash_eni_mac_override_type_t, + {"type" : "sai_dash_eni_mac_override_type_t"}] + = dash_eni_mac_override_type_t.NONE): + meta.direction = dash_direction_t.INBOUND + direction_lookup_stage.set_eni_mac_type(dash_eni_mac_type_t.DST_MAC, + dash_eni_mac_override_type) + + direction_lookup = Table( + key={ + "meta.rx_encap.vni": (EXACT, {"name" : "VNI"}) + }, + actions=[ + set_outbound_direction, + set_inbound_direction, + ], + const_default_action=set_inbound_direction, + tname=f"{__qualname__}.direction_lookup", + sai_table=SaiTable(name="direction_lookup", api="dash_direction_lookup",), + ) + + @classmethod + def apply(cls): + # If Outer VNI matches with a reserved VNI, then the direction is Outbound + py_log("info", "Applying table 'direction_lookup'") + cls.direction_lookup.apply() \ No newline at end of file diff --git a/dash-pipeline/py_model/data_plane/stages/eni_lookup.py b/dash-pipeline/py_model/data_plane/stages/eni_lookup.py new file mode 100644 index 000000000..c5103f71f --- /dev/null +++ b/dash-pipeline/py_model/data_plane/stages/eni_lookup.py @@ -0,0 +1,37 @@ +from py_model.libs.__utils import * +from py_model.libs.__table import * +from py_model.data_plane.dash_counters import * + + +class eni_lookup_stage: + @staticmethod + def 
set_eni(eni_id: Annotated[int, 16, {"type" : "sai_object_id_t"}]): + meta.eni_id = eni_id + + eni_ether_address_map = Table( + key={ + "meta.eni_addr": (EXACT, {"name" : "address", "type" : "sai_mac_t"}) + }, + actions=[ + set_eni, + (deny, {"annotations": "@defaultonly"}) + ], + const_default_action=deny, + tname=f"{__qualname__}.eni_ether_address_map", + sai_table=SaiTable(name="eni_ether_address_map", api="dash_eni", order=0,), + ) + + @classmethod + def apply(cls): + # Put VM's MAC in direction agnostic metadata field + if meta.eni_mac_type == dash_eni_mac_type_t.SRC_MAC: + meta.eni_addr = hdr.customer_ethernet.src_addr + else: + meta.eni_addr = hdr.customer_ethernet.dst_addr + + py_log("info", "Applying table 'eni_ether_address_map' ") + if not cls.eni_ether_address_map.apply()["hit"]: + UPDATE_COUNTER("eni_miss_drop", 0) + if meta.is_fast_path_icmp_flow_redirection_packet: + UPDATE_COUNTER("port_lb_fast_path_eni_miss_drop", 0) + pass diff --git a/dash-pipeline/py_model/data_plane/stages/ha.py b/dash-pipeline/py_model/data_plane/stages/ha.py new file mode 100644 index 000000000..bc970444d --- /dev/null +++ b/dash-pipeline/py_model/data_plane/stages/ha.py @@ -0,0 +1,105 @@ +from py_model.libs.__utils import * +from py_model.libs.__table import * +from py_model.data_plane.dash_counters import * + +class ha_stage: + # HA scope + @staticmethod + def set_ha_scope_attr( + ha_set_id : Annotated[int, 16, {"type" : "sai_object_id_t"}], + dash_ha_role : Annotated[dash_ha_role_t, {"type" : "sai_dash_ha_role_t"}], + flow_version : Annotated[int, 32, {"isreadonly" : "true"}], + flow_reconcile_requested : Annotated[int, 1], + flow_reconcile_needed : Annotated[int, 1, {"isreadonly" : "true"}], + vip_v4 : Annotated[int, IPv4Address_size, {"type" : "sai_ip_address_t"}], + vip_v6 : Annotated[int, IPv6Address_size], + admin_state : Annotated[int, 1], + activate_role : Annotated[int, 1], + dash_ha_state : Annotated[dash_ha_state_t, {"isreadonly" : "true", "type" : "sai_dash_ha_state_t"}] + ): + meta.ha.ha_set_id = ha_set_id + meta.ha.ha_role = dash_ha_role + + # HA set + # Data plane probe related counters + DEFINE_COUNTER("dp_probe_req_rx", MAX_HA_SET, "dp_probe_req_rx", attr_type="stats", action_names="set_ha_set_attr") + DEFINE_COUNTER("dp_probe_req_tx", MAX_HA_SET, "dp_probe_req_tx", attr_type="stats", action_names="set_ha_set_attr") + DEFINE_COUNTER("dp_probe_ack_rx", MAX_HA_SET, "dp_probe_ack_rx", attr_type="stats", action_names="set_ha_set_attr") + DEFINE_COUNTER("dp_probe_ack_tx", MAX_HA_SET, "dp_probe_ack_tx", attr_type="stats", action_names="set_ha_set_attr") + DEFINE_HIT_COUNTER("dp_probe_failed", MAX_HA_SET, "dp_probe_failed", attr_type="stats", action_names="set_ha_set_attr") + + # Control plane data channel related counters + DEFINE_HIT_COUNTER("cp_data_channel_connect_attempted", MAX_HA_SET, "cp_data_channel_connect_attempted", attr_type="stats", action_names="set_ha_set_attr") + DEFINE_HIT_COUNTER("cp_data_channel_connect_received", MAX_HA_SET, "cp_data_channel_connect_received", attr_type="stats", action_names="set_ha_set_attr") + DEFINE_HIT_COUNTER("cp_data_channel_connect_succeeded", MAX_HA_SET, "cp_data_channel_connect_succeeded", attr_type="stats", action_names="set_ha_set_attr") + DEFINE_HIT_COUNTER("cp_data_channel_connect_failed", MAX_HA_SET, "cp_data_channel_connect_failed", attr_type="stats", action_names="set_ha_set_attr") + DEFINE_HIT_COUNTER("cp_data_channel_connect_rejected", MAX_HA_SET, "cp_data_channel_connect_rejected", attr_type="stats", action_names="set_ha_set_attr") + 
DEFINE_HIT_COUNTER("cp_data_channel_timeout_count", MAX_HA_SET, "cp_data_channel_timeout_count", attr_type="stats", action_names="set_ha_set_attr") + + # Bulk sync related counters + DEFINE_HIT_COUNTER("bulk_sync_message_received", MAX_HA_SET, "bulk_sync_message_received", attr_type="stats", action_names="set_ha_set_attr") + DEFINE_HIT_COUNTER("bulk_sync_message_sent", MAX_HA_SET, "bulk_sync_message_sent", attr_type="stats", action_names="set_ha_set_attr") + DEFINE_HIT_COUNTER("bulk_sync_message_send_failed", MAX_HA_SET, "bulk_sync_message_send_failed", attr_type="stats", action_names="set_ha_set_attr") + DEFINE_HIT_COUNTER("bulk_sync_flow_received", MAX_HA_SET, "bulk_sync_flow_received", attr_type="stats", action_names="set_ha_set_attr") + DEFINE_HIT_COUNTER("bulk_sync_flow_sent", MAX_HA_SET, "bulk_sync_flow_sent", attr_type="stats", action_names="set_ha_set_attr") + + @staticmethod + def set_ha_set_attr( + local_ip_is_v6 : Annotated[int, 1], + local_ip : Annotated[int, IPv4ORv6Address_size, {"type" : "sai_ip_address_t"}], + peer_ip_is_v6 : Annotated[int, 1], + peer_ip : Annotated[int, IPv4ORv6Address_size, {"type" : "sai_ip_address_t"}], + cp_data_channel_port : Annotated[int, 16], + dp_channel_dst_port : Annotated[int, 16], + dp_channel_min_src_port : Annotated[int, 16], + dp_channel_max_src_port : Annotated[int, 16], + dp_channel_probe_interval_ms : Annotated[int, 32], + dp_channel_probe_fail_threshold : Annotated[int, 32], + dp_channel_is_alive : Annotated[int, 1, {"isreadonly" : "true"}], + dpu_driven_ha_switchover_wait_time_ms : Annotated[int, 32], + ): + meta.ha.peer_ip_is_v6 = peer_ip_is_v6 + meta.ha.peer_ip = peer_ip + + meta.ha.dp_channel_dst_port = dp_channel_dst_port + meta.ha.dp_channel_src_port_min = dp_channel_min_src_port + meta.ha.dp_channel_src_port_max = dp_channel_max_src_port + + ha_scope = Table( + key={ + "meta.ha.ha_scope_id": EXACT, + }, + actions=[ + set_ha_scope_attr, + ], + tname=f"{__qualname__}.ha_scope", + sai_table=SaiTable(api="dash_ha", order=1, isobject="true",), + ) + + ha_set = Table( + key={ + "meta.ha.ha_set_id": (EXACT, {"type" : "sai_object_id_t"}) + }, + actions=[ + set_ha_set_attr, + ], + tname=f"{__qualname__}.ha_set", + sai_table=SaiTable(api="dash_ha", order=0, isobject="true",), + ) + + @classmethod + def apply(cls): + # If HA scope id is not set, then HA is not enabled. + if meta.ha.ha_scope_id == 0: + return + py_log("info", "Applying table 'ha_scope'") + cls.ha_scope.apply() + + # If HA set id is not set, then HA is not enabled. + if meta.ha.ha_set_id == 0: + return + + py_log("info", "Applying table 'ha_set'") + cls.ha_set.apply() + + # TODO: HA state machine handling. 
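+# Note on the lookup chain above: ha_scope is keyed on meta.ha.ha_scope_id
+# (assigned per ENI by set_eni_attrs) and resolves the ha_set_id and HA role;
+# ha_set is then keyed on that ha_set_id and resolves the peer IP plus the
+# data-plane channel destination port and source-port range. Both lookups are
+# skipped when the corresponding id is 0, i.e. when HA is not enabled for the
+# ENI.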
diff --git a/dash-pipeline/py_model/data_plane/stages/inbound_routing.py b/dash-pipeline/py_model/data_plane/stages/inbound_routing.py new file mode 100644 index 000000000..e1c6a6b47 --- /dev/null +++ b/dash-pipeline/py_model/data_plane/stages/inbound_routing.py @@ -0,0 +1,76 @@ +from py_model.libs.__table import * +from py_model.libs.__counters import * +from py_model.data_plane.dash_headers import * +from py_model.data_plane.dash_metadata import * +from py_model.data_plane.dash_routing_types import * + +class inbound_routing_stage: + @staticmethod + def permit(): + pass + + @staticmethod + def vxlan_decap(): + pass + + @staticmethod + def vxlan_decap_pa_validate(): + pass + + @staticmethod + def tunnel_decap(meter_class_or : Annotated[int, 32], + meter_class_and: Annotated[int, 32, {"default_value" : "4294967295"}]): + set_meter_attrs(meter_class_or, meter_class_and) + + @staticmethod + def tunnel_decap_pa_validate(src_vnet_id : Annotated[int, 16, {"type" : "sai_object_id_t"}], + meter_class_or : Annotated[int, 32], + meter_class_and: Annotated[int, 32, {"default_value" : "4294967295"}]): + meta.vnet_id = src_vnet_id + set_meter_attrs(meter_class_or, meter_class_and) + + + pa_validation = Table( + key={ + "meta.vnet_id" : (EXACT, {"type" : "sai_object_id_t"}), + "meta.rx_encap.underlay_sip": (EXACT, {"name": "sip", "type": "sai_ip_address_t"}), + }, + actions=[ + permit, + (drop, {"annotations": "@defaultonly"}), + ], + const_default_action=drop, + tname=f"{__qualname__}.pa_validation", + sai_table=SaiTable(name="pa_validation", api="dash_pa_validation",), + ) + + inbound_routing = Table( + key={ + "meta.eni_id" : (EXACT, {"type": "sai_object_id_t"}), + "meta.rx_encap.vni" : (EXACT, {"name": "VNI"}), + "meta.rx_encap.underlay_sip": (TERNARY, {"name": "sip", "type": "sai_ip_address_t"}), + }, + actions=[ + tunnel_decap, + tunnel_decap_pa_validate, + vxlan_decap, # Deprecated, but cannot be removed until SWSS is updated. + vxlan_decap_pa_validate, # Deprecated, but cannot be removed until SWSS is updated. 
+ (drop, {"annotations": "@defaultonly"}), + ], + const_default_action=drop, + tname=f"{__qualname__}.inbound_routing", + sai_table=SaiTable(name="inbound_routing", api="dash_inbound_routing",), + ) + + @classmethod + def apply(cls): + if meta.target_stage != dash_pipeline_stage_t.INBOUND_ROUTING: + return + + py_log("info", "Applying table 'inbound_routing'") + result = cls.inbound_routing.apply()["action_run"] + if result == cls.tunnel_decap_pa_validate: + py_log("info", "Applying table 'pa_validation'") + cls.pa_validation.apply() + elif result == drop: + UPDATE_ENI_COUNTER("inbound_routing_entry_miss_drop") diff --git a/dash-pipeline/py_model/data_plane/stages/metering_update.py b/dash-pipeline/py_model/data_plane/stages/metering_update.py new file mode 100644 index 000000000..87da7cd8e --- /dev/null +++ b/dash-pipeline/py_model/data_plane/stages/metering_update.py @@ -0,0 +1,106 @@ +from py_model.libs.__utils import * +from py_model.libs.__table import * +from py_model.data_plane.dash_counters import * + +class metering_update_stage: + # Validate IP address family against overlay IP version + @staticmethod + def check_ip_addr_family(ip_addr_family: Annotated[int, 32, {"type" : "sai_ip_addr_family_t", "isresourcetype" : "true"}]): + if ip_addr_family == 0: # SAI_IP_ADDR_FAMILY_IPV4 + if meta.is_overlay_ip_v6 == 1: + meta.dropped = True + else: + if meta.is_overlay_ip_v6 == 0: + meta.dropped = True + + # Assign meter class from policy + @staticmethod + def set_policy_meter_class(meter_class: Annotated[int, 32]): + meta.meter_class = meter_class + + # MAX_METER_BUCKET = MAX_ENI(64) * NUM_BUCKETS_PER_ENI(4096) + MAX_METER_BUCKETS = 262144 + DEFINE_BYTE_COUNTER("meter_bucket_outbound", MAX_METER_BUCKETS, name="outbound", action_names="update_meter_bucket", attr_type="stats") + DEFINE_BYTE_COUNTER("meter_bucket_inbound", MAX_METER_BUCKETS, name="inbound", action_names="update_meter_bucket", attr_type="stats") + + @staticmethod + def update_meter_bucket(): + pass + + meter_policy = Table( + key = { + "meta.meter_context.meter_policy_id": EXACT + }, + actions = [ + check_ip_addr_family + ], + tname=f"{__qualname__}.meter_policy", + sai_table = SaiTable(name="meter_policy", api="dash_meter", order=1, isobject="true") + ) + + meter_rule = Table( + key = { + "meta.meter_context.meter_policy_id": (EXACT, {"type" : "sai_object_id_t", "isresourcetype" : "true", "objects" : "METER_POLICY"}), + "meta.meter_context.meter_policy_lookup_ip": (TERNARY, {"name" : "dip", "type" : "sai_ip_address_t"}) + }, + actions = [ + set_policy_meter_class, + (NoAction, {"annotations": "@defaultonly"}) + ], + const_default_action = NoAction, + tname=f"{__qualname__}.meter_rule", + sai_table = SaiTable(name="meter_rule", api="dash_meter", order=2, isobject="true") + ) + + meter_bucket = Table( + key = { + "meta.eni_id" : (EXACT, {"type" : "sai_object_id_t"}), + "meta.meter_class" : EXACT + }, + actions = [ + update_meter_bucket, + (NoAction, {"annotations": "@defaultonly"}) + ], + const_default_action = NoAction, + tname=f"{__qualname__}.meter_bucket", + sai_table = SaiTable(name="meter_bucket", api="dash_meter", order=0) + ) + + DEFINE_TABLE_COUNTER("eni_counter") + + eni_meter = Table( + key = { + "meta.eni_id" : (EXACT, {"type" : "sai_object_id_t"}), + "meta.direction": EXACT, + "meta.dropped" : EXACT + }, + actions = [ + NoAction + ], + tname=f"{__qualname__}.eni_meter", + sai_table = SaiTable(ignored = "true") + ) + ATTACH_TABLE_COUNTER("eni_counter", "eni_meter") + + @classmethod + def apply(cls): + 
meta.meter_class = meta.meter_context.meter_class_or & meta.meter_context.meter_class_and + + # If the meter class is 0 from the SDN policies, we go through the metering policy. + if meta.meter_class == 0: + py_log("info", "Applying table 'meter_policy'") + cls.meter_policy.apply() + py_log("info", "Applying table 'meter_rule'") + cls.meter_rule.apply() + + py_log("info", "Applying table 'meter_bucket'") + cls.meter_bucket.apply() + + if meta.meter_class != 0: + if meta.direction == dash_direction_t.OUTBOUND: + UPDATE_COUNTER("meter_bucket_outbound", meta.meter_class) + elif meta.direction == dash_direction_t.INBOUND: + UPDATE_COUNTER("meter_bucket_inbound", meta.meter_class) + + py_log("info", "Applying table 'eni_meter'") + cls.eni_meter.apply() diff --git a/dash-pipeline/py_model/data_plane/stages/outbound_mapping.py b/dash-pipeline/py_model/data_plane/stages/outbound_mapping.py new file mode 100644 index 000000000..132bc6c3a --- /dev/null +++ b/dash-pipeline/py_model/data_plane/stages/outbound_mapping.py @@ -0,0 +1,55 @@ +from py_model.libs.__table import * +from py_model.libs.__counters import * +from py_model.data_plane.dash_headers import * +from py_model.data_plane.dash_metadata import * +from py_model.data_plane.dash_routing_types import * +from py_model.data_plane.defines import * + + +class outbound_mapping_stage: + @staticmethod + def set_vnet_attrs(vni: Annotated[int, 24]): + meta.u0_encap_data.vni = vni + + DEFINE_TABLE_COUNTER("ca_to_pa_counter") + ca_to_pa = Table( + key={ + # Flow for express route + "meta.dst_vnet_id" : (EXACT, {"type": "sai_object_id_t"}), + "meta.is_lkup_dst_ip_v6": (EXACT, {"name": "dip_is_v6"}), + "meta.lkup_dst_ip_addr" : (EXACT, {"name": "dip"}), + }, + actions=[ + set_tunnel_mapping, + set_private_link_mapping, + (drop, {"annotations": "@defaultonly"}), + ], + const_default_action=drop, + tname=f"{__qualname__}.ca_to_pa", + sai_table=SaiTable(name="outbound_ca_to_pa", api="dash_outbound_ca_to_pa",), + ) + ATTACH_TABLE_COUNTER("ca_to_pa_counter", "ca_to_pa") + + vnet = Table( + key={ + "meta.vnet_id": (EXACT, {"type": "sai_object_id_t"}), + }, + actions=[ + set_vnet_attrs, + ], + tname=f"{__qualname__}.vnet", + sai_table=SaiTable(name="vnet", api="dash_vnet", isobject="true",), + ) + + @classmethod + def apply(cls): + if meta.target_stage != dash_pipeline_stage_t.OUTBOUND_MAPPING: + return + + py_log("info", "Applying table 'ca_to_pa'") + action = cls.ca_to_pa.apply()["action_run"] + if action == set_tunnel_mapping: + py_log("info", "Applying table 'vnet'") + cls.vnet.apply() + elif action == drop: + UPDATE_ENI_COUNTER("outbound_ca_pa_entry_miss_drop") diff --git a/dash-pipeline/py_model/data_plane/stages/outbound_port_map.py b/dash-pipeline/py_model/data_plane/stages/outbound_port_map.py new file mode 100644 index 000000000..784ad5d3e --- /dev/null +++ b/dash-pipeline/py_model/data_plane/stages/outbound_port_map.py @@ -0,0 +1,85 @@ +from py_model.libs.__utils import * +from py_model.libs.__table import * +from py_model.libs.__counters import * +from py_model.data_plane.dash_routing_types import * +from py_model.data_plane.routing_actions.routing_action_nat_port import * + + +class outbound_port_map_stage: + @staticmethod + def set_port_map_attr(self): + pass + + @staticmethod + def skip_mapping(self): + pass + + @staticmethod + def map_to_private_link_service(backend_ip : Annotated[int, IPv4Address_size, {"type" : "sai_ip_address_t"}], + match_port_base : Annotated[int, 16], + backend_port_base : Annotated[int, 16]): + assert (meta.routing_actions 
& dash_routing_actions_t.NAT46) != 0 + + # For private link, once the service is redirected, we need to update 2 things: + # 1. The underlay IP to point to the new backend IP in order to route the packet there. + # 2. The overlay IP and port to the new backend ip and port, so that the overlay packet will + # look like being sent from the new backend IP. + + # Update underlay IP to backend + meta.u0_encap_data.underlay_dip = backend_ip + + # Python support arithmetic on 128-bit operands + # Update overlay IP + meta.overlay_data.dip = (meta.overlay_data.dip & meta.overlay_data.dip_mask) | int(backend_ip) + + # Update overlay port with DNAT + push_action_dnat_port(meta.dst_l4_port - match_port_base + backend_port_base) + + + DEFINE_TABLE_COUNTER("outbound_port_map_counter") + outbound_port_map = Table( + key={ + "meta.port_map_ctx.map_id": (EXACT, {"name": "outbound_port_map_id", + "type": "sai_object_id_t"}) + }, + actions=[ + set_port_map_attr, + (drop, {"annotations": "@defaultonly"}), + ], + const_default_action=drop, + tname=f"{__qualname__}.outbound_port_map", + sai_table=SaiTable(api="dash_outbound_port_map", order=0, isobject="true",), + ) + ATTACH_TABLE_COUNTER("outbound_port_map_counter", "outbound_port_map") + + + DEFINE_TABLE_COUNTER("outbound_port_map_port_range_counter") + outbound_port_map_port_range = Table( + key={ + "meta.port_map_ctx.map_id" : (EXACT, {"name": "outbound_port_map_id", "type": "sai_object_id_t"}), + "meta.dst_l4_port" : (RANGE, {"name": "dst_port_range"}) + }, + actions=[ + skip_mapping, + map_to_private_link_service, + (drop, {"annotations": "@defaultonly"}), + ], + const_default_action=drop, + tname=f"{__qualname__}.outbound_port_map_port_range", + sai_table=SaiTable(api="dash_outbound_port_map", order=1, single_match_priority="true",), + ) + ATTACH_TABLE_COUNTER("outbound_port_map_port_range_counter", "outbound_port_map_port_range") + @classmethod + def apply(cls): + py_log("info", "outbound_routing_stage") + if meta.port_map_ctx.map_id == 0: + return + + py_log("info", "Applying table 'outbound_port_map'") + if not cls.outbound_port_map.apply().hit: + UPDATE_ENI_COUNTER("outbound_port_map_miss_drop") + return + + py_log("info", "Applying table 'outbound_port_map_port_range'") + if not cls.outbound_port_map_port_range.apply().hit: + UPDATE_ENI_COUNTER("outbound_port_map_port_range_entry_miss_drop") diff --git a/dash-pipeline/py_model/data_plane/stages/outbound_pre_routing_action_apply.py b/dash-pipeline/py_model/data_plane/stages/outbound_pre_routing_action_apply.py new file mode 100644 index 000000000..f354241b0 --- /dev/null +++ b/dash-pipeline/py_model/data_plane/stages/outbound_pre_routing_action_apply.py @@ -0,0 +1,20 @@ +from py_model.libs.__utils import * +from py_model.libs.__obj_classes import * +from py_model.data_plane.stages.tunnel_stage import * +from py_model.data_plane.stages.outbound_port_map import * + + +class outbound_pre_routing_action_apply_stage: + @classmethod + def apply(cls): + py_log("info", "outbound_pre_routing_action_apply_stage") + # Outbound pre-routing action apply stage is added here for certain pre-processing + if meta.target_stage != dash_pipeline_stage_t.OUTBOUND_PRE_ROUTING_ACTION_APPLY: + return + + outbound_port_map_stage.apply() + + tunnel_stage.apply() + + # Once done, move to routing action apply stage + meta.target_stage = dash_pipeline_stage_t.ROUTING_ACTION_APPLY diff --git a/dash-pipeline/py_model/data_plane/stages/outbound_routing.py b/dash-pipeline/py_model/data_plane/stages/outbound_routing.py new file mode 
100644 index 000000000..b147cbee4 --- /dev/null +++ b/dash-pipeline/py_model/data_plane/stages/outbound_routing.py @@ -0,0 +1,73 @@ +from py_model.libs.__table import * +from py_model.libs.__counters import * +from py_model.data_plane.dash_headers import * +from py_model.data_plane.dash_metadata import * +from py_model.data_plane.dash_routing_types import * +from py_model.data_plane.defines import * + + +class outbound_routing_stage: + @staticmethod + def set_outbound_routing_group_attr(disabled: Annotated[int, 1]): + meta.eni_data.outbound_routing_group_data.disabled = bool(disabled) + + outbound_routing_group = Table( + key={ + "meta.eni_data.outbound_routing_group_data.outbound_routing_group_id": ( + EXACT, {"type": "sai_object_id_t"}, + ), + }, + actions=[ + set_outbound_routing_group_attr, + (drop, {"annotations": "@defaultonly"}), + ], + tname=f"{__qualname__}.outbound_routing_group", + sai_table=SaiTable(name="outbound_routing_group", api="dash_outbound_routing", order=1, isobject="true",), + ) + + DEFINE_TABLE_COUNTER("routing_counter") + routing = Table( + key={ + "meta.eni_data.outbound_routing_group_data.outbound_routing_group_id": ( + EXACT, {"type": "sai_object_id_t"}, + ), + "meta.is_overlay_ip_v6": ( + EXACT, {"name": "destination_is_v6"}, + ), + "meta.dst_ip_addr": ( + LPM, {"name": "destination"}, + ), + }, + actions=[ + route_vnet, # for express route - ecmp of overlay + route_vnet_direct, + route_direct, + route_service_tunnel, + drop, + ], + const_default_action=drop, + tname=f"{__qualname__}.routing", + sai_table=SaiTable(name="outbound_routing", api="dash_outbound_routing",), + ) + ATTACH_TABLE_COUNTER("routing_counter", "routing") + + @classmethod + def apply(cls): + py_log("info", "outbound_routing_stage") + if meta.target_stage != dash_pipeline_stage_t.OUTBOUND_ROUTING: + return + + py_log("info", "Applying table 'outbound_routing_group'") + if not cls.outbound_routing_group.apply().get("hit", False): + UPDATE_ENI_COUNTER("outbound_routing_group_miss_drop") + drop() + return + + if meta.eni_data.outbound_routing_group_data.disabled: + UPDATE_ENI_COUNTER("outbound_routing_group_disabled_drop") + drop() + return + + py_log("info", "Applying table 'routing'") + if not cls.routing.apply()["hit"]: + UPDATE_ENI_COUNTER("outbound_routing_entry_miss_drop") diff --git a/dash-pipeline/py_model/data_plane/stages/pre_pipeline.py b/dash-pipeline/py_model/data_plane/stages/pre_pipeline.py new file mode 100644 index 000000000..1dee85ddb --- /dev/null +++ b/dash-pipeline/py_model/data_plane/stages/pre_pipeline.py @@ -0,0 +1,126 @@ +from py_model.libs.__utils import * +from py_model.libs.__table import * +from py_model.data_plane.dash_counters import * +from py_model.data_plane.dash_routing_types import * + + +class pre_pipeline_stage: + @staticmethod + def accept(): + pass + + @staticmethod + def set_appliance(local_region_id: Annotated[int, 8, {"create_only":"true"}]): + meta.local_region_id = local_region_id + + @staticmethod + def set_internal_config(neighbor_mac: Annotated[int, EthernetAddress_size], + mac : Annotated[int, EthernetAddress_size], + cpu_mac : Annotated[int, EthernetAddress_size], + flow_enabled: Annotated[int, 1]): + meta.u0_encap_data.underlay_dmac = neighbor_mac + meta.u0_encap_data.underlay_smac = mac + meta.cpu_mac = cpu_mac + meta.flow_enabled = flow_enabled + + appliance = Table( + key = { + "meta.appliance_id" : (EXACT, {"type": "sai_object_id_t"}) + }, + actions=[ + set_appliance, + (accept, {"annotations": "@defaultonly"}) + ], + 
const_default_action=accept, + tname=f"{__qualname__}.appliance", + sai_table=SaiTable(name="dash_appliance", api="dash_appliance", order=0, isobject="true",), + ) + + internal_config = Table( + key = { + "meta.appliance_id" : TERNARY + }, + actions = [ + set_internal_config + ], + tname=f"{__qualname__}.internal_config", + sai_table=SaiTable(ignored="true",), + ) + + vip = Table( + key = { + "meta.rx_encap.underlay_dip": (EXACT, {"name": "VIP", "type": "sai_ip_address_t"}) + }, + actions=[ + accept, + (drop, {"annotations": "@defaultonly"}) + ], + const_default_action=drop, + tname=f"{__qualname__}.vip", + sai_table=SaiTable(name="vip", api="dash_vip",), + ) + + @classmethod + def apply(cls): + # Normalize the outer headers. + # This helps us handling multiple encaps and different type of encaps in the future and simplify the later packet processing. + meta.rx_encap.underlay_smac = hdr.u0_ethernet.src_addr + meta.rx_encap.underlay_dmac = hdr.u0_ethernet.dst_addr + + if hdr.u0_ipv4 is not None: + meta.rx_encap.underlay_sip = hdr.u0_ipv4.src_addr + meta.rx_encap.underlay_dip = hdr.u0_ipv4.dst_addr + # IPv6 encap on received packet is not supported yet. + # elif (hdr.u0_ipv6 is not None): + # meta.rx_encap.underlay_sip = hdr.u0_ipv6.src_addr + # meta.rx_encap.underlay_dip = hdr.u0_ipv6.dst_addr + # + meta.rx_encap.dash_encapsulation = dash_encapsulation_t.VXLAN + meta.rx_encap.vni = hdr.u0_vxlan.vni + + # Save the original DSCP value + meta.eni_data.dscp_mode = dash_tunnel_dscp_mode_t.PRESERVE_MODEL + meta.eni_data.dscp = hdr.u0_ipv4.diffserv + + # Normalize the customer headers for later lookups. + meta.is_overlay_ip_v6 = 0 + meta.ip_protocol = 0 + meta.dst_ip_addr = 0 + meta.src_ip_addr = 0 + if (hdr.customer_ipv6 is not None): + meta.ip_protocol = hdr.customer_ipv6.next_header + meta.src_ip_addr = hdr.customer_ipv6.src_addr + meta.dst_ip_addr = hdr.customer_ipv6.dst_addr + meta.is_overlay_ip_v6 = 1 + elif (hdr.customer_ipv4 is not None): + meta.ip_protocol = hdr.customer_ipv4.protocol + meta.src_ip_addr = hdr.customer_ipv4.src_addr + meta.dst_ip_addr = hdr.customer_ipv4.dst_addr + + if (hdr.customer_tcp is not None): + meta.src_l4_port = hdr.customer_tcp.src_port + meta.dst_l4_port = hdr.customer_tcp.dst_port + elif (hdr.customer_udp is not None): + meta.src_l4_port = hdr.customer_udp.src_port + meta.dst_l4_port = hdr.customer_udp.dst_port + + # The pipeline starts from here and we can use the normalized headers for processing. 
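+        # Steps below: count fast-path ICMP flow-redirection packets, look up the VIP
+        # table (a hit reuses the packet's destination VIP as the underlay source IP for
+        # later encap actions, a miss bumps vip_miss_drop), then apply the appliance and
+        # internal_config tables to pick up device-level attributes.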
+ if (meta.is_fast_path_icmp_flow_redirection_packet): + UPDATE_COUNTER("port_lb_fast_path_icmp_in", 0) + pass + + py_log("info", "Applying table 'vip'") + if cls.vip.apply()["hit"]: + # Use the same VIP that was in packet's destination if it's present in the VIP table + meta.u0_encap_data.underlay_sip = meta.rx_encap.underlay_dip + else: + UPDATE_COUNTER("vip_miss_drop", 0) + + if (meta.is_fast_path_icmp_flow_redirection_packet): + pass # Do Nothing + + py_log("info", "Applying table 'appliance'") + cls.appliance.apply() + + py_log("info", "Applying table 'internal_config'") + cls.internal_config.apply() diff --git a/dash-pipeline/py_model/data_plane/stages/routing_action_apply.py b/dash-pipeline/py_model/data_plane/stages/routing_action_apply.py new file mode 100644 index 000000000..4bd328f69 --- /dev/null +++ b/dash-pipeline/py_model/data_plane/stages/routing_action_apply.py @@ -0,0 +1,23 @@ +from py_model.libs.__utils import * +from py_model.data_plane.routing_actions.routing_actions import * +from py_model.data_plane.routing_actions.routing_action_encap_underlay import * +from py_model.data_plane.routing_actions.routing_action_nat46 import * +from py_model.data_plane.routing_actions.routing_action_nat64 import * +from py_model.data_plane.routing_actions.routing_action_nat_port import * +from py_model.data_plane.routing_actions.routing_action_set_mac import * + +class routing_action_apply: + @classmethod + def apply(cls): + py_log("info", "routing_action_apply") + do_action_nat46.apply() + do_action_nat64.apply() + do_action_snat_port.apply() + do_action_dnat_port.apply() + do_action_set_dmac.apply() + + # Encaps needs to be added after all other transforms, from inner ones to outer ones, + # because it requires the transforms on the inner packet to be finished in order to + # get the correct inner packet size and other informations. 
+ do_action_encap_u0.apply() + do_action_encap_u1.apply() diff --git a/dash-pipeline/py_model/data_plane/stages/trusted_vni.py b/dash-pipeline/py_model/data_plane/stages/trusted_vni.py new file mode 100644 index 000000000..92b2a40a9 --- /dev/null +++ b/dash-pipeline/py_model/data_plane/stages/trusted_vni.py @@ -0,0 +1,45 @@ +from py_model.libs.__utils import * +from py_model.libs.__table import * +from py_model.libs.__counters import * + +class trusted_vni_stage: + @staticmethod + def permit(): + pass + + global_trusted_vni = Table( + key={ + "meta.rx_encap.vni": (RANGE, {"name" : "vni_range"}) + }, + actions=[ + permit, + ], + tname=f"{__qualname__}.global_trusted_vni", + sai_table=SaiTable(api="dash_trusted_vni", single_match_priority="true", order=0, isobject="false",), + ) + + # eni_trusted_vni: matches on eni_id + vni range + eni_trusted_vni = Table( + key={ + "meta.eni_id" : (EXACT, {"type" : "sai_object_id_t"}), + "meta.rx_encap.vni" : (RANGE, {"name" : "vni_range"}) + }, + actions=[ + permit, + (deny, {"annotations": "@defaultonly"}) + ], + const_default_action=deny, + tname=f"{__qualname__}.eni_trusted_vni", + sai_table=SaiTable(api="dash_trusted_vni", single_match_priority="true", order=1,), + ) + + @classmethod + def apply(cls): + py_log("info", "Applying table 'global_trusted_vni'") + if cls.global_trusted_vni.apply()["hit"]: + return + + py_log("info", "Applying table 'eni_trusted_vni'") + if not cls.eni_trusted_vni.apply()["hit"]: + UPDATE_ENI_COUNTER("eni_trusted_vni_entry_miss_drop") + pass diff --git a/dash-pipeline/py_model/data_plane/stages/tunnel_stage.py b/dash-pipeline/py_model/data_plane/stages/tunnel_stage.py new file mode 100644 index 000000000..ae8f2d847 --- /dev/null +++ b/dash-pipeline/py_model/data_plane/stages/tunnel_stage.py @@ -0,0 +1,135 @@ +from py_model.libs.__utils import * +from py_model.libs.__table import * +from py_model.data_plane.routing_actions.routing_action_encap_underlay import * + + +class tunnel_stage: + if TARGET == TARGET_DPDK_PNA: + tunnel_data = meta_encap_data_t() + else: + tunnel_data = encap_data_t() + + @staticmethod + def set_tunnel_attrs(dash_encapsulation : Annotated[dash_encapsulation_t, {"type" : "sai_dash_encapsulation_t", "default_value" : "SAI_DASH_ENCAPSULATION_VXLAN", "create_only" : "true"}], + tunnel_key : Annotated[int, 24, {"create_only" : "true"}], + max_member_size : Annotated[int, 32, {"default_value" : "1", "create_only" : "true"}], + dip : Annotated[int, IPv4Address_size, {"type" : "sai_ip_address_t"}], + sip : Annotated[int, IPv4Address_size, {"type" : "sai_ip_address_t"}] + ): + + meta.dash_tunnel_max_member_size = max_member_size + + tunnel_stage.tunnel_data.dash_encapsulation = dash_encapsulation + tunnel_stage.tunnel_data.vni = tunnel_key + tunnel_stage.tunnel_data.underlay_sip = sip if sip != 0 else hdr.u0_ipv4.src_addr + tunnel_stage.tunnel_data.underlay_dip = dip + + @staticmethod + def select_tunnel_member(dash_tunnel_member_id: Annotated[int, 16]): + meta.dash_tunnel_member_id = dash_tunnel_member_id + + @staticmethod + def set_tunnel_member_attrs(dash_tunnel_id : Annotated[int, 16, {"type" : "sai_object_id_t", "mandatory" : "true", "create_only" : "true"}], + dash_tunnel_next_hop_id : Annotated[int, 16, {"type" : "sai_object_id_t", "mandatory" : "true"}]): + # dash_tunnel_id in tunnel member must match the metadata + assert meta.dash_tunnel_id == dash_tunnel_id + meta.dash_tunnel_next_hop_id = dash_tunnel_next_hop_id + + @staticmethod + def set_tunnel_next_hop_attrs(dip: Annotated[int, IPv4Address_size, 
{"type" : "sai_ip_address_t"}]): + assert dip != 0 + tunnel_stage.tunnel_data.underlay_dip = dip + + + tunnel = Table( + key = { + "meta.dash_tunnel_id": (EXACT, {"type": "sai_object_id_t"}) + }, + actions=[ + set_tunnel_attrs, + ], + tname=f"{__qualname__}.tunnel", + sai_table=SaiTable(name="dash_tunnel", api="dash_tunnel", order=0, isobject="true",), + ) + + # This table is a helper table that used to select the tunnel member based on the index. + # The entry of this table is created by DASH data plane app, when the tunnel member is created. + tunnel_member_select = Table( + key = { + "meta.dash_tunnel_member_index" : (EXACT, {"type" : "sai_object_id_t", "is_object_key": "true"}), + "meta.dash_tunnel_id" : (EXACT, {"type" : "sai_object_id_t"}) + }, + actions=[ + select_tunnel_member, + ], + tname=f"{__qualname__}.tunnel_member_select", + sai_table=SaiTable(ignored="true",), + ) + + tunnel_member = Table( + key = { + "meta.dash_tunnel_member_id": (EXACT, {"type" : "sai_object_id_t", "is_object_key": "true"}) + }, + actions=[ + set_tunnel_member_attrs, + ], + tname=f"{__qualname__}.tunnel_member", + sai_table=SaiTable(name="dash_tunnel_member", api="dash_tunnel", order=1, isobject="true",), + ) + + tunnel_next_hop = Table( + key = { + "meta.dash_tunnel_next_hop_id": (EXACT, {"type": "sai_object_id_t"}) + }, + actions=[ + set_tunnel_next_hop_attrs, + ], + tname=f"{__qualname__}.tunnel_next_hop", + sai_table=SaiTable(name="dash_tunnel_next_hop", api="dash_tunnel", order=2,isobject="true",), + ) + + @classmethod + def apply(cls): + if meta.dash_tunnel_id == 0: + return + + py_log("info", "Applying table 'tunnel'") + cls.tunnel.apply() + + # If max member size is greater than 1, the tunnel is programmed with multiple members. + if meta.dash_tunnel_max_member_size > 1: + if TARGET == TARGET_PYTHON_V1MODEL: + # Select tunnel member based on the hash of the packet tuples. 
+ meta.dash_tunnel_member_index = ( + hash((meta.dst_ip_addr, meta.src_ip_addr, + meta.src_l4_port, meta.dst_l4_port)) + % meta.dash_tunnel_max_member_size + ) + else: + meta.dash_tunnel_member_index = 0 + + py_log("info", "Applying table 'tunnel_member_select'") + cls.tunnel_member_select.apply() + py_log("info", "Applying table 'tunnel_member'") + cls.tunnel_member.apply() + py_log("info", "Applying table 'tunnel_next_hop'") + py_log("info", "Applying table 'tunnel_next_hop'") + cls.tunnel_next_hop.apply() + + # Encapsulation push + if (meta.routing_actions & dash_routing_actions_t.ENCAP_U0) == 0: + meta.tunnel_pointer = 0 + push_action_encap_u0( + encap=tunnel_stage.tunnel_data.dash_encapsulation, + vni=tunnel_stage.tunnel_data.vni, + underlay_sip=tunnel_stage.tunnel_data.underlay_sip, + underlay_dip=tunnel_stage.tunnel_data.underlay_dip + ) + else: + meta.tunnel_pointer = 1 + push_action_encap_u1( + encap=tunnel_stage.tunnel_data.dash_encapsulation, + vni=tunnel_stage.tunnel_data.vni, + underlay_sip=tunnel_stage.tunnel_data.underlay_sip, + underlay_dip=tunnel_stage.tunnel_data.underlay_dip + ) diff --git a/dash-pipeline/py_model/libs/__counters.py b/dash-pipeline/py_model/libs/__counters.py new file mode 100644 index 000000000..3b64324d2 --- /dev/null +++ b/dash-pipeline/py_model/libs/__counters.py @@ -0,0 +1,153 @@ +from enum import Enum +from typing import Dict, List, Optional, Any + +from py_model.libs.__utils import * +from py_model.data_plane.dash_metadata import * + +class CounterType(Enum): + PACKETS = "PACKETS" + BYTES = "BYTES" + PACKETS_AND_BYTES = "BOTH" + +class SaiCounter: + def __init__(self, name: Optional[str] = None, type: Optional[str] = None, isresourcetype: Optional[bool] = None, + objects: Optional[List[str]] = None, isreadonly: Optional[bool] = None, default_value: Optional[int] = None, + ctr_name: Optional[str] = None, size: Optional[int] = None, counter_type: Optional[CounterType] = None, + attr_type: str = "stats", action_names: Optional[List[str]] = None, no_suffix: bool = False, + ): + self.name = name + self.type = type + self.isresourcetype = isresourcetype + self.objects = objects + self.isreadonly = isreadonly + self.default_value = default_value + + self.ctr_name = ctr_name + self.size = size + self.counter_type = counter_type + self.attr_type = attr_type + self.action_names = action_names if action_names is not None else [] + self.no_suffix = no_suffix + +class Counter: + def __init__(self, config: SaiCounter): + self.config = config + self._counters = [0] * config.size + + def count(self, index: int, value: int = 1) -> None: + if 0 <= index < self.config.size: + self._counters[index] += value + else: + py_log("INFO", f"Counter index {index} out of range for {self.config.ctr_name}") + +class DashCounters: + _counters: Dict[str, Counter] = {} + + @classmethod + def get(cls, name: str) -> Optional[Counter]: + return cls._counters.get(name) + + @classmethod + def put(cls, name: str, counter: Counter) -> None: + cls._counters[name] = counter + +class DirectCounter: + def __init__(self, name: str, counter_type: CounterType = CounterType.PACKETS_AND_BYTES): + self.name = name + self.counter_type = counter_type + self._values: Dict[Any, int] = {} + + def count(self, entry_key: Any, value: int = 1) -> None: + self._values[entry_key] = self._values.get(entry_key, 0) + value + +class DashTableCounters: + _counters: Dict[str, DirectCounter] = {} + _attachments: Dict[str, str] = {} # table_name -> counter_name + + @classmethod + def get(cls, name: str) -> 
Optional[DirectCounter]: + return cls._counters.get(name) + + @classmethod + def put(cls, name: str, counter: DirectCounter) -> None: + cls._counters[name] = counter + + @classmethod + def attach(cls, table_name: str, counter_name: str) -> None: + cls._attachments[table_name] = counter_name + +def DEFINE_COUNTER(ctr_name: str, count: int, name: Optional[str] = None, counter_type: CounterType = CounterType.PACKETS_AND_BYTES, + attr_type: str = "stats", action_names: Optional[List[str]] = None, no_suffix: bool = False, **kwargs,) -> Counter: + cfg = SaiCounter(ctr_name=ctr_name, name=name, size=count, counter_type=counter_type, + attr_type=attr_type, action_names=action_names, no_suffix=no_suffix, **kwargs,) + + existing = DashCounters.get(ctr_name) + if existing: + py_log("INFO", f"Counter already defined: {ctr_name}") + return existing + + counter = Counter(cfg) + DashCounters.put(ctr_name, counter) + # py_log("INFO", f"Created counter: {ctr_name}") + return counter + + +def DEFINE_PACKET_COUNTER(ctr_name: str, count: int, name: Optional[str] = None, **kwargs) -> Counter: + return DEFINE_COUNTER(ctr_name, count, name, CounterType.PACKETS, **kwargs) + +def DEFINE_BYTE_COUNTER(ctr_name: str, count: int, name: Optional[str] = None, **kwargs) -> Counter: + return DEFINE_COUNTER(ctr_name, count, name, CounterType.BYTES, **kwargs) + +def DEFINE_HIT_COUNTER(ctr_name: str, count: int, name: Optional[str] = None, **kwargs) -> Counter: + return DEFINE_COUNTER(ctr_name, count, name, CounterType.PACKETS, no_suffix=True, **kwargs) + +def UPDATE_COUNTER(ctr_name: str, index: int, value: int = 1) -> None: + ctr = DashCounters.get(ctr_name) + if ctr: + ctr.count(index, value) + else: + py_log("INFO", f"Counter {ctr_name} not found") + +def DEFINE_ENI_COUNTER(ctr_name: str, name: Optional[str] = None, **kwargs) -> Counter: + return DEFINE_COUNTER(ctr_name, MAX_ENI, name, CounterType.PACKETS_AND_BYTES, action_names="set_eni_attrs", **kwargs,) + +def DEFINE_ENI_PACKET_COUNTER(ctr_name: str, name: Optional[str] = None, **kwargs) -> Counter: + return DEFINE_PACKET_COUNTER(ctr_name, MAX_ENI, name, action_names="set_eni_attrs", **kwargs) + +def DEFINE_ENI_BYTE_COUNTER(ctr_name: str, name: Optional[str] = None, **kwargs) -> Counter: + return DEFINE_BYTE_COUNTER(ctr_name, MAX_ENI, name, action_names="set_eni_attrs", **kwargs) + +def DEFINE_ENI_HIT_COUNTER(ctr_name: str, name: Optional[str] = None, **kwargs) -> Counter: + return DEFINE_HIT_COUNTER(ctr_name, MAX_ENI, name, action_names="set_eni_attrs", **kwargs) + +# ENI-level data plane flow sync request counters: +# - Depends on implementations, the flow sync request could be batched, hence they need to tracked separately. +# - The counters are defined as combination of following things: +# - 3 flow sync operations: create, update, delete. +# - 2 ways of sync: Inline sync and timed sync. +# - Request result: succeeded, failed (unexpected) and ignored (expected and ok to ignore, e.g., more packets arrives before flow sync is acked). 
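+# Illustrative usage (hypothetical counter name): DEFINE_ENI_FLOW_SYNC_COUNTERS("flow_create")
+# defines hit counters such as inline_flow_create_req_sent, inline_flow_create_ack_ignored,
+# timed_flow_create_req_failed, ... -- one per (sync mode, request/ack outcome) combination.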
+def DEFINE_ENI_FLOW_SYNC_COUNTERS(counter_name: str) -> list[str]: + parts = [ + "req_sent", "req_recv", "req_failed", "req_ignored", + "ack_recv", "ack_failed", "ack_ignored", + ] + for prefix in ("inline", "timed"): + for p in parts: + name = f"{prefix}_{counter_name}_{p}" + DEFINE_ENI_HIT_COUNTER(name) + + +def UPDATE_ENI_COUNTER(ctr_name: str, value: int = 1) -> None: + UPDATE_COUNTER(ctr_name, meta.eni_id, value) + + +def DEFINE_TABLE_COUNTER(ctr_name: str, counter_type: CounterType = CounterType.PACKETS_AND_BYTES) -> DirectCounter: + ctr = DirectCounter(ctr_name, counter_type) + DashTableCounters.put(ctr_name, ctr) + # py_log("INFO", f"Created direct counter: {ctr_name}") + return ctr + + +def ATTACH_TABLE_COUNTER(ctr_name: str, table_name: Optional[str] = None) -> None: + DashTableCounters.attach(table_name, ctr_name) + # py_log("INFO", f"Attached direct counter '{ctr_name}' to table '{table_name}'") diff --git a/dash-pipeline/py_model/libs/__id_map.py b/dash-pipeline/py_model/libs/__id_map.py new file mode 100755 index 000000000..137e6b9d6 --- /dev/null +++ b/dash-pipeline/py_model/libs/__id_map.py @@ -0,0 +1,48 @@ + +UNSPECIFIED = 0x00 +ACTION = 0x01 +TABLE = 0x02 +COUNTER = 0x12 +DIRECT_COUNTER = 0x13 +# VALUE_SET = PyIds_Prefix_VALUE_SET +# CONTROLLER_HEADER = PyIds_Prefix_CONTROLLER_HEADER +# PSA_EXTERNS_START = PyIds_Prefix_PSA_EXTERNS_START +# ACTION_PROFILE = PyIds_Prefix_ACTION_PROFILE +# METER = PyIds_Prefix_METER +# DIRECT_METER = PyIds_Prefix_DIRECT_METER +# REGISTER = PyIds_Prefix_REGISTER +# DIGEST = PyIds_Prefix_DIGEST +# OTHER_EXTERNS_START = PyIds_Prefix_OTHER_EXTERNS_START +# MAX = PyIds_Prefix_MAX + +# Global registries +table_ids = {} # {tid: table_name} +table_objs = {} # {table_name: Table} +action_ids = {} # {aid: action_name} +action_objs = {} # {action_name: callable} +counter_ids = {} # {cid: counter_name} +direct_counter_ids = {} # {dcid: counter_name} + +def compute_hash(key: str) -> int: + key_bytes = key.encode('utf-8') + hash_val = 0 + for b in key_bytes: + hash_val = (hash_val + b) & 0xFFFFFFFF + hash_val = (hash_val + (hash_val << 10)) & 0xFFFFFFFF + hash_val = (hash_val ^ (hash_val >> 6)) & 0xFFFFFFFF + hash_val = (hash_val + (hash_val << 3)) & 0xFFFFFFFF + hash_val = (hash_val ^ (hash_val >> 11)) & 0xFFFFFFFF + hash_val = (hash_val + (hash_val << 15)) & 0xFFFFFFFF + return hash_val + +def gen_symbol_id(name: str, prefix: int) -> int: + """ + Generate a P4Runtime ID just like p4c: + - prefix = 8-bit object type code (e.g., 0x01 action, 0x02 table). + - suffix = 24-bit Jenkins hash of name. 
+ - ID = (prefix << 24) | suffix + """ + x = compute_hash(name) + suffix = x & 0xFFFFFF + id = (prefix << 24) | suffix + return id diff --git a/dash-pipeline/py_model/libs/__init__.py b/dash-pipeline/py_model/libs/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/dash-pipeline/py_model/libs/__jsonize.py b/dash-pipeline/py_model/libs/__jsonize.py new file mode 100755 index 000000000..e0259a8d9 --- /dev/null +++ b/dash-pipeline/py_model/libs/__jsonize.py @@ -0,0 +1,60 @@ + +def _write_str(file, str_value): + file.write('"') + file.write(str_value) + file.write('"') + +def _write_int(file, int_value): + file.write(str(int_value)) + +def _write_bool(file, bool_value): + file.write(str(bool_value)) + +def _write_dict(file, dict_value, level): + file.write("{\n") + i = 0 + for k in dict_value: + file.write(" " * level) + file.write('"') + file.write(k) + file.write('": ') + _write_value(file, dict_value[k], level + 1) + if i < len(dict_value) - 1: + file.write(',') + file.write("\n") + i += 1 + file.write(" " * (level - 1)) + file.write("}") + +def _write_list(file, list_value, level): + file.write("[\n") + i = 0 + for item in list_value: + file.write(" " * level) + _write_value(file, item, level + 1) + if i < len(list_value) - 1: + file.write(',') + file.write("\n") + i += 1 + file.write(" " * (level - 1)) + file.write("]") + +def _write_value(file, value, level): + value_type = type(value) + if value_type == str: + _write_str(file, value) + elif value_type == dict: + _write_dict(file, value, level) + elif value_type == list: + _write_list(file, value, level) + elif value_type == int: + _write_int(file, value) + elif value_type == bool: + _write_bool(file, value) + else: + raise ValueError("Type not supported") + +def jsonize(file_name, value): + file = open(file_name, "w") + _write_value(file, value, 1) + file.close() diff --git a/dash-pipeline/py_model/libs/__obj_classes.py b/dash-pipeline/py_model/libs/__obj_classes.py new file mode 100644 index 000000000..cd55626d0 --- /dev/null +++ b/dash-pipeline/py_model/libs/__obj_classes.py @@ -0,0 +1,20 @@ +from py_model.data_plane.dash_acl import * +from py_model.data_plane.dash_inbound import * +from py_model.data_plane.dash_pipeline import * +from py_model.data_plane.dash_underlay import * +from py_model.data_plane.dash_outbound import * +from py_model.data_plane.dash_conntrack import * + +from py_model.data_plane.stages.ha import * +from py_model.data_plane.stages.eni_lookup import * +from py_model.data_plane.stages.trusted_vni import * +from py_model.data_plane.stages.pre_pipeline import * +from py_model.data_plane.stages.metering_update import * +from py_model.data_plane.stages.direction_lookup import * +from py_model.data_plane.stages.conntrack_lookup import * +from py_model.data_plane.stages.tunnel_stage import * +from py_model.data_plane.stages.outbound_pre_routing_action_apply import * +from py_model.data_plane.stages.routing_action_apply import * +from py_model.data_plane.stages.inbound_routing import * +from py_model.data_plane.stages.outbound_routing import * +from py_model.data_plane.stages.outbound_mapping import * diff --git a/dash-pipeline/py_model/libs/__packet_in.py b/dash-pipeline/py_model/libs/__packet_in.py new file mode 100755 index 000000000..fc123942d --- /dev/null +++ b/dash-pipeline/py_model/libs/__packet_in.py @@ -0,0 +1,78 @@ +from enum import Enum +from bitarray import bitarray +from bitarray.util import ba2int +from inspect import get_annotations +from typing import get_origin, get_args, Annotated 
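+# Minimal bit-level parser: extract() walks a header class's annotated fields in declaration
+# order, slices each field's width (taken from Annotated[int, width] metadata or an enum's
+# __bitwidth__) out of the captured bitarray, and converts the raw integer back into an int
+# or enum member. It returns None when fewer bits remain than the header requires.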
+ +class packet_in: + def __init__(self): + self.reset() + + def reset(self): + self.data = bitarray(endian="big") + self.index = 0 + + def set_data(self, data: bytes): + self.data.frombytes(data) + + def extract(self, hdr_type): + hdr = hdr_type() + annotations = get_annotations(hdr_type) + for field_name, field_type in annotations.items(): + width = self._extract_bit_width(field_type, field_name) + + if self.index + width > len(self.data): + return None # Not enough bits left + + raw_value = ba2int(self.data[self.index : self.index + width]) + value = self._convert_from_int(raw_value, field_type, field_name) + + setattr(hdr, field_name, value) + self.index += width + + return hdr + + def get_pkt_size(self) -> int: + return len(self.data) // 8 + + def get_unparsed_slice(self) -> bitarray: + return self.data[self.index:] + + # ------------------- Internal helpers ------------------- + + def _extract_bit_width(self, field_type, field_name) -> int: + if get_origin(field_type) is Annotated: + base_type, *metadata = get_args(field_type) + return metadata[0] + + if isinstance(field_type, type) and issubclass(field_type, Enum): + if hasattr(field_type, "__bitwidth__"): + return field_type.__bitwidth__ + + if field_type is int: + raise ValueError(f"Field '{field_name}' is int but missing Annotated metadata") + + raise TypeError(f"Cannot determine width for field '{field_name}' of type '{field_type}'") + + def _convert_from_int(self, value: int, field_type, field_name): + try: + # Handle Annotated fields + if get_origin(field_type) is Annotated: + base_type, *_ = get_args(field_type) + if base_type is int: + return value + if issubclass(base_type, Enum): + return base_type(value) + + # Handle Enums directly + if isinstance(field_type, type) and issubclass(field_type, Enum): + return field_type(value) + + # Plain int + if field_type is int: + return value + + except Exception as e: + raise ValueError(f"Cannot map int '{value}' to field '{field_name}' of type '{field_type}'") from e + + raise TypeError(f"Unsupported field type '{field_type}' for field '{field_name}'") diff --git a/dash-pipeline/py_model/libs/__packet_out.py b/dash-pipeline/py_model/libs/__packet_out.py new file mode 100755 index 000000000..e04a6eaaf --- /dev/null +++ b/dash-pipeline/py_model/libs/__packet_out.py @@ -0,0 +1,45 @@ +from inspect import * +from enum import Enum +from bitarray import bitarray +from bitarray.util import int2ba +from typing import get_origin, get_args, Annotated + + +class packet_out: + def __init__(self): + self.reset() + + def reset(self): + self.data = bitarray(endian="big") + self.index = 0 + + def emit(self, hdr): + if hdr: + annotations = get_annotations(type(hdr)) + for field_name, field_type in annotations.items(): + width = self._extract_bit_width(field_type, field_name) #width 48/32/16/8 + value = getattr(hdr, field_name) #Field value + value = self._convert_to_int(value, field_name) + if width <= 0: + raise ValueError(f"Field '{field_name}': bit width must be > 0") + self.data.extend(int2ba(value, width)) + + def _extract_bit_width(self, field_type, field_name) -> int: + if get_origin(field_type) is Annotated: + base_type, *metadata = get_args(field_type) + return metadata[0] + + if isinstance(field_type, type) and issubclass(field_type, Enum): + if hasattr(field_type, "__bitwidth__"): + return field_type.__bitwidth__ + + if field_type is int: + raise ValueError(f"Field '{field_name}' is int but missing Annotated metadata") + + raise TypeError(f"Cannot determine width for field '{field_name}' of 
type '{field_type}'") + + def _convert_to_int(self, value, field_name) -> int: + try: + return value.value if isinstance(value, Enum) else int(value) + except Exception as e: + raise ValueError(f"Cannot convert '{value}' to int for field '{field_name}'") from e diff --git a/dash-pipeline/py_model/libs/__standard_metadata.py b/dash-pipeline/py_model/libs/__standard_metadata.py new file mode 100755 index 000000000..333f18738 --- /dev/null +++ b/dash-pipeline/py_model/libs/__standard_metadata.py @@ -0,0 +1,30 @@ +from typing import * +from inspect import * + +class standard_metadata_t: + egress_spec : int + + def __init__(self): + self.ingress_port = 0 + self.egress_spec = 0 + self.reset() + + def reset(self): + annotations = get_annotations(type(self)) + for k in annotations: + setattr(self, k, None) + +_DROP_PORT = 511 + +def mark_to_drop(standard_metadata: standard_metadata_t): + standard_metadata.egress_spec = _DROP_PORT + +def is_dropped(standard_metadata: standard_metadata_t): + return standard_metadata.egress_spec == _DROP_PORT + +def NoAction(): + pass + + def __init__(self): + self.reset() + diff --git a/dash-pipeline/py_model/libs/__table.py b/dash-pipeline/py_model/libs/__table.py new file mode 100755 index 000000000..9c99b6de2 --- /dev/null +++ b/dash-pipeline/py_model/libs/__table.py @@ -0,0 +1,263 @@ +from inspect import get_annotations +from py_model.libs.__utils import * +from py_model.libs.__id_map import * + +class SaiTable: + def __init__(self, **kwargs): + self.name = kwargs.get("name") + self.api = kwargs.get("api") + self.api_type = kwargs.get("api_type") + self.order = kwargs.get("order") + self.stage = kwargs.get("stage") + self.isobject = kwargs.get("isobject") + self.ignored = kwargs.get("ignored") + self.match_type = kwargs.get("match_type") + self.single_match_priority = kwargs.get("single_match_priority") + self.enable_bulk_get_api = kwargs.get("enable_bulk_get_api") + self.enable_bulk_get_server = kwargs.get("enable_bulk_get_server") + +class SaiVal: + def __init__(self, **kwargs): + self.name = kwargs.get("name") + self.type = kwargs.get("type") + self.default_value = kwargs.get("default_value") + self.isresourcetype = kwargs.get("isresourcetype") + self.is_object_key = kwargs.get("is_object_key") + self.objects = kwargs.get("objects") + self.isreadonly = kwargs.get("isreadonly") + self.iscreateonly = kwargs.get("iscreateonly") + self.match_type = kwargs.get("match_type") + self.ismandatory = kwargs.get("ismandatory") + self.skipattr = kwargs.get("skipattr") + +class Entry: + class Ternary: + def __init__(self, value: int = 0, mask: int = 0): + self.value = value + self.mask = mask + + def __eq__(self, other): + return (isinstance(other, type(self)) and + self.value == other.value and + self.mask == other.mask) + + class LPM: + def __init__(self, value: int = 0, prefix_len: int = 0): + self.value = value + self.prefix_len = prefix_len + + def __eq__(self, other): + return (isinstance(other, type(self)) and + self.value == other.value and + self.prefix_len == other.prefix_len) + + class Range: + def __init__(self, low: int = 0, high: int = 0): + self.low = low + self.high = high + + def __eq__(self, other): + return (isinstance(other, type(self)) and + self.low == other.low and + self.high == other.high) + + def __init__(self, + values: list = None, + action: Callable = None, + params: list[int] = None, + priority: int = 0): + self.values = values or [] + self.action = action + self.params = params or [] + self.priority = priority + +def _read_value(input): + 
tokens = input.split(".") + container = globals()[tokens[0]] + var_name = tokens[1] + var = getattr(container, var_name) + for token in tokens[2:]: + container = var + var_name = token + var = getattr(container, var_name) + width = (get_annotations(type(container))[var_name].__metadata__)[0] + return (var, width) + +def EXACT(key: str, entry_value: int, match_value: int, width: int): + curr_entry[key] = hex(entry_value) + return entry_value == match_value + +def TERNARY(key: str, entry_value: Entry.Ternary, match_value: int, width: int): + val, mask = entry_value.value, entry_value.mask + curr_entry[key] = hex(val) + return (val & mask) == (match_value & mask) + +def LIST(key: str, entry_value: list[Entry.Ternary], match_value: int, width: int): + for ev in entry_value: + if TERNARY(key, ev, match_value, width): + return True + return False + +def RANGE(key: str, entry_value: Entry.Range, match_value: int, width: int): + curr_entry[key] = hex(entry_value.low) + return entry_value.low <= match_value <= entry_value.high + +def RANGE_LIST(key: str, entry_value: list[Entry.Range], match_value: int, width: int): + for ev in entry_value: + if RANGE(key, ev, match_value, width): + return True + return False + +def LPM(key: str, entry_value: Entry.LPM, match_value: int, width: int): + value = entry_value.value + prefix_len = entry_value.prefix_len + prefix_len = max(0, min(prefix_len, width)) + + if isinstance(value, str): + value = int(value, 16) + if isinstance(prefix_len, str): + prefix_len = int(prefix_len) + + mask = ((1 << prefix_len) - 1) << (width - prefix_len) + curr_entry[key] = hex(value) + return (value & mask) == (match_value & mask) + +def _winning_criteria_PRIORITY(a: Entry, b: Entry, key): + return a.priority < b.priority + +def _winning_criteria_PREFIX_LEN(a: Entry, b: Entry, key): + for idx, k in enumerate(key): + if key[k] == LPM: + return a.values[idx].prefix_len > b.values[idx].prefix_len + return False + +class Table: + def __init__(self, key, actions, default_action=NoAction, + const_default_action=None, default_params=None, + tname=None, sai_table=None): + + if not tname: + raise ValueError("Each table must have a unique 'tname'") + + self.entries = {} + self.entry_cnt = 0 + self.default_params = default_params or [] + self.sai_table = sai_table or SaiTable(name=tname) + self.const_default_action = self.const_default_action_id = None + self.default_action = self.default_action_id = None + self.actions = [] + table_objs[tname] = self + + self.key, self.sai_val = {}, {} + for k, v in key.items(): + if isinstance(v, tuple): + match_type, meta = v + self.key[k] = match_type + self.sai_val[k] = SaiVal(**meta) + else: + self.key[k] = v + + has_no_action = False + for act in actions or []: + func, hints = act if isinstance(act, tuple) else (act, {}) + self._register_action(func, hints) + self.actions.append((func, hints)) + + # handle @defaultonly or NoAction flags + if hints.get("annotations") == "@defaultonly" or func is NoAction: + self.default_action = default_action + has_no_action = True + + if const_default_action is not None: + # constant default overrides everything else + self._register_action(const_default_action) + self.const_default_action = const_default_action + self.default_action = self.default_action_id = None + elif not has_no_action and not any(f is default_action for f, _ in self.actions): + # ensure NoAction is always registered if not present + self._register_action(default_action) + self.actions.append((NoAction, {})) + self.default_action = default_action 
+ self.const_default_action = self.const_default_action_id = None + + def _register_action(self, func, hints=None): + real_func = func.__func__ if isinstance(func, staticmethod) else func + name = getattr(real_func, "__qualname__", getattr(real_func, "__name__", str(real_func))) + action_objs.setdefault(name, (func, hints or {})) + + def insert(self, hash, entry): + if hash in self.entries: + py_log("warn", f"Entry already exists for hash {hash}") + else: + self.entry_cnt += 1 + self.entries[hash] = entry + + def update(self, hash, entry): + if hash in self.entries: + self.entries[hash] = entry + else: + py_log("warn", f"No entry found to update for hash {hash}") + + def delete(self, hash): + if hash in self.entries: + self.entry_cnt -= 1 + del self.entries[hash] + return RETURN_SUCCESS + # py_log("warn", f"No entry found to delete for hash {hash}") + return RETURN_FAILURE + + def apply(self): + entry = self.__lookup() + if entry: + show_matched_entry(curr_entry) + + action = (entry.action if entry else self.default_action or self.const_default_action) + params = (entry.params if entry else self.default_params) + + py_log("info", f"Table {'HIT' if entry else 'MISS'}") + py_log("info", f"Action entry: {action.__name__}\n") + + action(*params) + return {"hit": bool(entry), "action_run": action} + + def __match_entry(self, entry: Entry): + for idx, (k, match_routine) in enumerate(self.key.items()): + if idx >= len(entry.values): + break + + match_value, width = _read_value(k) + entry_value = entry.values[idx] + + if isinstance(entry_value, str): + entry_value = int(entry_value, 16) + if isinstance(match_value, str): + match_value = int(match_value, 16) + + result = match_routine(k, entry_value, match_value, width) + + if not result: + # py_log("error", f"Match failed for key[{idx}], returning False.") + return False + + return True + + def __get_all_matching_entries(self): + return [e for e in self.entries.values() if self.__match_entry(e)] + + def __get_winning_criteria(self): + if any(v == LPM for v in self.key.values()): + return _winning_criteria_PREFIX_LEN + return _winning_criteria_PRIORITY + + def __select_winning_entry(self, matches): + crit = self.__get_winning_criteria() + winner = matches[0] + for e in matches[1:]: + if crit(e, winner, self.key): + winner = e + return winner + + def __lookup(self): + py_log("info", "Looking up key:") + matches = self.__get_all_matching_entries() + return self.__select_winning_entry(matches) if matches else None diff --git a/dash-pipeline/py_model/libs/__utils.py b/dash-pipeline/py_model/libs/__utils.py new file mode 100644 index 000000000..8c4bdb449 --- /dev/null +++ b/dash-pipeline/py_model/libs/__utils.py @@ -0,0 +1,75 @@ + +import logging +from py_model.libs.__id_map import * +from py_model.libs.__packet_in import * +from py_model.libs.__packet_out import * +from py_model.libs.__standard_metadata import * +from py_model.data_plane.dash_headers import * +from py_model.data_plane.dash_metadata import * + +# target definition +TARGET_DPDK_PNA = 0 +TARGET_BMV2_V1MODEL = 1 +TARGET_PYTHON_V1MODEL = 2 + +TARGET = TARGET_PYTHON_V1MODEL + +STATEFUL_P4 = 0 +PNA_CONNTRACK = 0 + +RETURN_SUCCESS = 0 +RETURN_FAILURE = -1 + + +hdr = headers_t() +meta = metadata_t() +standard_metadata = standard_metadata_t() +pkt_in = packet_in() +pkt_out = packet_out() + + +def deny(): + meta.dropped = True + +curr_entry = {} + +def show_matched_entry(entry_dict): + max_key_len = max(len(k) for k in entry_dict.keys()) + + for key, value in entry_dict.items(): + py_log(None, 
f"* {key:<{max_key_len}}: {value}") + py_log(None, "") + curr_entry.clear() + + +# Configure logging +logging.basicConfig( + level=logging.DEBUG, + format="%(asctime)s [pymodel] [%(levelname)s] %(filename)s:%(lineno)d - %(message)s", + datefmt="%Y-%m-%d %H:%M:%S" +) + +def py_log(level=None, *args, **kwargs): + msg_parts = [str(arg) for arg in args] + if kwargs: + msg_parts.append(str(kwargs)) + message = " ".join(msg_parts) + + if not level: + print(message) + return + + level = level.lower() + if level == "debug": + logging.debug(message, stacklevel=2) + elif level == "info": + logging.info(message, stacklevel=2) + elif level == "warn": + logging.warning(message, stacklevel=2) + elif level == "error": + logging.error(message, stacklevel=2) + elif level == "critical": + logging.critical(message, stacklevel=2) + else: + print(message) + diff --git a/dash-pipeline/py_model/main_dash.py b/dash-pipeline/py_model/main_dash.py new file mode 100644 index 000000000..0d1592217 --- /dev/null +++ b/dash-pipeline/py_model/main_dash.py @@ -0,0 +1,75 @@ +import sys +import signal +import threading +from scapy.all import sniff, sendp, Ether +from py_model.dash_py_v1model import dash_py_model +from py_model.control_plane.grpc_server import serve +from py_model.libs.__utils import py_log, standard_metadata + +iface_list = [] + +def sniff_packet() -> None: + """Capture packets on configured interfaces and process them.""" + def process_packet(pkt: Ether) -> None: + raw_bytes = bytes(pkt) + py_log("info", f"Processing packet received on {pkt.sniffed_on}") + + result = dash_py_model(raw_bytes) + if not result: + return + + ether_frame = Ether(result) + + egress_idx = standard_metadata.egress_spec + if egress_idx < len(iface_list): + egress_port = iface_list[egress_idx] + py_log("info", f"Transmitting {len(ether_frame)} bytes out of port {egress_port}\n") + sendp(ether_frame, iface=egress_port, verbose=False) + else: + py_log("warn", f"Egress port index {egress_idx} out of range — dropping packet.") + + sniff(iface=iface_list, prn=process_packet, store=False, filter="inbound") + + +def setup_interfaces(args: list[str]) -> None: + """Parse command-line arguments and populate iface_list.""" + if len(args) < 3: + py_log(None, "\nUsage: python3 -m py_model.main_dash '' '' ['']") + sys.exit(1) + + iface_list.extend(args[1:4]) # add 2 or 3 interfaces + py_log(None, "") # blank line for readability + + for idx, iface in enumerate(iface_list): + role = "(DPAPP)" if idx == 2 else "" + py_log(None, f"Adding interface {iface} as port {idx} {role}") + py_log(None, "") + + +def main() -> None: + """Main entry point for running the DASH Python model.""" + setup_interfaces(sys.argv) + + # Start gRPC server + server_thread = threading.Thread(target=serve, daemon=True) + server_thread.start() + + # Start packet sniffer + sniff_thread = threading.Thread(target=sniff_packet, daemon=True) + sniff_thread.start() + + # Graceful shutdown handler + def handle_exit(signum, frame): + py_log(None, "\nStopping Python DASH model...") + sys.exit(0) + + signal.signal(signal.SIGINT, handle_exit) + signal.signal(signal.SIGTERM, handle_exit) + + # Keep threads alive + server_thread.join() + sniff_thread.join() + + +if __name__ == "__main__": + main() diff --git a/dash-pipeline/py_model/requirements.txt b/dash-pipeline/py_model/requirements.txt new file mode 100644 index 000000000..e874f56ae --- /dev/null +++ b/dash-pipeline/py_model/requirements.txt @@ -0,0 +1,11 @@ +bitarray +scapy +p4runtime +openpyxl +cerberus +python-docx +pyyaml +grpcio 
+google-cloud-storage +protobuf==3.20.* +googleapis-common-protos \ No newline at end of file diff --git a/dash-pipeline/py_model/scripts/__init__.py b/dash-pipeline/py_model/scripts/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/dash-pipeline/py_model/scripts/artifacts_gen.py b/dash-pipeline/py_model/scripts/artifacts_gen.py new file mode 100644 index 000000000..9d6816a9a --- /dev/null +++ b/dash-pipeline/py_model/scripts/artifacts_gen.py @@ -0,0 +1,663 @@ +#!/usr/bin/env python3 +""" +Optimized generator for py_p4rt.json from dash_py_model in-memory model. +- Caches expensive reflection (enum/member lookups, annotation width reads) +- Uses comprehensions where appropriate +- Simplifies/clarifies structured-annotation extraction +- Keeps behavior compatible with your original code +""" + +import os +import re +import sys +import enum +import json +import base64 +import inspect +from functools import lru_cache +from collections import OrderedDict +from typing import Annotated, Optional, get_origin, get_args, get_type_hints as get_annotations + +from py_model.scripts.gen_ir import gen_ir +from py_model.scripts.gen_table_chain import generate_table_chain +from py_model.scripts.gen_action_chain import generate_action_chain +from py_model.scripts.gen_counter_chain import generate_counter_chain +from py_model.scripts.gen_global_actions_chain import generate_global_actions_chain + +from py_model.libs.__table import * +from py_model.libs.__id_map import * +from py_model.libs.__jsonize import * +from py_model.libs.__counters import * +from py_model.dash_py_v1model import * + +_isclass = inspect.isclass +_isfunction = inspect.isfunction +_getmembers = inspect.getmembers +_enum_types = (enum.IntEnum, enum.IntFlag) +_RE_CAMEL1 = re.compile(r'(.)([A-Z][a-z]+)') +_RE_CAMEL2 = re.compile(r'([a-z0-9])([A-Z])') + +project_dir = "py_model/data_plane" +func_set = [] +func_chain = [] +act_alias_names = [] +table_chain = [] +tab_alias_names = [] +counter_chain = [] +direct_counter_chain = [] +GLOBAL_NAMES = globals() + +class SafeEncoder(json.JSONEncoder): + def default(self, o): + # enum members -> their value + if isinstance(o, enum.Enum): + return o.value + # enum types -> mapping name->value + if inspect.isclass(o) and issubclass(o, enum.Enum): + return {e.name: e.value for e in o} + # callables -> name + if callable(o): + return getattr(o, "__name__", str(o)) + return super().default(o) + +def is_int_str(val: str) -> bool: + if not isinstance(val, str): + return False + return val.lstrip("-").isdigit() + +def format_scalar(val): + if isinstance(val, str): + return json.dumps(val) + elif isinstance(val, bool): + return "true" if val else "false" + return str(val) + +def to_snake_case(name): + # camelCase or PascalCase → snake_case + s1 = _RE_CAMEL1.sub(r'\1_\2', name) + return _RE_CAMEL2.sub(r'\1_\2', s1).lower() + +def base64_to_escaped(b64_str): + # Decode base64 to bytes + decoded_bytes = base64.b64decode(b64_str) + # Convert each byte to \ooo (octal) escaped format + escaped = ''.join(f'\\{byte:03o}' for byte in decoded_bytes) + return escaped + +def dict_to_textproto(d: dict, indent: int = 0, parent_key: str = "") -> str: + """Recursively dumps a dict/list into Protobuf text format style.""" + lines = [] + pad = " " * indent + + if isinstance(d, dict): + for key, value in d.items(): + key = to_snake_case(key) + + # Special-case for serializable_enums (map field) + if key == "serializable_enums" and isinstance(value, dict): + for map_key, map_val in value.items(): + 
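+                    # Emit each map entry as a repeated block with an explicit key: "<name>"
+                    # line and a nested value { ... } message, which is how protobuf text
+                    # format represents map fields such as serializable_enums.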
lines.append(f"{pad}{key} {{") + lines.append(f"{pad} key: \"{map_key}\"") + lines.append(f"{pad} value {{") + lines.append(dict_to_textproto(map_val, indent + 4)) + lines.append(f"{pad} }}") + lines.append(f"{pad}}}") + elif isinstance(value, dict): + lines.append(f"{pad}{key} {{") + lines.append(dict_to_textproto(value, indent + 2)) + lines.append(f"{pad}}}") + elif isinstance(value, list): + for item in value: + if isinstance(item, dict): + lines.append(f"{pad}{key} {{") + lines.append(dict_to_textproto(item, indent + 2)) + lines.append(f"{pad}}}") + else: + lines.append(f"{pad}{key}: {format_scalar(item)}") + else: + if key != "unit" and key != "size" and key != "int64_value" and key != "match_type" and value != "DEFAULT_ONLY": + value = format_scalar(value) + if value == "LIST" or value == "RANGE_LIST": + value = "OPTIONAL" + if key == 'value': + value = base64_to_escaped(value) + value = f"\"{value}\"" + append = lines.append + append(f"{pad}{key}: {value}") + + + elif isinstance(d, list): + for item in d: + if isinstance(item, dict): + lines.append(f"{pad}{{") + lines.append(dict_to_textproto(item, indent + 2)) + lines.append(f"{pad}}}") + else: + lines.append(f"{pad}{format_scalar(item)}") + + return "\n".join(lines) + +def find_best_match(name: str, chain: Iterable[str]) -> Optional[str]: + """ + Find the best match in `chain` for the given `name`, by matching + from the end (dot-separated segments). + """ + input_parts = name.split(".") + best_match = None + best_len = 0 + + for full_name in chain: + full_parts = full_name.split(".") + min_len = min(len(input_parts), len(full_parts)) + match_len = 0 + # compare backwards + for i in range(1, min_len + 1): + if input_parts[-i] == full_parts[-i]: + match_len += 1 + else: + break + if match_len > best_len: + best_len = match_len + best_match = full_name + + return best_match + +@lru_cache(maxsize=512) +def find_by_function_name(func_name: str) -> Optional[str]: + return find_best_match(func_name, func_chain) + +@lru_cache(maxsize=512) +def find_by_table_name(table_name: str) -> Optional[str]: + return find_best_match(table_name, table_chain) + +@lru_cache(maxsize=512) +def find_by_counter_name(counter_name: str) -> Optional[str]: + return find_best_match(counter_name, counter_chain) + +@lru_cache(maxsize=512) +def find_by_direct_counter_name(counter_name: str) -> Optional[str]: + return find_best_match(counter_name, direct_counter_chain) + +@lru_cache(maxsize=1024) +def _read_width(k: str) -> Optional[int]: + """ + Read bitwidth for a dotted key like "Container.field.subfield". + Caches results because this uses runtime type introspection. 
+ """ + try: + tokens = k.split(".") + if not tokens: + return None + # First token is a global name + root_name, *rest = tokens + container = GLOBAL_NAMES.get(root_name) + if container is None: + return None + + # iterate through annotation types + container_type = type(container) + var_name = rest[0] if rest else None + for token in rest[1:]: + anns = get_annotations(container_type) or {} + container_type = anns.get(var_name) + var_name = token + if container_type is None: + return None + + anns = get_annotations(container_type) or {} + ann = anns.get(var_name) + if ann is None: + return None + + if get_origin(ann) is Annotated: + args = get_args(ann) + # metadata usually at args[2] in your pattern + if len(args) >= 3 and isinstance(args[2], dict): + # assume 'bitwidth' or direct width is in metadata + return args[2].get("bitwidth") or args[2].get("width") or None + + # numeric second arg for Annotated[int, ] + if len(args) > 1 and isinstance(args[1], int): + return args[1] + + # If annotation is an enum class, try its __bitwidth__ + if _isclass(ann) and issubclass(ann, _enum_types): + return getattr(ann, "__bitwidth__", None) + + # fallback: if it's a raw int annotation assume 32 + if ann is int: + return 32 + + except Exception: + # Do not crash on unexpected types; caller should handle None. + return None + +def _extract_attrs(obj, exclude_keys=None): + if obj is None: + return None + d = getattr(obj, "__dict__", None) + if not d: + return None + out = {k: v for k, v in d.items() + if v is not None and not _isfunction(v) and not isinstance(v, staticmethod)} + if exclude_keys: + for k in exclude_keys: + out.pop(k, None) + return out or None + +_get_str_annos_for_table = lambda t: _extract_attrs(t, getattr(t, "key", {})) +_get_str_annos_for_key = lambda hints, k: _extract_attrs(hints.get(k)) + +def _apply_action_scope(node, aid, const_def_act_id, def_hint, def_act_name, act_name, hints): + annotations = [] + if const_def_act_id and aid == const_def_act_id and def_hint: + annotations.append("@defaultonly") + elif def_act_name and act_name == "NoAction": + annotations.append("@defaultonly") + elif hints.get("annotations"): + annotations.append(hints["annotations"]) + if annotations: + node["annotations"] = annotations + node["scope"] = "DEFAULT_ONLY" + +def _make_str_annos_node(str_annos: dict, kind: int): + """ + Turn a dict of structured annotations into the JSON node shape expected by P4RT. + kind: 0=table, 1=saival/action param, 2=counter + """ + if not str_annos: + return None + + kv_pairs = [] + if kind == 0: # SaiTable + # "ignored" is special-cased in original code + if "ignored" in str_annos: + kv_pairs.append({"key": "ignored", "value": {"stringValue": str(str_annos["ignored"])}}) + else: + for key, value in str_annos.items(): + val_type = "int64Value" if key == "order" else "stringValue" + kv_pairs.append({"key": key, "value": {val_type: str(value)}}) + name = "SaiTable" + elif kind == 2: # SaiCounter + name = "SaiCounter" + for key, value in str_annos.items(): + # booleans should be expressed as strings for compatibility with original output + kv_pairs.append({"key": key, "value": {"stringValue": str(value)}}) + else: # SaiVal or action param + name = "SaiVal" + for key, value in str_annos.items(): + kv_pairs.append({"key": key, "value": {"stringValue": str(value)}}) + + return [{"name": name, "kvPairList": {"kvPairs": kv_pairs}}] + +def _extract_annotation_info(k: str, anno): + """ + Returns (bitwidth:int or None, str_annos:dict or None). + Handles Annotated[...] 
patterns, enums, and plain int. + """ + origin = get_origin(anno) + if origin is Annotated: + args = get_args(anno) + base, *meta = args + width = next((m for m in meta if isinstance(m, int)), None) + meta_dict = next((m for m in meta if isinstance(m, dict)), None) + if _isclass(base) and issubclass(base, _enum_types): + return width or getattr(base, "__bitwidth__", 16), meta_dict + if base is int: + return width or 32, meta_dict + raise TypeError(f"Unsupported base type for param '{k}': {base}") + if _isclass(anno) and issubclass(anno, _enum_types): + return getattr(anno, "__bitwidth__", 16), None + if anno is int: + return 32, None + raise TypeError(f"Unsupported annotation type for param '{k}': {anno}") + +def unique_alias(full_name: str, used_aliases: List[str]) -> str: + """Generate a unique alias for full_name, avoiding collisions in used_aliases.""" + alias = full_name.rsplit(".", 1)[-1] + if alias not in used_aliases: + used_aliases.append(alias) + return alias + + parts = full_name.split(".") + # progressively include more path components (2-part, 3-part, ...) + for i in range(2, len(parts) + 1): + candidate = ".".join(parts[-i:]) + if candidate not in used_aliases: + used_aliases.append(candidate) + return candidate + + # fallback to full name (last resort) + used_aliases.append(full_name) + return full_name + +def resolve_func_name(raw: Any, func_name_to_classflag: Dict[str, bool]) -> str: + """Centralized resolution for functions (handles class-method flags & find_by_function_name).""" + name = getattr(raw, "__qualname__", getattr(raw, "__name__", str(raw))) + is_class_method = func_name_to_classflag.get(name, False) + if name not in func_name_to_classflag or is_class_method: + # attempt to translate via project-level name lookup once + found = find_by_function_name(name) + return found or name + return name + +def _resolve_default_action(def_action, func_name_to_classflag): + """Unify default-action handling (returns resolved name and its generated id)""" + if def_action is None: + return None, None + func, hints = def_action if isinstance(def_action, tuple) else (def_action, {}) + name = getattr(func, "__qualname__", getattr(func, "__name__", str(func))) + name = resolve_func_name(func, func_name_to_classflag) + return name, gen_symbol_id(name, ACTION) + +@lru_cache(maxsize=256) +def get_dash_enum_members(e): + """Return a list of (name, enum-member) sorted by numeric value.""" + members = [(name, val) for name, val in _getmembers(e) if not name.startswith("_") and isinstance(val, e)] + members.sort(key=lambda item: int(item[1])) + return members + +def make_enum_node(enum_cls): + """Build the enum representation node used in typeInfo.serializableEnums.""" + enum_node = OrderedDict() + bitwidth = getattr(enum_cls, "__bitwidth__", 16) + enum_node["underlyingType"] = {"bitwidth": bitwidth} + + members_node = [] + members = get_dash_enum_members(enum_cls) + + # If IntFlag and has NONE member, ensure it's first (preserves original behavior) + if issubclass(enum_cls, enum.IntFlag) and hasattr(enum_cls, "NONE"): + members = [m for m in members if m[0] != "NONE"] + members.insert(0, ("NONE", getattr(enum_cls, "NONE"))) + + bytes_needed = (bitwidth + 7) // 8 + for name, member in members: + int_value = int(member) + # encode to big-endian with sufficient bytes + b64_value = base64.b64encode(int_value.to_bytes(bytes_needed, "big", signed=False)).decode("ascii") + members_node.append(OrderedDict([("name", name), ("value", b64_value)])) + + enum_node["members"] = members_node + return 
enum_node + +@lru_cache(maxsize=1) +def get_dash_enum_list(): + """Return list of enum classes in current module that are IntEnum/IntFlag subclasses.""" + class_list = _getmembers(sys.modules[__name__], inspect.isclass) + return [ + cls + for name, cls in class_list + if not name.startswith("_") + and inspect.isclass(cls) + and issubclass(cls, _enum_types) + and cls not in _enum_types + and name not in ("BufferFlags",) + ] + +def make_table_node(table: "Table", table_name: str, tid: int): + """Construct table JSON node from Table instance (optimized).""" + global tab_alias_names, func_set, action_ids + + table_node = OrderedDict() + + # alias + alias_name = unique_alias(table_name, tab_alias_names) + preamble_node = OrderedDict(id=tid, name=table_name, alias=alias_name) + + # structured annotations for table preamble + str_annos = _get_str_annos_for_table(table.sai_table) + if str_annos: + preamble_node["structuredAnnotations"] = _make_str_annos_node(str_annos, 0) + table_node["preamble"] = preamble_node + + # matchFields + match_fields = [] + for mf_id, k in enumerate(table.key, start=1): + mf = OrderedDict( + id=mf_id, + name=str(k), + bitwidth=_read_width(k), + matchType=table.key[k].__name__, + ) + mf_str_annos = _get_str_annos_for_key(table.sai_val, k) + if mf_str_annos: + mf["structuredAnnotations"] = _make_str_annos_node(mf_str_annos, 1) + match_fields.append(mf) + table_node["matchFields"] = match_fields + + # Build a small cache of func name -> is_class_method to avoid repeated searches + func_name_to_classflag = {name: flag for name, flag in func_set} + + # default actions (resolved once) + def_act_name, def_act_id = _resolve_default_action(table.default_action, func_name_to_classflag) + const_def_act_name, const_def_act_id = _resolve_default_action(table.const_default_action, func_name_to_classflag) + + # compute def_hint only once + def_hint = any(isinstance(a, tuple) and a[1] for a in table.actions) + + # actions -> actionRefs + action_refs = [] + for action in table.actions: + func, hints = action if isinstance(action, tuple) else (action, {}) + act_name = getattr(func, "__qualname__", getattr(func, "__name__", str(func))) + + # resolve using cache/lookup + is_class_method = func_name_to_classflag.get(act_name, False) + if act_name not in func_name_to_classflag or is_class_method: + resolved = find_by_function_name(act_name) or act_name + act_name = resolved + + aid = gen_symbol_id(act_name, ACTION) + action_ids[aid] = act_name + + node: Dict[str, Any] = {"id": aid} + annotations = [] + + # set DEFAULT_ONLY for some conditions + _apply_action_scope(node, aid, const_def_act_id, def_hint, def_act_name, act_name, hints) + + action_refs.append(node) + + table_node["actionRefs"] = action_refs + + # prefer const default action id if provided (match original logic) + if table.const_default_action: + table_node["constDefaultActionId"] = const_def_act_id + + # attach direct counter if present + parts = table_name.split(".") + attempts = [".".join(parts[-3:]), ".".join(parts[-2:]), parts[-1]] + + for idx, tname in enumerate(attempts, start=1): + if tname in DashTableCounters._attachments: + ctr_name = DashTableCounters._attachments[tname] + ctr = DashTableCounters._counters.get(ctr_name) + full_ctr_name = f"{table_name.rsplit('.', 1)[0]}.{ctr_name}" + cid = gen_symbol_id(full_ctr_name, DIRECT_COUNTER) if ctr else None + if ctr: + table_node["directResourceIds"] = [cid] + break # stop after first successful match + + # size handling (prefer structured annotation size if present) + size 
= (str_annos or {}).get("size") if str_annos else None + table_node["size"] = size if size is not None else "1024" + + return table_node + +def make_action_node(act_name: str, annotations: dict, aid: int, flag: bool): + """Create action node from action name and its annotations mapping.""" + global act_alias_names + + action_node = OrderedDict() + + alias_name = unique_alias(act_name, act_alias_names) + preamble_node = OrderedDict(id=aid, name=act_name, alias=alias_name) + + # special-case NoAction + if act_name == "NoAction": + preamble_node["annotations"] = ['@noWarn("unused")'] + action_node["preamble"] = preamble_node + return action_node + + action_node["preamble"] = preamble_node + + # params from annotations + params = [] + for param_id, (k, anno) in enumerate((annotations or {}).items(), start=1): + param_node = OrderedDict(id=param_id, name=k) + bitwidth, str_annos = _extract_annotation_info(k, anno) + if bitwidth is not None: + param_node["bitwidth"] = bitwidth + if str_annos: + param_node["structuredAnnotations"] = _make_str_annos_node(str_annos, 1) + params.append(param_node) + + if params: + action_node["params"] = params + return action_node + +def make_counter_node(counter: "Counter"): + """Create a general counter node.""" + cfg = counter.config + node = OrderedDict() + + ctr_name = find_by_counter_name(cfg.ctr_name) or cfg.ctr_name + cid = gen_symbol_id(ctr_name, COUNTER) + + node["preamble"] = OrderedDict(id=cid, name=ctr_name, alias=cfg.ctr_name) + + str_annos = {} + if getattr(cfg, "name", None): + str_annos["name"] = cfg.name + if getattr(cfg, "attr_type", None): + str_annos["attr_type"] = cfg.attr_type + if getattr(cfg, "action_names", None): + str_annos["action_names"] = cfg.action_names + if getattr(cfg, "no_suffix", None): + str_annos["no_suffix"] = "true" + if str_annos: + node["preamble"]["structuredAnnotations"] = _make_str_annos_node(str_annos, 2) + + node["spec"] = OrderedDict(unit=cfg.counter_type.value) + node["size"] = str(cfg.size) + return node + +def make_direct_counter_node(counter: "DirectCounter", table_name: str, table_id: Optional[int] = None): + node = OrderedDict() + + ctr_name = f"{table_name.rsplit('.', 1)[0]}.{counter.name}" + ctr_name = find_by_direct_counter_name(ctr_name) or ctr_name + + d_cid = gen_symbol_id(ctr_name, DIRECT_COUNTER) + node["preamble"] = OrderedDict(id=d_cid, name=ctr_name, alias=counter.name) + node["spec"] = OrderedDict(unit=counter.counter_type.value) + if table_id is not None: + node["directTableId"] = table_id + return node + +def make_pyinfo(ignore_tables): + """Assemble top-level pyinfo structure (optimized).""" + global func_chain, func_set, table_chain, counter_chain, direct_counter_chain + global table_objs, action_objs, action_ids, table_ids + + pyinfo = OrderedDict(pkgInfo={"arch": "python-model"}) + + # build call graphs and sets once + func_set = generate_global_actions_chain(project_dir) + func_chain = generate_action_chain(project_dir) + table_chain = generate_table_chain(project_dir) + results = generate_counter_chain(project_dir) + counter_chain = results[0] + direct_counter_chain = results[1] + + # prepare a fast lookup for func-name -> is_class_method + func_name_to_classflag = {name: flag for name, flag in func_set} + + # tables + tables_node = [] + for tname, table in table_objs.items(): + if isinstance(table, Table) and table not in ignore_tables: + table_name = find_by_table_name(tname) or tname + tid = gen_symbol_id(table_name, TABLE) + tables_node.append(make_table_node(table, table_name, tid)) + 
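+            # Keep the numeric id -> fully qualified name mapping (same pattern as action_ids below).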
table_ids[tid] = table_name + else: + # keep original behavior: log warning + print(f"Warning: No Table object found for {tname}") + pyinfo["tables"] = tables_node + + # actions + actions_node = [] + # first pass: functions from func_set + for func, tag in func_set: + if tag is True: + func = find_by_function_name(func) or func + aid = gen_symbol_id(func, ACTION) + action_ids[aid] = func + actions_node.append(make_action_node(func, {}, aid, False)) + + # second pass: explicit action_objs (may include annotations) + for act_name, func in action_objs.items(): + is_class_method = func_name_to_classflag.get(act_name, False) + if act_name not in [n for n, _ in func_set] or is_class_method: + newfunc, hints = func if isinstance(func, tuple) else (func, {}) + annotations = get_annotations(newfunc) or {} + resolved_name = find_by_function_name(act_name) or act_name + aid = gen_symbol_id(resolved_name, ACTION) + # ensure we record id -> name mapping + action_ids[aid] = resolved_name + actions_node.append(make_action_node(resolved_name, annotations, aid, True)) + + pyinfo["actions"] = actions_node + + # counters + pyinfo["counters"] = [make_counter_node(c) for c in DashCounters._counters.values()] + + # direct counters (attached mapping) + direct_counters = [] + for ctr_name, counter_name in DashTableCounters._attachments.items(): + attached_table = find_by_table_name(ctr_name) or ctr_name + ctr_obj = DashTableCounters._counters.get(counter_name) + table_id = gen_symbol_id(attached_table, TABLE) + direct_counters.append(make_direct_counter_node(ctr_obj, attached_table, table_id)) + pyinfo["directCounters"] = direct_counters + + # enums -> typeInfo + serializableEnums = OrderedDict() + for e in get_dash_enum_list(): + serializableEnums[e.__name__] = make_enum_node(e) + pyinfo["typeInfo"] = OrderedDict(serializableEnums=serializableEnums) + + return pyinfo + +if __name__ == "__main__": + output_dir = "py_model/dash_pipeline.py_model" + + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + ignore_tables = set() + + # Generate and serialize + pyinfo = make_pyinfo(ignore_tables=[]) + + # Create an empty dash_pipeline.json file with valid JSON + with open(os.path.join(output_dir, "dash_pipeline.json"), "w") as f: + json.dump({}, f) + + # Dump to Protobuf text-format string + textproto_output = dict_to_textproto(pyinfo) + with open(os.path.join(output_dir, "dash_pipeline_p4rt.txt"), "w") as f: + f.write(textproto_output + "\n") + + # Dump to Protobuf json-format string + with open(os.path.join(output_dir, "dash_pipeline_p4rt.json"), "w") as f: + json.dump(pyinfo, f, indent=2, sort_keys=False, cls=SafeEncoder) + + # Generate IR and save as JSON + ir = gen_ir() + with open(os.path.join(output_dir, "dash_pipeline_ir.json"), "w") as f: + json.dump(ir, f, indent=2, sort_keys=False) + + print("Finished generating artifacts at 'dash_pipeline.py_model/'") diff --git a/dash-pipeline/py_model/scripts/gen_action_chain.py b/dash-pipeline/py_model/scripts/gen_action_chain.py new file mode 100644 index 000000000..db06946ef --- /dev/null +++ b/dash-pipeline/py_model/scripts/gen_action_chain.py @@ -0,0 +1,213 @@ +#!/usr/bin/env python3 +""" +Multi-file call-graph extractor + +Start from classes defined in dash_pipeline.py and follow Class.method chains across files, +then expand leaf classes to list all their methods, including @staticmethod/@classmethod. +Prints flat list. + +This is the refactor of your original MultiFileCallGraph using the shared helper. 
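+
+Output entries are dotted chains of the form "dash_ingress.<stage_class>.<method>"
+(illustrative; actual segment names come from the scanned sources). Only chains
+rooted at dash_ingress survive the filtering in generate_action_chain().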
+""" + +import ast +import os +from collections import defaultdict +from typing import List + +from py_model.scripts.gen_helper import parse_files, collect_class_methods, get_entry_class_names, get_full_name, called_classes + + +class MultiFileCallGraph: + def __init__(self, directory: str, entry_file: str = "dash_pipeline.py"): + self.directory = os.path.abspath(directory) + self.entry_file = os.path.join(self.directory, entry_file) + self.class_defs = defaultdict(dict) # {class_name: {method_name: ast.FunctionDef}} + self.file_class_map = {} # {class_name: file_path} + self.parsed_files = list(parse_files(self.directory)) + self._collect_defs() + + # --------------------------- + # Parse files & collect defs + # --------------------------- + def _collect_defs(self): + class_defs, file_class_map, global_funcs = collect_class_methods(self.parsed_files) + self.class_defs.update(class_defs) + self.file_class_map.update(file_class_map) + # ensure a slot for global functions + self.class_defs.setdefault("__global__", {}) + self.class_defs["__global__"].update(global_funcs) + + # --------------------------- + # Helpers: name resolution (get_full_name imported) + # --------------------------- + def _get_full_name(self, node, current_class=None): + return get_full_name(node) + + def _extract_called_class(self, dotted_target): + if not dotted_target: + return None + parts = dotted_target.split(".") + for i in reversed(range(len(parts))): + if parts[i] in self.class_defs: + return parts[i] + return None + + def _should_skip(self, name: str) -> bool: + if not name: + return True + base = name.split(".")[-1] + if base == "py_log" or base.isupper() or base == "get" or base == "hash" or base.endswith("_t"): + return True + return False + + def _called_targets(self, func_node, current_class): + called_classes_set, called_funcs = set(), set() + if func_node is None: + return called_classes_set, called_funcs + + for stmt in ast.walk(func_node): + if isinstance(stmt, ast.Call): + target = self._get_full_name(stmt.func, current_class) + if not target or self._should_skip(target): + continue + callee_class = self._extract_called_class(target) + if callee_class: + called_classes_set.add(callee_class) + else: + called_funcs.add(target) + return called_classes_set, called_funcs + + # --------------------------- + # Build graph + # --------------------------- + def build_graph(self, cls_name, visited=None): + visited = set(visited or []) + if cls_name in visited: + return {} + visited.add(cls_name) + + if cls_name == "__global__": + return {} + + methods = self.class_defs.get(cls_name, {}) + children = {} + + # expand all methods in this class + for mname, mnode in methods.items(): + called_classes, called_funcs = self._called_targets(mnode, cls_name) + subchildren = {} + for callee in sorted(called_classes): + subchildren.update(self.build_graph(callee, visited.copy())) + for func in sorted(called_funcs): + fn_node = None + if "." 
in func: + base, meth = func.split(".", 1) + if base == cls_name: + fn_node = methods.get(meth) + else: + fn_node = self.class_defs["__global__"].get(func) + if fn_node: + subchildren[func] = self._expand_function(func, fn_node, cls_name, visited.copy()) + else: + subchildren[func] = {} + children[mname] = subchildren + + return {cls_name: children} + + def _expand_function(self, name, fn_node, current_class, visited): + called_classes_set, called_funcs = self._called_targets(fn_node, current_class) + children = {} + for callee in sorted(called_classes_set): + children.update(self.build_graph(callee, visited.copy())) + for func in sorted(called_funcs): + fn_node2 = None + if "." in func: + base, meth = func.split(".", 1) + if base == current_class: + fn_node2 = self.class_defs[current_class].get(meth) + else: + fn_node2 = self.class_defs["__global__"].get(func) + if fn_node2: + children[func] = self._expand_function(func, fn_node2, current_class, visited.copy()) + else: + children[func] = {} + return children + + # --------------------------- + # Flatten graph -> dotted chains + # --------------------------- + def _flatten_graph(self, node, prefix=""): + results = [] + for name, children in node.items(): + full = f"{prefix}.{name}" if prefix else name + results.append(full) + if children: + results.extend(self._flatten_graph(children, full)) + return results + + # --------------------------- + # Entry point + # --------------------------- + def resolve_from_entry(self): + if not os.path.exists(self.entry_file): + raise RuntimeError(f"Entry file not found: {self.entry_file!r}") + + roots = get_entry_class_names(self.entry_file) + if not roots: + print(f"No classes found in entry file: {self.entry_file}") + return [] + + raw_graphs = {} + for root in roots: + raw_graphs[root] = self.build_graph(root) + + # flatten graphs + all_chains = [] + for graph in raw_graphs.values(): + all_chains.extend(self._flatten_graph(graph)) + + # Include all static/class methods that might not be called + for cls, methods in self.class_defs.items(): + if cls == "__global__": + continue + for mname, mnode in methods.items(): + full_name = f"{cls}.{mname}" + if full_name not in all_chains: + all_chains.append(full_name) + + return sorted(set(all_chains)) + + +from typing import List + + +def generate_action_chain(project_dir: str, entry_file: str = "dash_pipeline.py") -> List[str]: + cg = MultiFileCallGraph(project_dir, entry_file=entry_file) + chains = cg.resolve_from_entry() + + # Filter chains starting with 'dash_ingress.' and ignoring 'cls.' + dash_ingress_chains = [c for c in chains if c.startswith("dash_ingress.") and "cls." not in c] + + # Remove all '.apply.' and '.cls.' 
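+    # (drop the intermediate 'apply'/'cls' segments so only Class/method names remain in each chain)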
+ cleaned_chains = [c.replace(".apply.", ".").replace(".cls.", ".") for c in dash_ingress_chains] + + # Ignore chains where the last function is 'apply' + cleaned_chains = [c for c in cleaned_chains if not c.split(".")[-1] == "apply"] + + # Skip trivial/builtin functions + skip_funcs = {"print", "bool", "get", "set"} + cleaned_chains = [c for c in cleaned_chains if c.split(".")[-1] not in skip_funcs] + + # Deduplicate and sort + final_chains = sorted(set(cleaned_chains)) + + return final_chains + + +if __name__ == "__main__": + project_dir = "py_model/data_plane" + chains = generate_action_chain(project_dir, entry_file="dash_pipeline.py") + + print("Filtered, cleaned, deduplicated call chain:") + for i, c in enumerate(chains, 1): + print(i, " : ", c) diff --git a/dash-pipeline/py_model/scripts/gen_counter_chain.py b/dash-pipeline/py_model/scripts/gen_counter_chain.py new file mode 100644 index 000000000..b1d15825f --- /dev/null +++ b/dash-pipeline/py_model/scripts/gen_counter_chain.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python3 +""" +Optimized Multi-file Counter() chain extractor. + +Searches for DEFINE_*() and DEFINE_TABLE_*() calls inside classes, +extracting counter names and resolving dotted call chains. + +Refactored to use multi_helper while keeping original behavior identical. +""" + +import ast +import os +from collections import defaultdict +from typing import List, Tuple + +from py_model.scripts.gen_helper import parse_files, collect_class_methods, get_entry_class_names, extract_called_class_from_node, called_classes + + +class MultiFileCounterGraph: + def __init__(self, directory: str, entry_file: str = "dash_pipeline.py", verbose: bool = False): + self.directory = os.path.abspath(directory) + self.entry_file = os.path.join(self.directory, entry_file) + self.verbose = verbose + + # Data structures + self.class_defs = defaultdict(dict) # {class_name: {method_name: ast.FunctionDef}} + self.counters = defaultdict(set) # {class_name: {counter_names}} + self.directcounters = defaultdict(set) # {class_name: {table_counter_names}} + self.called_class_cache = {} # Cache per function node + + # Parse and collect + self.parsed_files = list(parse_files(self.directory)) + self._collect_defs_and_counters() + + # --------------------------- + # Collect class definitions and DEFINE_*() counters + # --------------------------- + def _collect_defs_and_counters(self): + class_defs, _, _ = collect_class_methods(self.parsed_files) + self.class_defs.update(class_defs) + + # Collect DEFINE_* and DEFINE_TABLE_* calls inside class ASTs + for path, tree in self.parsed_files: + for node in tree.body: + if not isinstance(node, ast.ClassDef): + continue + + # Walk over class subtree to find CALLS + for stmt in ast.walk(node): + if not isinstance(stmt, ast.Call): + continue + + func = stmt.func + func_name = ( + func.id if isinstance(func, ast.Name) + else getattr(func, "attr", None) + ) + if not func_name or not func_name.startswith("DEFINE_"): + continue + + if stmt.args and isinstance(stmt.args[0], ast.Constant) and isinstance(stmt.args[0].value, str): + cname = stmt.args[0].value + if func_name.startswith("DEFINE_TABLE_"): + self.directcounters[node.name].add(cname) + if self.verbose: + print(f"[TABLE] {node.name}: {cname}") + else: + self.counters[node.name].add(cname) + if self.verbose: + print(f"[COUNTER] {node.name}: {cname}") + + # --------------------------- + # Extract called class from a Call node (reuse helper function) + # --------------------------- + def _extract_called_class(self, func_node): 
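+        # Thin wrapper over the shared helper: returns the known class a Call node targets, or None.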
+ return extract_called_class_from_node(func_node, self.class_defs) + + def _called_classes(self, func_node): + if func_node is None: + return set() + if func_node in self.called_class_cache: + return self.called_class_cache[func_node] + + called = { + callee for stmt in ast.walk(func_node) + if isinstance(stmt, ast.Call) + and (callee := self._extract_called_class(stmt.func)) + } + self.called_class_cache[func_node] = called + return called + + # --------------------------- + # Recursive chain builder + # --------------------------- + def _build_chains(self, cls_name, counter_dict, prefix="", visited=None): + visited = visited or set() + if cls_name in visited: + return [] + + visited.add(cls_name) + chains = [] + subprefix = f"{prefix}.{cls_name}" if prefix else cls_name + + methods = self.class_defs.get(cls_name, {}) + for mnode in methods.values(): + for callee in self._called_classes(mnode): + chains.extend(self._build_chains(callee, counter_dict, subprefix, visited)) + + if cls_name in counter_dict: + chains.extend(f"{subprefix}.{c}" for c in counter_dict[cls_name]) + + visited.remove(cls_name) + return chains + + # --------------------------- + # Entry point + # --------------------------- + def resolve_from_entry(self) -> Tuple[list[str], list[str]]: + if not os.path.exists(self.entry_file): + raise RuntimeError(f"Entry file not found: {self.entry_file!r}") + + roots = get_entry_class_names(self.entry_file) + if not roots: + print(f"No classes found in entry file: {self.entry_file}") + return [], [] + + all_chains, dir_chains = [], [] + for root in roots: + all_chains += self._build_chains(root, self.counters) + dir_chains += self._build_chains(root, self.directcounters) + + all_chains = sorted({c for c in all_chains if c.startswith("dash_ingress.")}) + dir_chains = sorted({c for c in dir_chains if c.startswith("dash_ingress.")}) + return all_chains, dir_chains + + +# --------------------------- +# Wrapper +# --------------------------- +def generate_counter_chain(project_dir: str, entry_file: str = "dash_pipeline.py", verbose=False) -> Tuple[list[str], list[str]]: + cg = MultiFileCounterGraph(project_dir, entry_file=entry_file, verbose=verbose) + return cg.resolve_from_entry() + + +if __name__ == "__main__": + project_dir = "py_model/data_plane" + + chains, dir_chains = generate_counter_chain(project_dir, entry_file="dash_pipeline.py") + + print("Counter call chains:") + for i, c in enumerate(chains, 1): + print(i, ":", c) + + print(f"\nDirect counter call chains ({len(dir_chains)}):") + for i, c in enumerate(dir_chains, 1): + print(i, ":", c) diff --git a/dash-pipeline/py_model/scripts/gen_global_actions_chain.py b/dash-pipeline/py_model/scripts/gen_global_actions_chain.py new file mode 100644 index 000000000..8cc49bce3 --- /dev/null +++ b/dash-pipeline/py_model/scripts/gen_global_actions_chain.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python3 +""" +Build a cross-file call chain using Python AST. + +Features: +- Recursively scans .py files under 'data_plane'. +- Collects only calls inside 'apply' functions. +- Filters out unwanted names (apply, *_apply, *_t, uppercase, .apply, builtins). +- Returns a unique list of function calls (last part only). 
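+- Each collected call is a (name, is_class_method) tuple, e.g. ("some_action", False);
+  the name here is illustrative, taken from whatever the scanned 'apply' bodies reference.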
+""" + +import os, ast, json, builtins + +def get_python_files(project_dir="."): + """Return all .py files under project_dir containing 'data_plane'.""" + return [ + os.path.join(r, f) + for r, _, files in os.walk(project_dir) + for f in files if f.endswith(".py") and "data_plane" in r + ] + +class ImportTracker(ast.NodeVisitor): + """Tracks import statements in a file.""" + def __init__(self): + self.imports = {} + def visit_Import(self, node): + for a in node.names: + self.imports[a.asname or a.name] = a.name + def visit_ImportFrom(self, node): + for a in node.names: + self.imports[a.asname or a.name] = f"{node.module}.{a.name}" if node.module else a.name + +class CallGraphVisitor(ast.NodeVisitor): + """Collects calls inside 'apply' functions.""" + def __init__(self, imports): + self.imports, self.calls, self.current = imports, set(), None + self.skip = set(dir(builtins)) | { + "get","keys","values","items","update","append","extend", + "insert","pop","remove","clear","copy" + } + + def visit_FunctionDef(self, node): + if node.name == "apply": + self.current = node.name + self.generic_visit(node) + self.current = None + + def visit_Call(self, node): + if not self.current: + return + name = None + is_class_method = False + + if isinstance(node.func, ast.Name): + name = self.imports.get(node.func.id, node.func.id) + elif isinstance(node.func, ast.Attribute): + if isinstance(node.func.value, ast.Name) and node.func.value.id in ("self", "cls"): + is_class_method = True + name = node.func.attr + + if name: + name = name.rsplit(".", 1)[-1] + if not ( + name == "apply" or name == "py_log" or + name.endswith("_apply") or name.endswith("_t") or + name.isupper() or name in self.skip + ): + self.calls.add((name, is_class_method)) + +def generate_global_actions_chain(project_dir="."): + """Return unique list of function calls inside 'apply' functions.""" + all_calls = set() + for pyfile in get_python_files(project_dir): + try: + with open(pyfile, "r", encoding="utf-8") as f: + source = f.read() + if not source.strip(): + continue + tree = ast.parse(source, filename=pyfile) + tracker = ImportTracker(); tracker.visit(tree) + visitor = CallGraphVisitor(tracker.imports); visitor.visit(tree) + all_calls.update(visitor.calls) + except Exception as e: + print(f"Failed to parse {pyfile}: {e}") + return sorted(all_calls) + +if __name__ == "__main__": + project_dir = "py_model/data_plane" + + graph = generate_global_actions_chain(project_dir) + + tagged_calls = [ + f"[class] {name}" if is_class_method else f"[func] {name}" + for name, is_class_method in graph + ] + + print("\nCombined call chain:") + for item in tagged_calls: + print(item) diff --git a/dash-pipeline/py_model/scripts/gen_helper.py b/dash-pipeline/py_model/scripts/gen_helper.py new file mode 100644 index 000000000..babb5d9ce --- /dev/null +++ b/dash-pipeline/py_model/scripts/gen_helper.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python3 +""" +Shared AST / filesystem utilities used by the graph extractors. 
+ +Provides: +- parse_files(directory) +- collect_class_methods(parsed_files) -> class_defs, file_class_map, global_funcs +- get_entry_class_nodes(entry_file) -> list of class names +- get_full_name(node) -> dotted name for Name/Attribute/Call +- extract_called_class_from_node(func_node, class_defs) -> class name or None +- called_classes(func_node, class_defs, cache=None) -> set of class names +""" + +import ast +import os +from collections import defaultdict +from typing import Generator, Tuple, Dict, Any, Set, List + + +def parse_files(directory: str, skip_dirs: set | None = None) -> Generator[Tuple[str, ast.AST], None, None]: + """Yield (path, ast.parse(tree)) for all .py files under directory. + + Skips common virtualenv / cache dirs by default. + """ + if skip_dirs is None: + skip_dirs = {"__pycache__", "venv", "site-packages", "tests"} + directory = os.path.abspath(directory) + for root, _, files in os.walk(directory): + if any(skip in root for skip in skip_dirs): + continue + for fname in files: + if not fname.endswith(".py"): + continue + path = os.path.join(root, fname) + try: + with open(path, "r", encoding="utf-8") as f: + yield path, ast.parse(f.read(), filename=path) + except SyntaxError as e: + print(f"Skipping {path}: {e}") + +def collect_class_methods(parsed_files: List[Tuple[str, ast.AST]] + ) -> Tuple[Dict[str, Dict[str, ast.FunctionDef]], Dict[str, str], Dict[str, ast.FunctionDef]]: + """From parsed_files produce: + - class_defs: {class_name: {method_name: ast.FunctionDef}} + - file_class_map: {class_name: file_path} + - global_funcs: {func_name: ast.FunctionDef} stored under key '__global__' inside class_defs in some callers. + """ + class_defs: Dict[str, Dict[str, ast.FunctionDef]] = defaultdict(dict) + file_class_map: Dict[str, str] = {} + global_funcs: Dict[str, ast.FunctionDef] = {} + + for path, tree in parsed_files: + for node in tree.body: + if isinstance(node, ast.ClassDef): + methods = {fn.name: fn for fn in node.body if isinstance(fn, ast.FunctionDef)} + if methods: + class_defs[node.name].update(methods) + else: + # ensure class exists even without methods + class_defs.setdefault(node.name, {}) + file_class_map[node.name] = path + elif isinstance(node, ast.FunctionDef): + global_funcs[node.name] = node + + return class_defs, file_class_map, global_funcs + +def get_entry_class_names(entry_file: str) -> List[str]: + """Return list of class names defined at top-level in entry_file.""" + if not os.path.exists(entry_file): + raise RuntimeError(f"Entry file not found: {entry_file!r}") + with open(entry_file, "r", encoding="utf-8") as f: + tree = ast.parse(f.read(), filename=entry_file) + return [n.name for n in tree.body if isinstance(n, ast.ClassDef)] + +def get_full_name(node: ast.AST) -> str | None: + """Return dotted full name for Name/Attribute/Call nodes or None.""" + if isinstance(node, ast.Name): + return node.id + if isinstance(node, ast.Attribute): + parent = get_full_name(node.value) + return (parent + "." 
+ node.attr) if parent else node.attr + if isinstance(node, ast.Call): + return get_full_name(node.func) + return None + +def extract_called_class_from_node(func_node: ast.AST, class_defs: Dict[str, Any]) -> str | None: + """Return the class name if call target refers to a known class.""" + # Mirrors previous logic in the three scripts: check Attribute chain and Name nodes + if isinstance(func_node, ast.Attribute): + target = func_node + while isinstance(target, ast.Attribute): + if target.attr in class_defs: + return target.attr + target = target.value + if isinstance(target, ast.Name) and target.id in class_defs: + return target.id + elif isinstance(func_node, ast.Name) and func_node.id in class_defs: + return func_node.id + return None + +def called_classes(func_node: ast.AST, class_defs: Dict[str, Any], cache: Dict[ast.AST, Set[str]] | None = None) -> Set[str]: + """Return set of class names called inside a function. Uses optional cache keyed by function node.""" + if func_node is None: + return set() + if cache is not None and func_node in cache: + return cache[func_node] + + called = { + callee for stmt in ast.walk(func_node) + if isinstance(stmt, ast.Call) + and (callee := extract_called_class_from_node(stmt.func, class_defs)) + } + if cache is not None: + cache[func_node] = called + return called diff --git a/dash-pipeline/py_model/scripts/gen_ir.py b/dash-pipeline/py_model/scripts/gen_ir.py new file mode 100644 index 000000000..56d4ed339 --- /dev/null +++ b/dash-pipeline/py_model/scripts/gen_ir.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python3 + +import json +import itertools +from typing import Optional +from collections import OrderedDict + +from py_model.libs.__id_map import * +from py_model.libs.__jsonize import * +from py_model.libs.__counters import * +from py_model.dash_py_v1model import * + +node_id = itertools.count(1000000) +type_id = itertools.count(2000) + +def make_source_fragment(counter_obj, var_name="VarName") -> str: + # Case 1: SaiCounter-based Counter + if hasattr(counter_obj, "config") and isinstance(counter_obj.config, SaiCounter): + cfg = counter_obj.config + + # Build annotation part + attrs = [] + if cfg.name: + attrs.append(f'name="{cfg.name}"') + if cfg.no_suffix: + attrs.append('no_suffix="true"') + if cfg.attr_type: + attrs.append(f'attr_type="{cfg.attr_type}"') + if cfg.action_names: + if isinstance(cfg.action_names, str): + names = cfg.action_names + else: + names = ",".join(cfg.action_names) + attrs.append(f'action_names="{names}"') + + annotation = "@SaiCounter[" + ", ".join(attrs) + "]" + + # Counter type string (default to "packets" if not set) + ctype = getattr(cfg.counter_type, "name", "packets").lower() + + fragment = f'{annotation} counter({cfg.size}, CounterType.{ctype}) {cfg.ctr_name};, Type = counter' + return fragment.replace('"', '\\"') # escape quotes + + # Case 2: DirectCounter + elif isinstance(counter_obj, DirectCounter): + ctype = getattr(counter_obj.counter_type, "name", "packets_and_bytes").lower() + annotation = '@SaiCounter[attr_type="stats"]' + fragment = f'{annotation} counter(64, CounterType.{ctype}) {counter_obj.name};, Type = counter' + return fragment.replace('"', '\\"') + + else: + raise TypeError(f"Unsupported counter type: {type(counter_obj)}") + + +def make_vec_node(ctr_name: str) -> Optional[OrderedDict]: + filename, lineno = None, None + + ctr = DashCounters._counters.get(ctr_name) + if ctr is None: + print(f"Counter '{ctr_name}' not found in DashCounters._counters") + return None + + node = OrderedDict( + 
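+        # Synthetic ids per node; the node shape (Declaration_Instance / Type_Name / Path) follows a p4c-style IR JSON layout.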
Node_ID=next(node_id), + Node_Type="Declaration_Instance", + name=ctr_name, + annotations={}, + type=OrderedDict( + Node_ID=next(type_id), + Node_Type="Type_Name", + path=OrderedDict(Node_ID=next(type_id), Node_Type="Path", name="counter") + ), + arguments={}, + properties={}, + Source_Info=OrderedDict(filename=filename, line=lineno, source_fragment=make_source_fragment(ctr)) + ) + + return node + + +def gen_ir() -> OrderedDict: + ir = OrderedDict(Node_ID=next(node_id), Node_Type="PyProgram") + + objects = OrderedDict(Node_ID=next(node_id), Node_Type="Vector") + + if not counter_ids: + # print("counter_ids is empty, using DashCounters._counters instead") + for key in DashCounters._counters.keys(): + counter_ids[key] = key + + obj_vecs = [] + for key in counter_ids: + node = make_vec_node(key) + if node: + obj_vecs.append(node) + + objects["vec"] = obj_vecs + ir["objects"] = objects + return ir + + +if __name__ == "__main__": + ir = gen_ir() + with open("py_ir.json", "w") as f: + json.dump(ir, f, indent=2, sort_keys=False) + print("py_ir.json written.") diff --git a/dash-pipeline/py_model/scripts/gen_table_chain.py b/dash-pipeline/py_model/scripts/gen_table_chain.py new file mode 100644 index 000000000..7a0489d82 --- /dev/null +++ b/dash-pipeline/py_model/scripts/gen_table_chain.py @@ -0,0 +1,136 @@ +#!/usr/bin/env python3 +""" +Multi-file Table() chain extractor. + +Start from classes in dash_pipeline.py, follow Class.method chains like in MultiFileCallGraph, +but instead of methods, stop at Table() objects defined inside classes. +Outputs dotted chains ending at Table object names. + +This file keeps the original behavior while using the shared helper. +""" + +import ast +import os +from collections import defaultdict +from typing import List + +from py_model.scripts.gen_helper import parse_files, collect_class_methods, get_entry_class_names, get_full_name + + +class MultiFileTableGraph: + def __init__(self, directory: str, entry_file: str = "dash_pipeline.py"): + self.directory = os.path.abspath(directory) + self.entry_file = os.path.join(self.directory, entry_file) + self.class_defs = defaultdict(dict) # {class_name: {method_name: ast.FunctionDef}} + self.tables = defaultdict(set) # {class_name: {table_var_names}} + + # parse & collect + self.parsed_files = list(parse_files(self.directory)) + self._collect_defs_and_tables() + + # --------------------------- + # Collect defs + Table() objects (class-level) + # --------------------------- + def _collect_defs_and_tables(self): + # Build class methods map first + class_defs, _, _ = collect_class_methods(self.parsed_files) + self.class_defs.update(class_defs) + + # Now collect Table() assignments inside classes (scan ASTs for Assign nodes inside class bodies) + for path, tree in self.parsed_files: + for node in tree.body: + if not isinstance(node, ast.ClassDef): + continue + + # collect Table() assignments inside class + for stmt in node.body: + if isinstance(stmt, ast.Assign) and isinstance(stmt.value, ast.Call): + call = stmt.value + if isinstance(call.func, ast.Name) and call.func.id == "Table": + for target in stmt.targets: + if isinstance(target, ast.Name): + self.tables[node.name].add(target.id) + + # --------------------------- + # Helpers: get_full_name already imported + # --------------------------- + def _extract_called_class(self, dotted_target: str | None) -> str | None: + """Given dotted_target like 'foo.bar.Baz' return the longest part matching a known class.""" + if not dotted_target: + return None + parts = 
dotted_target.split(".") + for i in reversed(range(len(parts))): + if parts[i] in self.class_defs: + return parts[i] + return None + + def _called_classes(self, func_node): + called_classes = set() + if func_node is None: + return called_classes + for stmt in ast.walk(func_node): + if isinstance(stmt, ast.Call): + target = get_full_name(stmt.func) + callee_class = self._extract_called_class(target) + if callee_class: + called_classes.add(callee_class) + return called_classes + + def build_table_chains(self, cls_name, prefix="", visited=None): + visited = set(visited or []) + if cls_name in visited: + return [] + visited.add(cls_name) + + chains = [] + methods = self.class_defs.get(cls_name, {}) + + for mname, mnode in methods.items(): + for callee in sorted(self._called_classes(mnode)): + subprefix = f"{prefix}.{cls_name}" if prefix else cls_name + chains.extend(self.build_table_chains(callee, subprefix, visited.copy())) + + # at this class, append all tables + if cls_name in self.tables: + subprefix = f"{prefix}.{cls_name}" if prefix else cls_name + for tname in sorted(self.tables[cls_name]): + chain = f"{subprefix}.{tname}" + chains.append(chain) + + return chains + + # --------------------------- + # Entry point + # --------------------------- + def resolve_from_entry(self) -> List[str]: + if not os.path.exists(self.entry_file): + raise RuntimeError(f"Entry file not found: {self.entry_file!r}") + + roots = get_entry_class_names(self.entry_file) + if not roots: + print(f"No classes found in entry file: {self.entry_file}") + return [] + + all_chains = [] + for root in roots: + all_chains.extend(self.build_table_chains(root)) + + # Filter only dash_ingress.* chains (same as original) + filtered = [c for c in all_chains if c.startswith("dash_ingress.")] + + return sorted(set(filtered)) + + +def generate_table_chain(project_dir: str, entry_file: str = "dash_pipeline.py") -> List[str]: + tg = MultiFileTableGraph(project_dir, entry_file=entry_file) + return tg.resolve_from_entry() + + +if __name__ == "__main__": + project_dir = "py_model/data_plane" + + chains = generate_table_chain(project_dir, entry_file="dash_pipeline.py") + + print("\nFinal Table Object Call Chains:") + for i, c in enumerate(chains, 1): + print(f"{i:2d}. {c}") diff --git a/dash-pipeline/requirements.txt b/dash-pipeline/requirements.txt new file mode 100644 index 000000000..a7e04c780 --- /dev/null +++ b/dash-pipeline/requirements.txt @@ -0,0 +1,10 @@ +bitarray +scapy +openpyxl +cerberus +python-docx +pyyaml +grpcio +google-cloud-storage +protobuf==3.20.* +googleapis-common-protos \ No newline at end of file diff --git a/documentation/general/dash-high-level-design.md b/documentation/general/dash-high-level-design.md index eb98a61fa..fc6359391 100644 --- a/documentation/general/dash-high-level-design.md +++ b/documentation/general/dash-high-level-design.md @@ -523,7 +523,7 @@ per scenario and document them in a repeatable format. The behavioral model is compiled as software to behave exactly as hardware. From there we will be able to auto-generate the APIs. The *implementation* itself does not have to be P4. -For more information see the [pipeline README](../../../dash-pipeline/README.md) file. +For more information see the [pipeline README](../../dash-pipeline/README.md) file. 
## SONiC Integration testing diff --git a/documentation/general/images/sdn/eni-match-flow-direction.svg b/documentation/general/images/sdn/eni-match-flow-direction.svg index 719d306a2..338242ee5 100644 --- a/documentation/general/images/sdn/eni-match-flow-direction.svg +++ b/documentation/general/images/sdn/eni-match-flow-direction.svg @@ -1,4 +1,4 @@ -InboundInbound uses Inner DST MAC to select ENIENI match based on DEST MAC == ENI MACOutboundOutbound uses Inner SRC MAC to select ENIENI match based on SRC MAC == ENI MAC
Determine direction based on VNI
Determine direct...
Start
Start
Match?
Match?
Match?
Match?
Perform Inbound Fast Path
Perform Inbou...
Perform Inbound Slow Path
Perform Inbou...
Perform Outbound Slow Path
Perform Outbo...
Perform Outbound Fast Path
Perform Outbo...
Yes
Yes
Yes
Yes
No
No
No
No
Performed on the appliance
Performed on the appliance
Text is not SVG - cannot display
+InboundInbound uses Inner DST MAC to select ENIENI match based on DST MAC == ENI MACOutboundOutbound uses Inner SRC MAC to select ENIENI match based on SRC MAC == ENI MAC
Determine direction based on VNI
Determine direction bas...
Start
Start
Match?
Match?
Match?
Match?
Perform Inbound Fast Path
Perform Inbou...
Perform Inbound Slow Path
Perform Inbou...
Perform Outbound Slow Path
Perform Outbo...
Perform Outbound Fast Path
Perform Outbo...
Yes
Yes
Yes
Yes
No
No
No
No
Performed on the appliance
Performed on the appliance
Text is not SVG - cannot display
\ No newline at end of file diff --git a/documentation/general/images/sdn/inbound-fast-path-flow.svg b/documentation/general/images/sdn/inbound-fast-path-flow.svg index 7c313c151..f34149aa7 100644 --- a/documentation/general/images/sdn/inbound-fast-path-flow.svg +++ b/documentation/general/images/sdn/inbound-fast-path-flow.svg @@ -1,4 +1,4 @@ - + -Lookup Flow Table 5-tuple matchMatch found; direction based on SRC or DST MACCorresponding Match Action, do not enter rule processingFinal packet transpositionRefresh flow TTLConnection TableState machine metadataConditional DecapSLB decap if needed; Decap VNET GRE KeyDecap only if Outer PA matches w/mapping tableElse, drop and terminate pipeline
Perform Inbound Fast Path
Perform Inbou...
Performed on the appliance
Performed on the appliance
Text is not SVG - cannot display
+Lookup Flow Table 5-tuple matchMatch found; direction based on SRC or DST MACCorresponding Match Action, do not enter rule processingFinal packet transpositionRefresh flow TTLConnection TableState machine metadataConditional DecapSLB decap if needed; Decap VNET GRE KeyDecap only if Outer PA matches w/mapping tableelse, drop and terminate pipeline
Perform Inbound Fast Path
Performed on the appliance
\ No newline at end of file diff --git a/documentation/general/images/sdn/inbound-slow-path-flow.svg b/documentation/general/images/sdn/inbound-slow-path-flow.svg index 1cbad13bb..c4b8c8d81 100644 --- a/documentation/general/images/sdn/inbound-slow-path-flow.svg +++ b/documentation/general/images/sdn/inbound-slow-path-flow.svg @@ -1,4 +1,4 @@ -
Perform Inbound Slow Path
Perform Inbou...
Conditional DecapSLB decap if needed; Decap VNET GRE KeyDecap only if Outer PA matches w/mapping tableElse, drop and terminate pipelineLookup Flow Table Match Not Found; ENI rule processing beginsDST MAC in the packet matches the ENI MACACL1SRC,DST,SPort,DPort,Protocol,Action,Priority, Exit ACL pipeline on hit (Terminate)?Controlled by Azure/MSFT, contains default rulesBlock, Soft Block, Allow, DenyIf rule w/bit exit ACL pipeline on hit is matched, the ACL pipeline is abandonedACL2SRC,DST,SPort,DPort,Protocol,Action,Priority, Exit ACL pipeline on hit (Terminate)?Customer ControlledACL3SRC,DST,SPort,DPort,Protocol,Action,Priority,Exit ACL pipeline on hit (Terminate)?Customer ControlledTranspose ActionRewrite MACsTranspose IPs / portsEncap/DecapInbound Route ProcessingLookup table is per ENI; could be GlobalEach route entry has prefix and separate action entryOuter encap IPv4 permits within-Region routingCould be multiple Global lookup tables per ENIsAcross Regions we use IPv6Create tthe FlowExample of Mapping and Routing Tables Mapping Table for a V-Port / ENI
Customer Address (CA)
Customer Ad...
Physical Address v4 (PA)
Physical Ad...
Physical Address v6 (PA)
Physical Ad...
MAC Address for D-MAC Rewrite
MAC Address for D-...
VNI to use
VNI to use
10.0.0.1
10.0.0.1
100.0.0.1
100.0.0.1
3ffe::1
3ffe::1
E4-A7-A0-99-0E-17
E4-A7-A0-99-0E-17
10001
10001
Example Route Table for a V-Port / ENI
Route
Route
Action
Action
Route Type (Choices)

* Encap_with_lookup_V4_underlay: Encap per mapping table.V4 underlay
* Encap_with_lookup_V6_underlay: Encap per mapping table.V6 underlay
* Encap_with_Provided_data (PA): Encap per proided data (Multiple PA can be provided)
* Outbound NAT(SNAT)_L3: execute on SRC IP w/provided data
* Outbound NAT (SNAT)_L4: execute on SRC IP, src port based on provided data
* Nul:block trafficl
* Private Link
Route Type (Choices)...
Route ID
Route ID
10.0.0.0/24,
...more prefixes
10.0.0.0/24,...
Encap: VXLAN

Action:
check mapping table for exact DST, VNI, ANd D-MAC rewrite info
Encap: VXLAN...
Encap_with_lookup_V4_underlay
Encap_with_lookup_V4_underlay
1
1
Performed on the appliance
Performed on the appliance
Text is not SVG - cannot display
+
Perform Inbound Slow Path
Perform Inbou...
Conditional DecapSLB decap if needed; Decap VNET GRE KeyDecap only if Outer PA matches w/mapping tableElse, drop and terminate pipelineLookup Flow Table Match Not Found; ENI rule processing beginsDST MAC in the packet matches the ENI MACACL1SRC,DST,SPort,DPort,Protocol,Action,Priority, Exit ACL pipeline on hit (Terminate)?Controlled by Azure/MSFT, contains default rulesBlock, Soft Block, Allow, DenyIf rule w/bit exit ACL pipeline on hit is matched, the ACL pipeline is abandonedACL2SRC,DST,SPort,DPort,Protocol,Action,Priority, Exit ACL pipeline on hit (Terminate)?Customer ControlledACL3SRC,DST,SPort,DPort,Protocol,Action,Priority,Exit ACL pipeline on hit (Terminate)?Customer ControlledTranspose ActionRewrite MACsTranspose IPs / portsEncap/DecapInbound Route ProcessingLookup table is per ENI; could be GlobalEach route entry has prefix and separate action entryOuter encap IPv4 permits within-Region routingCould be multiple Global lookup tables per ENIsAcross Regions we use IPv6Create the FlowExample of Mapping and Routing Tables Mapping Table for a V-Port / ENI
Customer Address (CA)
Customer Ad...
Physical Address v4 (PA)
Physical Ad...
Physical Address v6 (PA)
Physical Ad...
MAC Address for D-MAC Rewrite
MAC Address for D-...
VNI to use
VNI to use
10.0.0.1
10.0.0.1
100.0.0.1
100.0.0.1
3ffe::1
3ffe::1
E4-A7-A0-99-0E-17
E4-A7-A0-99-0E-17
10001
10001
Example Route Table for a V-Port / ENI
Route
Route
Action
Action
Route Type (Choices)

* Encap_with_lookup_V4_underlay: Encap per mapping table.V4 underlay
* Encap_with_lookup_V6_underlay: Encap per mapping table.V6 underlay
* Encap_with_Provided_data (PA): Encap per provided data (Multiple PA can be provided)
* Outbound NAT(SNAT)_L3: execute on SRC IP w/provided data
* Outbound NAT (SNAT)_L4: execute on SRC IP, src port based on provided data
* Null:block traffic
* Private Link
Route Type (Choices)...
Route ID
Route ID
10.0.0.0/24,
...more prefixes
10.0.0.0/24,...
Encap: VXLAN

Action:
check mapping table for exact DST, VNI, and D-MAC rewrite info
Encap: VXLAN...
Encap_with_lookup_V4_underlay
Encap_with_lookup_V4_underlay
1
1
Performed on the appliance
Performed on the appliance
Text is not SVG - cannot display
\ No newline at end of file diff --git a/documentation/general/images/sdn/outbound-slow-path-flow.svg b/documentation/general/images/sdn/outbound-slow-path-flow.svg index e54573e45..dbc4dd238 100644 --- a/documentation/general/images/sdn/outbound-slow-path-flow.svg +++ b/documentation/general/images/sdn/outbound-slow-path-flow.svg @@ -1,4 +1,4 @@ -Conditional DecapSLB decap if needed; Decap VNET GRE KeyDecap only if Outer PA matches thePA configured for the ENIElse, drop and terminate pipelineLookup Flow Table Match Not Found; ENI rule processing beginsPossible to insert corresponding inbound flowACL1SRC,DST,SPort,DPort,Protocol,Action,Priority, Exit ACL pipeline on hit (Terminate)?Controlled by Azure/MSFT, contains default rulesBlock, Soft Block, Allow, DenyIf rule w/bit exit ACL pipeline on hit is matched, the ACL pipeline is abandonedACL2SRC,DST,SPort,DPort,Protocol,Action,Priority, Exit ACL pipeline on hit (Terminate)?Customer ControlledACL3SRC,DST,SPort,DPort,Protocol,Action,Priority,Exit ACL pipeline on hit (Terminate)?Customer ControlledTranspose ActionRewrite MACsTranspose IPs / portsEncap/DecapInbound Route ProcessingSet DSCP bit value to XProcess relevant routeEach route entry has prefix and separate actionentryCreate tthe FlowExample of Mapping and Routing Tables Mapping Table for a V-Port / ENI
Customer Address (CA)
Customer Ad...
Physical Address v4 (PA)
Physical Ad...
Physical Address v6 (PA)
Physical Ad...
MAC Address for D-MAC Rewrite
MAC Address for D-...
VNI to use
VNI to use
10.0.0.1
10.0.0.1
100.0.0.1
100.0.0.1
3ffe::1
3ffe::1
E4-A7-A0-99-0E-17
E4-A7-A0-99-0E-17
10001
10001
Example Route Table for a V-Port / ENI
Route
Route
Action
Action
Route Type (Choices)

* Encap_with_lookup_V4_underlay: Encap per mapping table.V4 underlay
* Encap_with_lookup_V6_underlay: Encap per mapping table.V6 underlay
* Encap_with_Provided_data (PA): Encap per proided data (Multiple PA can be provided)
* Outbound NAT(SNAT)_L3: execute on SRC IP w/provided data
* Outbound NAT (SNAT)_L4: execute on SRC IP, src port based on provided data
* Nul:block trafficl
* Private Link
Route Type (Choices)...
Route ID
Route ID
10.0.0.0/24,
...more prefixes
10.0.0.0/24,...
Encap: VXLAN

Action:
check mapping table for exact DST, VNI, ANd D-MAC rewrite info
Encap: VXLAN...
Encap_with_lookup_V4_underlay
Encap_with_lookup_V4_underlay
1
1
Perform Outbound Slow Path
Perform Outbo...
Performed on the appliance
Performed on the appliance
Text is not SVG - cannot display
+Conditional DecapSLB decap if needed; Decap VNET GRE KeyDecap only if Outer PA matches thePA configured for the ENIElse, drop and terminate pipelineLookup Flow Table Match Not Found; ENI rule processing beginsPossible to insert corresponding inbound flowACL1SRC,DST,SPort,DPort,Protocol,Action,Priority, Exit ACL pipeline on hit (Terminate)?Controlled by Azure/MSFT, contains default rulesBlock, Soft Block, Allow, DenyIf rule w/bit exit ACL pipeline on hit is matched, the ACL pipeline is abandonedACL2SRC,DST,SPort,DPort,Protocol,Action,Priority, Exit ACL pipeline on hit (Terminate)?Customer ControlledACL3SRC,DST,SPort,DPort,Protocol,Action,Priority,Exit ACL pipeline on hit (Terminate)?Customer ControlledTranspose ActionRewrite MACsTranspose IPs / portsEncap/DecapInbound Route ProcessingSet DSCP bit value to XProcess relevant routeEach route entry has prefix and separate actionentryCreate the FlowExample of Mapping and Routing Tables Mapping Table for a V-Port / ENI
| Customer Address (CA) | Physical Address v4 (PA) | Physical Address v6 (PA) | MAC Address for D-MAC Rewrite | VNI to use |
|---|---|---|---|---|
| 10.0.0.1 | 100.0.0.1 | 3ffe::1 | E4-A7-A0-99-0E-17 | 10001 |

Example Route Table for a V-Port / ENI

| Route | Action | Route Type (Choices) | Route ID |
|---|---|---|---|
| 10.0.0.0/24, ...more prefixes | Encap: VXLAN. Action: check mapping table for exact DST, VNI, and D-MAC rewrite info | Encap_with_lookup_V4_underlay | 1 |

Route Type (Choices):

* Encap_with_lookup_V4_underlay: Encap per mapping table. V4 underlay
* Encap_with_lookup_V6_underlay: Encap per mapping table. V6 underlay
* Encap_with_Provided_data (PA): Encap per provided data (Multiple PA can be provided)
* Outbound NAT(SNAT)_L3: execute on SRC IP w/provided data
* Outbound NAT (SNAT)_L4: execute on SRC IP, src port based on provided data
* Null: block traffic
* Private Link

Perform Outbound Slow Path
Performed on the appliance
\ No newline at end of file
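The example tables above describe a two-stage lookup: the route entry selects a route type, and Encap_with_lookup_V4_underlay then pulls the underlay PA, D-MAC rewrite, and VNI from the ENI mapping table. Below is a minimal illustrative sketch of that lookup; it is not part of this change set, and the names (MAPPING_TABLE, ROUTE_TABLE, resolve_encap) are assumptions for illustration only.

```python
import ipaddress

# Illustrative data only, copied from the example tables in the diagram above.
MAPPING_TABLE = {
    "10.0.0.1": {"pa_v4": "100.0.0.1", "pa_v6": "3ffe::1",
                 "dmac_rewrite": "E4-A7-A0-99-0E-17", "vni": 10001},
}

ROUTE_TABLE = [
    # (prefix, route type) -- longest-prefix match would apply with more entries
    (ipaddress.ip_network("10.0.0.0/24"), "Encap_with_lookup_V4_underlay"),
]

def resolve_encap(dst_ip: str):
    """Return VXLAN encap data for an outbound packet, or None to drop / take the slow path."""
    dst = ipaddress.ip_address(dst_ip)
    for prefix, route_type in ROUTE_TABLE:
        if dst in prefix and route_type == "Encap_with_lookup_V4_underlay":
            mapping = MAPPING_TABLE.get(dst_ip)
            if mapping is None:
                return None  # no CA->PA mapping: take the outbound slow path
            return {"encap": "VXLAN", "outer_dst": mapping["pa_v4"],
                    "dmac": mapping["dmac_rewrite"], "vni": mapping["vni"]}
    return None  # no route, or "Null: block traffic"

print(resolve_encap("10.0.0.1"))
# -> {'encap': 'VXLAN', 'outer_dst': '100.0.0.1', 'dmac': 'E4-A7-A0-99-0E-17', 'vni': 10001}
```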
diff --git a/documentation/general/images/vport.png b/documentation/general/images/vport.png
index 01837f4dc..8f922eb7a 100644
Binary files a/documentation/general/images/vport.png and b/documentation/general/images/vport.png differ
diff --git a/documentation/general/sdn-pipeline-basic-elements.md b/documentation/general/sdn-pipeline-basic-elements.md
index b5d2fe1d4..85758580e 100644
--- a/documentation/general/sdn-pipeline-basic-elements.md
+++ b/documentation/general/sdn-pipeline-basic-elements.md
@@ -210,7 +210,7 @@ ACL groups need to be evaluated in order. The following rules apply:
 
   - Priority is only within rules in the same group. No priorities across groups are allowed.
   - A smaller priority number means the rule will be evaluated first.
-  - Priorities are unique withing an ACL group. Priorities might overlap across
+  - Priorities are unique within an ACL group. Priorities might overlap across
     ACL groups.
 
 #### ACL levels
diff --git a/documentation/high-avail/images/dash-ha-smart-switch.svg b/documentation/high-avail/images/dash-ha-smart-switch.svg
index f3eea4cb8..33b165715 100644
--- a/documentation/high-avail/images/dash-ha-smart-switch.svg
+++ b/documentation/high-avail/images/dash-ha-smart-switch.svg
@@ -1,4 +1,4 @@
-[old diagram text; the SVG markup is not preserved in this extract. The label text matches the + side below except that "DASH Appliance" is misspelled "DASH Applliance"; it ends with the viewer fallback text "Viewer does not support full SVG 1.1".]
+[new diagram text; the SVG markup is not preserved in this extract. The label text follows:]
DASH Appliance/
Smart Switch
Captive DPU complex
DASH Appliance/
Smart Switch
Inter-DPU HA communications channel routed through network
to upper tiered switches or network
1
2
N
Captive DPU complex
1
2
N
Switch ASIC
Switch ASIC
DASH Appliance/
Smart Switch
Captive DPU complex
1
2
N
Switch ASIC
High-Availability Peers
Backup Device
Links are illustrative only, actual qty and speed varies.
Devices may be co-located or remotely-located
\ No newline at end of file
diff --git a/slides/SONiC-DASH-Python-Model.pdf b/slides/SONiC-DASH-Python-Model.pdf
new file mode 100644
index 000000000..05e596de0
Binary files /dev/null and b/slides/SONiC-DASH-Python-Model.pdf differ